diff --git a/.bzrignore b/.bzrignore index cce2f366517..25f2367aa84 100644 --- a/.bzrignore +++ b/.bzrignore @@ -133,137 +133,107 @@ autom4te.cache/* autom4te.cache/output.0 autom4te.cache/requests autom4te.cache/traces.0 -bdb/*.ds? -bdb/*.vcproj -bdb/README -bdb/btree/btree_auto.c -bdb/build_unix/* -bdb/build_vxworks/db.h -bdb/build_vxworks/db_int.h -bdb/build_win32/db.h -bdb/build_win32/db_archive.dsp -bdb/build_win32/db_checkpoint.dsp -bdb/build_win32/db_config.h -bdb/build_win32/db_cxx.h -bdb/build_win32/db_deadlock.dsp -bdb/build_win32/db_dll.dsp -bdb/build_win32/db_dump.dsp -bdb/build_win32/db_int.h -bdb/build_win32/db_java.dsp -bdb/build_win32/db_load.dsp -bdb/build_win32/db_perf.dsp -bdb/build_win32/db_printlog.dsp -bdb/build_win32/db_recover.dsp -bdb/build_win32/db_stat.dsp -bdb/build_win32/db_static.dsp -bdb/build_win32/db_tcl.dsp -bdb/build_win32/db_test.dsp -bdb/build_win32/db_upgrade.dsp -bdb/build_win32/db_verify.dsp -bdb/build_win32/ex_access.dsp -bdb/build_win32/ex_btrec.dsp -bdb/build_win32/ex_env.dsp -bdb/build_win32/ex_lock.dsp -bdb/build_win32/ex_mpool.dsp -bdb/build_win32/ex_tpcb.dsp -bdb/build_win32/excxx_access.dsp -bdb/build_win32/excxx_btrec.dsp -bdb/build_win32/excxx_env.dsp -bdb/build_win32/excxx_lock.dsp -bdb/build_win32/excxx_mpool.dsp -bdb/build_win32/excxx_tpcb.dsp -bdb/build_win32/include.tcl -bdb/build_win32/libdb.def -bdb/build_win32/libdb.rc -bdb/db/crdel_auto.c -bdb/db/db_auto.c -bdb/dbinc_auto/*.* -bdb/dbreg/dbreg_auto.c -bdb/dist/autom4te-2.53.cache/* -bdb/dist/autom4te-2.53.cache/output.0 -bdb/dist/autom4te-2.53.cache/requests -bdb/dist/autom4te-2.53.cache/traces.0 -bdb/dist/autom4te.cache/* -bdb/dist/autom4te.cache/output.0 -bdb/dist/autom4te.cache/requests -bdb/dist/autom4te.cache/traces.0 -bdb/dist/config.hin -bdb/dist/configure -bdb/dist/tags -bdb/dist/template/db_server_proc -bdb/dist/template/gen_client_ret -bdb/dist/template/rec_btree -bdb/dist/template/rec_crdel -bdb/dist/template/rec_db -bdb/dist/template/rec_dbreg 
-bdb/dist/template/rec_fileops -bdb/dist/template/rec_hash -bdb/dist/template/rec_log -bdb/dist/template/rec_qam -bdb/dist/template/rec_txn -bdb/examples_c/ex_apprec/ex_apprec_auto.c -bdb/examples_c/ex_apprec/ex_apprec_auto.h -bdb/examples_c/ex_apprec/ex_apprec_template -bdb/examples_java -bdb/fileops/fileops_auto.c -bdb/hash/hash_auto.c -bdb/include/btree_auto.h -bdb/include/btree_ext.h -bdb/include/clib_ext.h -bdb/include/common_ext.h -bdb/include/crdel_auto.h -bdb/include/db_auto.h -bdb/include/db_ext.h -bdb/include/db_server.h -bdb/include/env_ext.h -bdb/include/gen_client_ext.h -bdb/include/gen_server_ext.h -bdb/include/hash_auto.h -bdb/include/hash_ext.h -bdb/include/lock_ext.h -bdb/include/log_auto.h -bdb/include/log_ext.h -bdb/include/mp_ext.h -bdb/include/mutex_ext.h -bdb/include/os_ext.h -bdb/include/qam_auto.h -bdb/include/qam_ext.h -bdb/include/rpc_client_ext.h -bdb/include/rpc_server_ext.h -bdb/include/tcl_ext.h -bdb/include/txn_auto.h -bdb/include/txn_ext.h -bdb/include/xa_ext.h -bdb/java/src/com/sleepycat/db/Db.java -bdb/java/src/com/sleepycat/db/DbBtreeStat.java -bdb/java/src/com/sleepycat/db/DbConstants.java -bdb/java/src/com/sleepycat/db/DbHashStat.java -bdb/java/src/com/sleepycat/db/DbLockStat.java -bdb/java/src/com/sleepycat/db/DbLogStat.java -bdb/java/src/com/sleepycat/db/DbMpoolFStat.java -bdb/java/src/com/sleepycat/db/DbQueueStat.java -bdb/java/src/com/sleepycat/db/DbRepStat.java -bdb/java/src/com/sleepycat/db/DbTxnStat.java -bdb/libdb_java/java_stat_auto.c -bdb/libdb_java/java_stat_auto.h -bdb/log/log_auto.c -bdb/qam/qam_auto.c -bdb/rpc_client/db_server_clnt.c -bdb/rpc_client/gen_client.c -bdb/rpc_server/c/db_server_proc.c -bdb/rpc_server/c/db_server_proc.sed -bdb/rpc_server/c/db_server_svc.c -bdb/rpc_server/c/db_server_xdr.c -bdb/rpc_server/c/gen_db_server.c -bdb/rpc_server/db_server.x -bdb/rpc_server/db_server_proc.sed -bdb/rpc_server/db_server_svc.c -bdb/rpc_server/db_server_xdr.c -bdb/rpc_server/gen_db_server.c -bdb/test/TESTS 
-bdb/test/include.tcl -bdb/test/logtrack.list -bdb/txn/txn_auto.c +storage/bdb/*.ds? +storage/bdb/*.vcproj +storage/bdb/README +storage/bdb/btree/btree_auto.c +storage/bdb/build_unix/* +storage/bdb/build_vxworks/db.h +storage/bdb/build_vxworks/db_int.h +storage/bdb/build_win32/db.h +storage/bdb/build_win32/db_archive.dsp +storage/bdb/build_win32/db_checkpoint.dsp +storage/bdb/build_win32/db_config.h +storage/bdb/build_win32/db_cxx.h +storage/bdb/build_win32/db_deadlock.dsp +storage/bdb/build_win32/db_dll.dsp +storage/bdb/build_win32/db_dump.dsp +storage/bdb/build_win32/db_int.h +storage/bdb/build_win32/db_java.dsp +storage/bdb/build_win32/db_load.dsp +storage/bdb/build_win32/db_perf.dsp +storage/bdb/build_win32/db_printlog.dsp +storage/bdb/build_win32/db_recover.dsp +storage/bdb/build_win32/db_stat.dsp +storage/bdb/build_win32/db_static.dsp +storage/bdb/build_win32/db_tcl.dsp +storage/bdb/build_win32/db_test.dsp +storage/bdb/build_win32/db_upgrade.dsp +storage/bdb/build_win32/db_verify.dsp +storage/bdb/build_win32/ex_access.dsp +storage/bdb/build_win32/ex_btrec.dsp +storage/bdb/build_win32/ex_env.dsp +storage/bdb/build_win32/ex_lock.dsp +storage/bdb/build_win32/ex_mpool.dsp +storage/bdb/build_win32/ex_tpcb.dsp +storage/bdb/build_win32/excxx_access.dsp +storage/bdb/build_win32/excxx_btrec.dsp +storage/bdb/build_win32/excxx_env.dsp +storage/bdb/build_win32/excxx_lock.dsp +storage/bdb/build_win32/excxx_mpool.dsp +storage/bdb/build_win32/excxx_tpcb.dsp +storage/bdb/build_win32/include.tcl +storage/bdb/build_win32/libdb.def +storage/bdb/build_win32/libdb.rc +storage/bdb/db/crdel_auto.c +storage/bdb/db/db_auto.c +storage/bdb/dbinc_auto/*.* +storage/bdb/dbreg/dbreg_auto.c +storage/bdb/dist/autom4te-2.53.cache/* +storage/bdb/dist/autom4te-2.53.cache/output.0 +storage/bdb/dist/autom4te-2.53.cache/requests +storage/bdb/dist/autom4te-2.53.cache/traces.0 +storage/bdb/dist/autom4te.cache/* +storage/bdb/dist/autom4te.cache/output.0 +storage/bdb/dist/autom4te.cache/requests 
+storage/bdb/dist/autom4te.cache/traces.0 +storage/bdb/dist/config.hin +storage/bdb/dist/configure +storage/bdb/dist/tags +storage/bdb/dist/template/db_server_proc +storage/bdb/dist/template/gen_client_ret +storage/bdb/dist/template/rec_btree +storage/bdb/dist/template/rec_crdel +storage/bdb/dist/template/rec_db +storage/bdb/dist/template/rec_dbreg +storage/bdb/dist/template/rec_fileops +storage/bdb/dist/template/rec_hash +storage/bdb/dist/template/rec_log +storage/bdb/dist/template/rec_qam +storage/bdb/dist/template/rec_txn +storage/bdb/fileops/fileops_auto.c +storage/bdb/hash/hash_auto.c +storage/bdb/include/btree_auto.h +storage/bdb/include/btree_ext.h +storage/bdb/include/clib_ext.h +storage/bdb/include/common_ext.h +storage/bdb/include/crdel_auto.h +storage/bdb/include/db_auto.h +storage/bdb/include/db_ext.h +storage/bdb/include/db_server.h +storage/bdb/include/env_ext.h +storage/bdb/include/gen_client_ext.h +storage/bdb/include/gen_server_ext.h +storage/bdb/include/hash_auto.h +storage/bdb/include/hash_ext.h +storage/bdb/include/lock_ext.h +storage/bdb/include/log_auto.h +storage/bdb/include/log_ext.h +storage/bdb/include/mp_ext.h +storage/bdb/include/mutex_ext.h +storage/bdb/include/os_ext.h +storage/bdb/include/qam_auto.h +storage/bdb/include/qam_ext.h +storage/bdb/include/rpc_client_ext.h +storage/bdb/include/rpc_server_ext.h +storage/bdb/include/tcl_ext.h +storage/bdb/include/txn_auto.h +storage/bdb/include/txn_ext.h +storage/bdb/include/xa_ext.h +storage/bdb/log/log_auto.c +storage/bdb/qam/qam_auto.c +storage/bdb/txn/txn_auto.c +storage/bdb/txn/txn_autop.c binary/* bkpull.log bkpull.log* diff --git a/storage/bdb/LICENSE b/storage/bdb/LICENSE index 8cb10e79bf8..87fa4cc6d1b 100644 --- a/storage/bdb/LICENSE +++ b/storage/bdb/LICENSE @@ -1,5 +1,5 @@ /*- - * $Id: LICENSE,v 11.12 2004/03/30 20:49:44 bostic Exp $ + * $Id: LICENSE,v 12.1 2005/06/16 20:20:10 bostic Exp $ */ The following is the license that applies to this copy of the Berkeley DB @@ -10,7 +10,7 
@@ the Web at http://www.sleepycat.com. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= /* - * Copyright (c) 1990-2004 + * Copyright (c) 1990-2005 * Sleepycat Software. All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/storage/bdb/README b/storage/bdb/README new file mode 100644 index 00000000000..38832132eb5 --- /dev/null +++ b/storage/bdb/README @@ -0,0 +1,5 @@ +Sleepycat Software: Berkeley DB 4.4.16: (November 12, 2005) + +This is version 4.4.16 of Berkeley DB from Sleepycat Software. To view +the release and installation documentation, load the distribution file +docs/index.html into your web browser. diff --git a/storage/bdb/btree/bt_compact.c b/storage/bdb/btree/bt_compact.c new file mode 100644 index 00000000000..3cc04b9aa19 --- /dev/null +++ b/storage/bdb/btree/bt_compact.c @@ -0,0 +1,2348 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1999-2005 + * Sleepycat Software. All rights reserved. 
+ * + * $Id: bt_compact.c,v 12.34 2005/11/10 21:07:48 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#include +#endif + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_shash.h" +#include "dbinc/btree.h" +#include "dbinc/lock.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/txn.h" + +static int __bam_compact_dups __P((DBC *, + PAGE *, u_int32_t, int, DB_COMPACT *, int *)); +static int __bam_compact_int __P((DBC *, + DBT *, DBT *, u_int32_t, int *, DB_COMPACT *, int *)); +static int __bam_csearch __P((DBC *, DBT *, u_int32_t, int)); +static int __bam_merge __P((DBC *, + DBC *, u_int32_t, DBT *, DB_COMPACT *,int *)); +static int __bam_merge_internal __P((DBC *, DBC *, int, DB_COMPACT *, int *)); +static int __bam_merge_pages __P((DBC *, DBC *, DB_COMPACT *)); +static int __bam_merge_records __P((DBC *, DBC*, u_int32_t, DB_COMPACT *)); +static int __bam_truncate_internal_overflow __P((DBC *, PAGE *, DB_COMPACT *)); +static int __bam_truncate_overflow __P((DBC *, + db_pgno_t, db_pgno_t, DB_COMPACT *)); +static int __bam_truncate_page __P((DBC *, PAGE **, int)); +static int __bam_truncate_root_page __P((DBC *, + PAGE *, u_int32_t, DB_COMPACT *)); + +#ifdef HAVE_FTRUNCATE +static int __bam_free_freelist __P((DB *, DB_TXN *)); +static int __bam_savekey __P((DBC *, int, DBT *)); +static int __bam_setup_freelist __P((DB *, struct pglist *, u_int32_t)); +static int __bam_truncate_internal __P((DB *, DB_TXN *, DB_COMPACT *)); +#endif + +#define SAVE_START \ + do { \ + save_data = *c_data; \ + ret = __db_retcopy(dbenv, \ + &save_start, end->data, end->size, \ + &save_start.data, &save_start.ulen); \ + } while (0) + +/* + * Only restore those things that are negated by aborting the + * transaction. We don't restore the number of deadlocks, for example. 
+ */ + +#define RESTORE_START \ + do { \ + c_data->compact_pages_free = \ + save_data.compact_pages_free; \ + c_data->compact_levels = save_data.compact_levels; \ + c_data->compact_truncate = save_data.compact_truncate; \ + ret = __db_retcopy(dbenv, end, \ + save_start.data, save_start.size, \ + &end->data, &end->ulen); \ + } while (0) +/* + * __bam_compact -- compact a btree. + * + * PUBLIC: int __bam_compact __P((DB *, DB_TXN *, + * PUBLIC: DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *)); + */ +int +__bam_compact(dbp, txn, start, stop, c_data, flags, end) + DB *dbp; + DB_TXN *txn; + DBT *start, *stop; + DB_COMPACT *c_data; + u_int32_t flags; + DBT *end; +{ + DBT current, save_start; + DBC *dbc; + DB_COMPACT save_data; + DB_ENV *dbenv; + db_pgno_t last_pgno; + struct pglist *list; + u_int32_t factor, nelems, truncated; + int deadlock, done, ret, span, t_ret, txn_local; + + dbenv = dbp->dbenv; + + memset(¤t, 0, sizeof(current)); + memset(&save_start, 0, sizeof(save_start)); + dbc = NULL; + deadlock = 0; + done = 0; + factor = 0; + ret = 0; + span = 0; + truncated = 0; + last_pgno = 0; + + /* + * We pass "end" to the internal routine, indicating where + * that routine should begin its work and expecting that it + * will return to us the last key that it processed. + */ + if (end == NULL) + end = ¤t; + if (start != NULL && (ret = __db_retcopy(dbenv, + end, start->data, start->size, &end->data, &end->ulen)) != 0) + return (ret); + + list = NULL; + nelems = 0; + + if (IS_DB_AUTO_COMMIT(dbp, txn)) + txn_local = 1; + else + txn_local = 0; + if (!LF_ISSET(DB_FREE_SPACE | DB_FREELIST_ONLY)) + goto no_free; + if (LF_ISSET(DB_FREELIST_ONLY)) + LF_SET(DB_FREE_SPACE); + +#ifdef HAVE_FTRUNCATE + /* Sort the freelist and set up the in-memory list representation. 
*/ + if (txn_local && (ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) + goto err; + + if ((ret = __db_free_truncate(dbp, + txn, flags, c_data, &list, &nelems, &last_pgno)) != 0) { + LF_CLR(DB_FREE_SPACE); + goto terr; + } + + /* If the freelist is empty and we are not filling, get out. */ + if (nelems == 0 && LF_ISSET(DB_FREELIST_ONLY)) { + ret = 0; + LF_CLR(DB_FREE_SPACE); + goto terr; + } + if ((ret = __bam_setup_freelist(dbp, list, nelems)) != 0) { + /* Someone else owns the free list. */ + if (ret == EBUSY) + ret = 0; + } + + /* Commit the txn and release the meta page lock. */ +terr: if (txn_local) { + if ((t_ret = __txn_commit(txn, DB_TXN_NOSYNC)) != 0 && ret == 0) + ret = t_ret; + txn = NULL; + } + if (ret != 0) + goto err; + + /* Save the number truncated so far, we will add what we get below. */ + truncated = c_data->compact_pages_truncated; + if (LF_ISSET(DB_FREELIST_ONLY)) + goto done; +#endif + + /* + * We want factor to be the target number of free bytes on each page, + * so we know when to stop adding items to a page. Make sure to + * subtract the page overhead when computing this target. This can + * result in a 1-2% error on the smallest page. + * First figure out how many bytes we should use: + */ +no_free: + factor = dbp->pgsize - SIZEOF_PAGE; + if (c_data->compact_fillpercent != 0) { + factor *= c_data->compact_fillpercent; + factor /= 100; + } + /* Now convert to the number of free bytes to target. 
*/ + factor = (dbp->pgsize - SIZEOF_PAGE) - factor; + + if (c_data->compact_pages == 0) + c_data->compact_pages = DB_MAX_PAGES; + + do { + deadlock = 0; + + SAVE_START; + if (ret != 0) + break; + + if (txn_local) { + if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) + break; + + if (c_data->compact_timeout != 0 && + (ret = __txn_set_timeout(txn, + c_data->compact_timeout, DB_SET_LOCK_TIMEOUT)) != 0) + goto err; + } + + if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) + goto err; + + if ((ret = __bam_compact_int(dbc, end, stop, factor, + &span, c_data, &done)) == DB_LOCK_DEADLOCK && txn_local) { + /* + * We retry on deadlock. Cancel the statistics + * and reset the start point to before this + * iteration. + */ + deadlock = 1; + c_data->compact_deadlock++; + RESTORE_START; + } + + if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + +err: if (txn_local && txn != NULL) { + if (ret == 0 && deadlock == 0) + ret = __txn_commit(txn, DB_TXN_NOSYNC); + else if ((t_ret = __txn_abort(txn)) != 0 && ret == 0) + ret = t_ret; + txn = NULL; + } + } while (ret == 0 && !done); + + if (current.data != NULL) + __os_free(dbenv, current.data); + if (save_start.data != NULL) + __os_free(dbenv, save_start.data); + +#ifdef HAVE_FTRUNCATE + /* + * Finish up truncation work. If there are pages left in the free + * list then search the internal nodes of the tree as we may have + * missed some while walking the leaf nodes. Then calculate how + * many pages we have truncated and release the in-memory free list. + */ +done: if (LF_ISSET(DB_FREE_SPACE)) { + DBMETA *meta; + db_pgno_t pgno; + + pgno = PGNO_BASE_MD; + done = 1; + if (ret == 0 && !LF_ISSET(DB_FREELIST_ONLY) && + (t_ret = __memp_fget(dbp->mpf, &pgno, 0, &meta)) == 0) { + done = meta->free == PGNO_INVALID; + ret = __memp_fput(dbp->mpf, meta, 0); + } + + if (!done) + ret = __bam_truncate_internal(dbp, txn, c_data); + + /* Clean up the free list. 
*/ + if (list != NULL) + __os_free(dbenv, list); + + if ((t_ret = __memp_fget(dbp->mpf, &pgno, 0, &meta)) == 0) { + c_data->compact_pages_truncated = + truncated + last_pgno - meta->last_pgno; + if ((t_ret = + __memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0) + ret = t_ret; + } else if (ret == 0) + ret = t_ret; + + if ((t_ret = __bam_free_freelist(dbp, txn)) != 0 && ret == 0) + t_ret = ret; + } +#endif + + return (ret); +} + +/* + * __bam_csearch -- isolate search code for bam_compact. + * This routine hides the differences between searching + * a BTREE and a RECNO from the rest of the code. + */ +#define CS_READ 0 /* We are just reading. */ +#define CS_PARENT 1 /* We want the parent too, write lock. */ +#define CS_NEXT 2 /* Get the next page. */ +#define CS_NEXT_WRITE 3 /* Get the next page and write lock. */ +#define CS_DEL 4 /* Get a stack to delete a page. */ +#define CS_START 5 /* Starting level for stack, write lock. */ +#define CS_GETRECNO 0x80 /* Extract record number from start. */ + +static int +__bam_csearch(dbc, start, sflag, level) + DBC *dbc; + DBT *start; + u_int32_t sflag; + int level; +{ + BTREE_CURSOR *cp; + int not_used, ret; + + cp = (BTREE_CURSOR *)dbc->internal; + + if (dbc->dbtype == DB_RECNO) { + /* If GETRECNO is not set the cp->recno is what we want. 
*/ + if (FLD_ISSET(sflag, CS_GETRECNO)) { + if (start == NULL || start->size == 0) + cp->recno = 1; + else if ((ret = + __ram_getno(dbc, start, &cp->recno, 0)) != 0) + return (ret); + FLD_CLR(sflag, CS_GETRECNO); + } + switch (sflag) { + case CS_READ: + sflag = S_READ; + break; + case CS_NEXT: + sflag = S_PARENT | S_READ; + break; + case CS_START: + level = LEAFLEVEL; + /* FALLTHROUGH */ + case CS_DEL: + case CS_NEXT_WRITE: + sflag = S_STACK; + break; + case CS_PARENT: + sflag = S_PARENT | S_WRITE; + break; + default: + return (__db_panic(dbc->dbp->dbenv, EINVAL)); + } + if ((ret = __bam_rsearch(dbc, + &cp->recno, sflag, level, ¬_used)) != 0) + return (ret); + /* Reset the cursor's recno to the beginning of the page. */ + cp->recno -= cp->csp->indx; + } else { + FLD_CLR(sflag, CS_GETRECNO); + switch (sflag) { + case CS_READ: + sflag = S_READ | S_DUPFIRST; + break; + case CS_DEL: + sflag = S_DEL; + break; + case CS_NEXT: + sflag = S_NEXT; + break; + case CS_NEXT_WRITE: + sflag = S_NEXT | S_WRITE; + break; + case CS_START: + sflag = S_START | S_WRITE; + break; + case CS_PARENT: + sflag = S_PARENT | S_WRITE; + break; + default: + return (__db_panic(dbc->dbp->dbenv, EINVAL)); + } + if (start == NULL || start->size == 0) + FLD_SET(sflag, S_MIN); + + if ((ret = __bam_search(dbc, + cp->root, start, sflag, level, NULL, ¬_used)) != 0) + return (ret); + } + + return (0); +} + +/* + * __bam_compact_int -- internal compaction routine. + * Called either with a cursor on the main database + * or a cursor initialized to the root of an off page duplicate + * tree. 
+ */ +static int +__bam_compact_int(dbc, start, stop, factor, spanp, c_data, donep) + DBC *dbc; + DBT *start, *stop; + u_int32_t factor; + int *spanp; + DB_COMPACT *c_data; + int *donep; +{ + BTREE_CURSOR *cp, *ncp; + DB *dbp; + DBC *ndbc; + DB_ENV *dbenv; + DB_LOCK nolock; + EPG *epg; + DB_MPOOLFILE *dbmp; + PAGE *pg, *ppg, *npg; + db_pgno_t npgno; + db_recno_t next_recno; + u_int32_t sflag; + int check_dups, check_trunc, done, level; + int merged, nentry, next_page, pgs_done, ret, t_ret, tdone; +#ifdef DEBUG + DBT trace; + char buf[256]; +#define CTRACE(dbc, location, t, start, f) do { \ + trace.data = t; \ + trace.size = (u_int32_t)strlen(t); \ + DEBUG_LWRITE(dbc, dbc->txn, location, &trace, start, f) \ + } while (0) +#define PTRACE(dbc, location, p, start, f) do { \ + (void)sprintf(buf, "pgno: %lu", (u_long)p); \ + CTRACE(dbc, location, buf, start, f); \ + } while (0) +#else +#define CTRACE(dbc, location, t, start, f) +#define PTRACE(dbc, location, p, start, f) +#endif + + ndbc = NULL; + pg = NULL; + npg = NULL; + done = 0; + tdone = 0; + pgs_done = 0; + next_recno = 0; + next_page = 0; + LOCK_INIT(nolock); + check_trunc = c_data->compact_truncate != PGNO_INVALID; + check_dups = (!F_ISSET(dbc, DBC_OPD) && + F_ISSET(dbc->dbp, DB_AM_DUP)) || check_trunc; + + dbp = dbc->dbp; + dbenv = dbp->dbenv; + dbmp = dbp->mpf; + cp = (BTREE_CURSOR *)dbc->internal; + + /* Search down the tree for the starting point. */ + if ((ret = __bam_csearch(dbc, + start, CS_READ | CS_GETRECNO, LEAFLEVEL)) != 0) { + /* Its not an error to compact an empty db. */ + if (ret == DB_NOTFOUND) + ret = 0; + goto err; + } + + /* + * Get the first leaf page. The loop below will change pg so + * we clear the stack reference so we don't put a a page twice. + */ + pg = cp->csp->page; + cp->csp->page = NULL; + next_recno = cp->recno; +next: /* + * This is the start of the main compaction loop. 
There are 3 + * parts to the process: + * 1) Walk the leaf pages of the tree looking for a page to + * process. We do this with read locks. Save the + * key from the page and release it. + * 2) Set up a cursor stack which will write lock the page + * and enough of its ancestors to get the job done. + * This could go to the root if we might delete a subtree + * or we have record numbers to update. + * 3) Loop fetching pages after the above page and move enough + * data to fill it. + * We exit the loop if we are at the end of the leaf pages, are + * about to lock a new subtree (we span) or on error. + */ + + /* Walk the pages looking for something to fill up. */ + while ((npgno = NEXT_PGNO(pg)) != PGNO_INVALID) { + c_data->compact_pages_examine++; + PTRACE(dbc, "Next", PGNO(pg), start, 0); + + /* If we have fetched the next page, get the new key. */ + if (next_page == 1 && + dbc->dbtype != DB_RECNO && NUM_ENT(pg) != 0) { + if ((ret = __db_ret(dbp, pg, + 0, start, &start->data, &start->ulen)) != 0) + goto err; + } + next_recno += NUM_ENT(pg); + if (P_FREESPACE(dbp, pg) > factor || + (check_trunc && PGNO(pg) > c_data->compact_truncate)) + break; + /* + * The page does not need more data or to be swapped, + * check to see if we want to look at possible duplicate + * trees or overflow records and the move on to the next page. + */ + cp->recno += NUM_ENT(pg); + next_page = 1; + tdone = pgs_done; + PTRACE(dbc, "Dups", PGNO(pg), start, 0); + if (check_dups && (ret = __bam_compact_dups( + dbc, pg, factor, 0, c_data, &pgs_done)) != 0) + goto err; + npgno = NEXT_PGNO(pg); + if ((ret = __memp_fput(dbmp, pg, 0)) != 0) + goto err; + pg = NULL; + /* + * If we don't do anything we don't need to hold + * the lock on the previous page, so couple always. + */ + if ((ret = __db_lget(dbc, + tdone == pgs_done ? 
LCK_COUPLE_ALWAYS : LCK_COUPLE, + npgno, DB_LOCK_READ, 0, &cp->csp->lock)) != 0) + goto err; + if ((ret = __memp_fget(dbmp, &npgno, 0, &pg)) != 0) + goto err; + } + + /* + * When we get here we have 3 cases: + * 1) We've reached the end of the leaf linked list and are done. + * 2) A page whose freespace exceeds our target and therefore needs + * to have data added to it. + * 3) A page that doesn't have too much free space but needs to be + * checked for truncation. + * In both cases 2 and 3, we need that page's first key or record + * number. We may already have it, if not get it here. + */ + if ((nentry = NUM_ENT(pg)) != 0) { + next_page = 0; + /* Get a copy of the first recno on the page. */ + if (dbc->dbtype == DB_RECNO) { + if ((ret = __db_retcopy(dbp->dbenv, start, + &cp->recno, sizeof(cp->recno), + &start->data, &start->ulen)) != 0) + goto err; + } else if (start->size == 0 && + (ret = __db_ret(dbp, pg, + 0, start, &start->data, &start->ulen)) != 0) + goto err; + + if (npgno == PGNO_INVALID) { + /* End of the tree, check its duplicates and exit. */ + PTRACE(dbc, "GoDone", PGNO(pg), start, 0); + if (check_dups && (ret = + __bam_compact_dups(dbc, + pg, factor, 0, c_data, &pgs_done)) != 0) + goto err; + c_data->compact_pages_examine++; + done = 1; + goto done; + } + } + + /* Release the page so we don't deadlock getting its parent. */ + BT_STK_CLR(cp); + if ((ret = __LPUT(dbc, cp->csp->lock)) != 0) + goto err; + if ((ret = __memp_fput(dbmp, pg, 0)) != 0) + goto err; + pg = NULL; + + /* + * Setup the cursor stack. There are 3 cases: + * 1) the page is empty and will be deleted: nentry == 0. + * 2) the next page has the same parent: *spanp == 0. + * 3) the next page has a different parent: *spanp == 1. + * + * We now need to search the tree again, getting a write lock + * on the page we are going to merge or delete. We do this by + * searching down the tree and locking as much of the subtree + * above the page as needed. 
In the case of a delete we will + * find the maximal subtree that can be deleted. In the case + * of merge if the current page and the next page are siblings + * with the same parent then we only need to lock the parent. + * Otherwise *span will be set and we need to search to find the + * lowest common ancestor. Dbc will be set to contain the subtree + * containing the page to be merged or deleted. Ndbc will contain + * the minimal subtree containing that page and its next sibling. + * In all cases for DB_RECNO we simplify things and get the whole + * tree if we need more than a single parent. + */ + + /* Case 1 -- page is empty. */ + if (nentry == 0) { + CTRACE(dbc, "Empty", "", start, 0); + if (next_page == 1) + sflag = CS_NEXT_WRITE; + else + sflag = CS_DEL; + if ((ret = __bam_csearch(dbc, start, sflag, LEAFLEVEL)) != 0) + goto err; + + pg = cp->csp->page; + /* Check to see if the page is still empty. */ + if (NUM_ENT(pg) != 0) + npgno = PGNO(pg); + else { + npgno = NEXT_PGNO(pg); + /* If this is now the root, we are very done. */ + if (PGNO(pg) == cp->root) + done = 1; + else { + if ((ret = __bam_dpages(dbc, 0, 0)) != 0) + goto err; + c_data->compact_pages_free++; + goto next_no_release; + } + } + goto next_page; + } + + /* case 3 -- different parents. */ + if (*spanp) { + CTRACE(dbc, "Span", "", start, 0); + if (ndbc == NULL && (ret = __db_c_dup(dbc, &ndbc, 0)) != 0) + goto err; + ncp = (BTREE_CURSOR *)ndbc->internal; + ncp->recno = next_recno; + /* + * Search the tree looking for the next page after the + * current key. For RECNO get the whole stack. + * For BTREE the return will contain the stack that + * dominates both the current and next pages. + */ + if ((ret = __bam_csearch(ndbc, start, CS_NEXT_WRITE, 0)) != 0) + goto err; + + if (dbc->dbtype == DB_RECNO) { + /* + * The record we are looking for may have moved + * to the previous page. This page should + * be at the beginning of its parent. + * If not, then start over. 
+ */ + if (ncp->csp[-1].indx != 0) { + *spanp = 0; + goto deleted; + } + + } + PTRACE(dbc, "SDups", PGNO(ncp->csp->page), start, 0); + if (check_dups && + (ret = __bam_compact_dups(ndbc, + ncp->csp->page, factor, 1, c_data, &pgs_done)) != 0) + goto err; + + /* + * We need the stacks to be the same height + * so that we can merge parents. + */ + level = LEVEL(ncp->sp->page); + sflag = CS_START; + if ((ret = __bam_csearch(dbc, start, sflag, level)) != 0) + goto err; + pg = cp->csp->page; + *spanp = 0; + + /* + * The page may have emptied while we waited for the lock. + * Reset npgno so we re-get this page when we go back to the + * top. + */ + if (NUM_ENT(pg) == 0) { + npgno = PGNO(pg); + goto next_page; + } + if (check_trunc && PGNO(pg) > c_data->compact_truncate) { + pgs_done++; + /* Get a fresh low numbered page. */ + if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) + goto err1; + } + + npgno = NEXT_PGNO(pg); + PTRACE(dbc, "SDups", PGNO(pg), start, 0); + if (check_dups && (ret = + __bam_compact_dups(dbc, pg, + factor, 1, c_data, &pgs_done)) != 0) + goto err1; + + /* + * We may have dropped our locks, check again + * to see if we still need to fill this page and + * we are in a spanning situation. + */ + + if (P_FREESPACE(dbp, pg) <= factor || + cp->csp[-1].indx != NUM_ENT(cp->csp[-1].page) - 1) + goto next_page; + + /* + * Try to move things into a single parent. + */ + merged = 0; + for (epg = cp->sp; epg != cp->csp; epg++) { + if (PGNO(epg->page) == cp->root) + continue; + PTRACE(dbc, "PMerge", PGNO(epg->page), start, 0); + if ((ret = __bam_merge_internal(dbc, + ndbc, LEVEL(epg->page), c_data, &merged)) != 0) + goto err1; + if (merged) + break; + } + + /* If we merged the parent, then we nolonger span. 
*/ + if (merged) { + pgs_done++; + if (cp->csp->page == NULL) + goto deleted; + npgno = PGNO(pg); + goto next_page; + } + PTRACE(dbc, "SMerge", PGNO(cp->csp->page), start, 0); + npgno = NEXT_PGNO(ncp->csp->page); + if ((ret = __bam_merge(dbc, + ndbc, factor, stop, c_data, &done)) != 0) + goto err1; + pgs_done++; + /* + * __bam_merge could have freed our stack if it + * deleted a page possibly collapsing the tree. + */ + if (cp->csp->page == NULL) + goto deleted; + cp->recno += NUM_ENT(pg); + + /* If we did not bump to the next page something did not fit. */ + if (npgno != NEXT_PGNO(pg)) { + npgno = NEXT_PGNO(pg); + goto next_page; + } + } else { + /* Case 2 -- same parents. */ + CTRACE(dbc, "Sib", "", start, 0); + if ((ret = + __bam_csearch(dbc, start, CS_PARENT, LEAFLEVEL)) != 0) + goto err; + + pg = cp->csp->page; + DB_ASSERT(cp->csp - cp->sp == 1); + npgno = PGNO(pg); + + /* We now have a write lock, recheck the page. */ + if ((nentry = NUM_ENT(pg)) == 0) + goto next_page; + + npgno = NEXT_PGNO(pg); + + /* Check duplicate trees, we have a write lock on the page. */ + PTRACE(dbc, "SibDup", PGNO(pg), start, 0); + if (check_dups && (ret = + __bam_compact_dups(dbc, pg, + factor, 1, c_data, &pgs_done)) != 0) + goto err1; + + if (check_trunc && PGNO(pg) > c_data->compact_truncate) { + pgs_done++; + /* Get a fresh low numbered page. */ + if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) + goto err1; + } + + /* After re-locking check to see if we still need to fill. */ + if (P_FREESPACE(dbp, pg) <= factor) + goto next_page; + + /* If they have the same parent, just dup the cursor */ + if (ndbc != NULL && (ret = __db_c_close(ndbc)) != 0) + goto err1; + if ((ret = __db_c_dup(dbc, &ndbc, DB_POSITION)) != 0) + goto err1; + ncp = (BTREE_CURSOR *)ndbc->internal; + + /* + * ncp->recno needs to have the recno of the next page. + * Bump it by the number of records on the current page. + */ + ncp->recno += NUM_ENT(pg); + } + + /* Fetch pages until we fill this one. 
*/ + while (!done && npgno != PGNO_INVALID && + P_FREESPACE(dbp, pg) > factor && c_data->compact_pages != 0) { + /* + * If our current position is the last one on a parent + * page, then we are about to merge across different + * internal nodes. Thus, we need to lock higher up + * in the tree. We will exit the routine and commit + * what we have done so far. Set spanp so we know + * we are in this case when we come back. + */ + if (cp->csp[-1].indx == NUM_ENT(cp->csp[-1].page) - 1) { + *spanp = 1; + npgno = PGNO(pg); + next_recno = cp->recno; + goto next_page; + } + + /* Lock and get the next page. */ + if ((ret = __db_lget(dbc, LCK_COUPLE, + npgno, DB_LOCK_WRITE, 0, &ncp->lock)) != 0) + goto err1; + if ((ret = __memp_fget(dbmp, &npgno, 0, &npg)) != 0) + goto err1; + + /* Fix up the next page cursor with its parent node. */ + if ((ret = __memp_fget(dbmp, + &PGNO(cp->csp[-1].page), 0, &ppg)) != 0) + goto err1; + BT_STK_PUSH(dbenv, ncp, ppg, + cp->csp[-1].indx + 1, nolock, DB_LOCK_NG, ret); + if (ret != 0) + goto err1; + + /* Put the page on the stack. */ + BT_STK_ENTER(dbenv, ncp, npg, 0, ncp->lock, DB_LOCK_WRITE, ret); + + LOCK_INIT(ncp->lock); + npg = NULL; + + c_data->compact_pages_examine++; + + PTRACE(dbc, "MDups", PGNO(ncp->csp->page), start, 0); + if (check_dups && (ret = __bam_compact_dups(ndbc, + ncp->csp->page, factor, 1, c_data, &pgs_done)) != 0) + goto err1; + + npgno = NEXT_PGNO(ncp->csp->page); + /* + * Merge the pages. This will either free the next + * page or just update its parent pointer. + */ + PTRACE(dbc, "Merge", PGNO(cp->csp->page), start, 0); + if ((ret = __bam_merge(dbc, + ndbc, factor, stop, c_data, &done)) != 0) + goto err1; + + pgs_done++; + + /* + * __bam_merge could have freed our stack if it + * deleted a page possibly collapsing the tree. + */ + if (cp->csp->page == NULL) + goto deleted; + /* If we did not bump to the next page something did not fit. */ + if (npgno != NEXT_PGNO(pg)) + break; + } + + /* Bottom of the main loop. 
Move to the next page. */ + npgno = NEXT_PGNO(pg); + cp->recno += NUM_ENT(pg); + next_recno = cp->recno; + +next_page: + if ((ret = __bam_stkrel(dbc, pgs_done == 0 ? STK_NOLOCK : 0)) != 0) + goto err1; + if (ndbc != NULL && + (ret = __bam_stkrel(ndbc, pgs_done == 0 ? STK_NOLOCK : 0)) != 0) + goto err1; + +next_no_release: + pg = NULL; + + if (npgno == PGNO_INVALID || c_data->compact_pages == 0) + done = 1; + if (!done) { + /* + * If we are at the end of this parent commit the + * transaction so we don't tie things up. + */ + if (pgs_done != 0 && *spanp) { +deleted: if (((ret = __bam_stkrel(ndbc, 0)) != 0 || + (ret = __db_c_close(ndbc)) != 0)) + goto err; + *donep = 0; + return (0); + } + + /* Reget the next page to look at. */ + cp->recno = next_recno; + if ((ret = __memp_fget(dbmp, &npgno, 0, &pg)) != 0) + goto err; + next_page = 1; + goto next; + } + +done: + if (0) { + /* We come here if pg is the same as cp->csp->page. */ +err1: pg = NULL; + } +err: if (dbc != NULL && + (t_ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0 && ret == 0) + ret = t_ret; + if (ndbc != NULL) { + if ((t_ret = __bam_stkrel(ndbc, STK_CLRDBC)) != 0 && ret == 0) + ret = t_ret; + else if ((t_ret = __db_c_close(ndbc)) != 0 && ret == 0) + ret = t_ret; + } + + if (pg != NULL && (t_ret = __memp_fput(dbmp, pg, 0) != 0) && ret == 0) + ret = t_ret; + if (npg != NULL && (t_ret = __memp_fput(dbmp, npg, 0) != 0) && ret == 0) + ret = t_ret; + + *donep = done; + + return (ret); +} + +/* + * __bam_merge -- do actual merging of leaf pages. 
+ */ +static int +__bam_merge(dbc, ndbc, factor, stop, c_data, donep) + DBC *dbc, *ndbc; + u_int32_t factor; + DBT *stop; + DB_COMPACT *c_data; + int *donep; +{ + BTREE_CURSOR *cp, *ncp; + BTREE *t; + DB *dbp; + PAGE *pg, *npg; + db_indx_t adj, nent; + db_recno_t recno; + int cmp, ret; + int (*func) __P((DB *, const DBT *, const DBT *)); + + dbp = dbc->dbp; + t = dbp->bt_internal; + cp = (BTREE_CURSOR *)dbc->internal; + ncp = (BTREE_CURSOR *)ndbc->internal; + pg = cp->csp->page; + npg = ncp->csp->page; + + nent = NUM_ENT(npg); + + /* If the page is empty just throw it away. */ + if (nent == 0) + goto free; + adj = TYPE(npg) == P_LBTREE ? P_INDX : O_INDX; + /* Find if the stopping point is on this page. */ + if (stop != NULL && stop->size != 0) { + if (dbc->dbtype == DB_RECNO) { + if ((ret = __ram_getno(dbc, stop, &recno, 0)) != 0) + goto err; + if (ncp->recno > recno) { + *donep = 1; + if (cp->recno > recno) + goto done; + } + } else { + func = TYPE(npg) == P_LBTREE ? + (dbp->dup_compare == NULL ? + __bam_defcmp : dbp->dup_compare) : t->bt_compare; + + if ((ret = __bam_cmp(dbp, + stop, npg, nent - adj, func, &cmp)) != 0) + goto err; + + /* + * If the last record is beyond the stopping + * point we are done after this page. If the + * first record is beyond the stopping point + * don't even bother with this page. + */ + if (cmp <= 0) { + *donep = 1; + if ((ret = __bam_cmp(dbp, + stop, npg, 0, func, &cmp)) != 0) + goto err; + if (cmp <= 0) + goto done; + } + } + } + + /* + * If there is too much data then just move records one at a time. + * Otherwise copy the data space over and fix up the index table. + * If we are on the left most child we will effect our parent's + * index entry so we call merge_records to figure out key sizes. 
+ */ + if ((dbc->dbtype == DB_BTREE && + ncp->csp[-1].indx == 0 && ncp->csp[-1].entries != 1) || + (int)(P_FREESPACE(dbp, pg) - + ((dbp->pgsize - P_OVERHEAD(dbp)) - + P_FREESPACE(dbp, npg))) < (int)factor) + ret = __bam_merge_records(dbc, ndbc, factor, c_data); + else +free: ret = __bam_merge_pages(dbc, ndbc, c_data); + +done: +err: return (ret); +} + +static int +__bam_merge_records(dbc, ndbc, factor, c_data) + DBC *dbc, *ndbc; + u_int32_t factor; + DB_COMPACT *c_data; +{ + BKEYDATA *bk, *tmp_bk; + BINTERNAL *bi; + BTREE *t; + BTREE_CURSOR *cp, *ncp; + DB *dbp; + DBT a, b, data, hdr; + EPG *epg; + PAGE *pg, *npg; + db_indx_t adj, indx, nent, *ninp, pind; + int32_t adjust; + u_int32_t free, nksize, pfree, size; + int first_dup, is_dup, next_dup, n_ok, ret; + size_t (*func) __P((DB *, const DBT *, const DBT *)); + + dbp = dbc->dbp; + t = dbp->bt_internal; + cp = (BTREE_CURSOR *)dbc->internal; + ncp = (BTREE_CURSOR *)ndbc->internal; + pg = cp->csp->page; + npg = ncp->csp->page; + memset(&hdr, 0, sizeof(hdr)); + pind = NUM_ENT(pg); + n_ok = 0; + adjust = 0; + ret = 0; + nent = NUM_ENT(npg); + + DB_ASSERT (nent != 0); + + /* See if we want to swap out this page. */ + if (c_data->compact_truncate != PGNO_INVALID && + PGNO(npg) > c_data->compact_truncate) { + /* Get a fresh low numbered page. */ + if ((ret = __bam_truncate_page(ndbc, &npg, 1)) != 0) + goto err; + } + + ninp = P_INP(dbp, npg); + + /* + * pg is the page that is being filled, it is in the stack in cp. + * npg is the next page, it is in the stack in ncp. + */ + free = P_FREESPACE(dbp, pg); + + adj = TYPE(npg) == P_LBTREE ? P_INDX : O_INDX; + /* + * Loop through the records and find the stopping point. + */ + for (indx = 0; indx < nent; indx += adj) { + bk = GET_BKEYDATA(dbp, npg, indx); + + /* Size of the key. */ + size = BITEM_PSIZE(bk); + + /* Size of the data. 
*/ + if (TYPE(pg) == P_LBTREE) + size += BITEM_PSIZE(GET_BKEYDATA(dbp, npg, indx + 1)); + /* + * If we are at a duplicate set, skip ahead to see and + * get the total size for the group. + */ + n_ok = adj; + if (TYPE(pg) == P_LBTREE && + indx < nent - adj && + ninp[indx] == ninp[indx + adj]) { + do { + /* Size of index for key reference. */ + size += sizeof(db_indx_t); + n_ok++; + /* Size of data item. */ + size += BITEM_PSIZE( + GET_BKEYDATA(dbp, npg, indx + n_ok)); + n_ok++; + } while (indx + n_ok < nent && + ninp[indx] == ninp[indx + n_ok]); + } + /* if the next set will not fit on the page we are done. */ + if (free < size) + break; + + /* + * Otherwise figure out if we are past the goal and if + * adding this set will put us closer to the goal than + * we are now. + */ + if ((free - size) < factor) { + if (free - factor > factor - (free - size)) + indx += n_ok; + break; + } + free -= size; + indx += n_ok - adj; + } + if (indx == 0) + goto done; + if (TYPE(pg) != P_LBTREE) { + if (indx == nent) + return (__bam_merge_pages(dbc, ndbc, c_data)); + goto no_check; + } + /* + * We need to update npg's parent key. Avoid creating a new key + * that will be too big. Get what space will be available on the + * parents. Then if there will not be room for this key, see if + * prefix compression will make it work, if not backup till we + * find something that will. (Needless to say, this is a very + * unlikely event.) If we are deleting this page then we will + * need to propagate the next key to our grand parents, so we + * see if that will fit. + */ + pfree = dbp->pgsize; + for (epg = &ncp->csp[-1]; epg >= ncp->sp; epg--) + if ((free = P_FREESPACE(dbp, epg->page)) < pfree) { + bi = GET_BINTERNAL(dbp, epg->page, epg->indx); + /* Add back in the key we will be deleting. */ + free += BINTERNAL_PSIZE(bi->len); + if (free < pfree) + pfree = free; + if (epg->indx != 0) + break; + } + + /* + * If we are at the end, we will delete this page. 
We need to + * check the next parent key only if we are the leftmost page and + * will therefore have to propagate the key up the tree. + */ + if (indx == nent) { + if (ncp->csp[-1].indx != 0 || + BINTERNAL_PSIZE(GET_BINTERNAL(dbp, + ncp->csp[-1].page, 1)->len) <= pfree) + return (__bam_merge_pages(dbc, ndbc, c_data)); + indx -= adj; + } + bk = GET_BKEYDATA(dbp, npg, indx); + if (indx != 0 && BINTERNAL_SIZE(bk->len) >= pfree) { + if (F_ISSET(dbc, DBC_OPD)) { + if (dbp->dup_compare == __bam_defcmp) + func = __bam_defpfx; + else + func = NULL; + } else + func = t->bt_prefix; + } else + func = NULL; + + /* Skip to the beginning of a duplicate set. */ + while (indx != 0 && ninp[indx] == ninp[indx - adj]) + indx -= adj; + + while (indx != 0 && BINTERNAL_SIZE(bk->len) >= pfree) { + if (B_TYPE(bk->type) != B_KEYDATA) + goto noprefix; + /* + * Figure out if we can truncate this key. + * Code borrowed from bt_split.c + */ + if (func == NULL) + goto noprefix; + tmp_bk = GET_BKEYDATA(dbp, npg, indx - adj); + if (B_TYPE(tmp_bk->type) != B_KEYDATA) + goto noprefix; + memset(&a, 0, sizeof(a)); + a.size = tmp_bk->len; + a.data = tmp_bk->data; + memset(&b, 0, sizeof(b)); + b.size = bk->len; + b.data = bk->data; + nksize = (u_int32_t)func(dbp, &a, &b); + if (BINTERNAL_PSIZE(nksize) < pfree) + break; +noprefix: + /* Skip to the beginning of a duplicate set. */ + do { + indx -= adj; + } while (indx != 0 && ninp[indx] == ninp[indx - adj]); + + bk = GET_BKEYDATA(dbp, npg, indx); + } + + if (indx == 0) + goto done; + DB_ASSERT(indx <= nent); + + /* Loop through the records and move them from npg to pg. */ +no_check: is_dup = first_dup = next_dup = 0; + do { + bk = GET_BKEYDATA(dbp, npg, 0); + /* Figure out if we are in a duplicate group or not. 
*/ + if ((NUM_ENT(npg) % 2) == 0) { + if (NUM_ENT(npg) > 2 && ninp[0] == ninp[2]) { + if (!is_dup) { + first_dup = 1; + is_dup = 1; + } else + first_dup = 0; + + next_dup = 1; + } else if (next_dup) { + is_dup = 1; + first_dup = 0; + next_dup = 0; + } else + is_dup = 0; + } + + if (is_dup && !first_dup && (pind % 2) == 0) { + /* Duplicate key. */ + if ((ret = __bam_adjindx(dbc, + pg, pind, pind - P_INDX, 1)) != 0) + goto err; + if (!next_dup) + is_dup = 0; + } else switch (B_TYPE(bk->type)) { + case B_KEYDATA: + hdr.data = bk; + hdr.size = SSZA(BKEYDATA, data); + data.size = bk->len; + data.data = bk->data; + if ((ret = __db_pitem(dbc, pg, pind, + BKEYDATA_SIZE(bk->len), &hdr, &data)) != 0) + goto err; + break; + case B_OVERFLOW: + case B_DUPLICATE: + data.size = BOVERFLOW_SIZE; + data.data = bk; + if ((ret = __db_pitem(dbc, pg, pind, + BOVERFLOW_SIZE, &data, NULL)) != 0) + goto err; + break; + default: + __db_err(dbp->dbenv, + "Unknown record format, page %lu, indx 0", + (u_long)PGNO(pg)); + ret = EINVAL; + goto err; + } + pind++; + if (next_dup && (NUM_ENT(npg) % 2) == 0) { + if ((ret = __bam_adjindx(ndbc, + npg, 0, O_INDX, 0)) != 0) + goto err; + } else { + if ((ret = __db_ditem(ndbc, + npg, 0, BITEM_SIZE(bk))) != 0) + goto err; + } + adjust++; + } while (--indx != 0); + + DB_ASSERT(NUM_ENT(npg) != 0); + if ((ret = __memp_fset(dbp->mpf, npg, DB_MPOOL_DIRTY)) != 0) + goto err; + + if (adjust != 0 && + (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD))) { + DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); + if (TYPE(pg) == P_LBTREE) + adjust /= P_INDX; + if ((ret = __bam_adjust(ndbc, -adjust)) != 0) + goto err; + + if ((ret = __bam_adjust(dbc, adjust)) != 0) + goto err; + } + + /* Update parent with new key. 
*/ + if (ndbc->dbtype == DB_BTREE && + (ret = __bam_pupdate(ndbc, pg)) != 0) + goto err; + if ((ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY)) != 0) + goto err; + +done: ret = __bam_stkrel(ndbc, STK_CLRDBC); + +err: return (ret); +} + +static int +__bam_merge_pages(dbc, ndbc, c_data) + DBC *dbc, *ndbc; + DB_COMPACT *c_data; +{ + BTREE_CURSOR *cp, *ncp; + DB *dbp; + DB_MPOOLFILE *dbmp; + DBT data, hdr, ind; + PAGE *pg, *npg; + db_indx_t nent, *ninp, *pinp; + db_pgno_t ppgno; + u_int8_t *bp; + u_int32_t len; + int i, level, ret; + + COMPQUIET(ppgno, PGNO_INVALID); + dbp = dbc->dbp; + dbmp = dbp->mpf; + cp = (BTREE_CURSOR *)dbc->internal; + ncp = (BTREE_CURSOR *)ndbc->internal; + pg = cp->csp->page; + npg = ncp->csp->page; + memset(&hdr, 0, sizeof(hdr)); + nent = NUM_ENT(npg); + + /* If the page is empty just throw it away. */ + if (nent == 0) + goto free; + /* Bulk copy the data to the new page. */ + len = dbp->pgsize - HOFFSET(npg); + if (DBC_LOGGING(dbc)) { + data.data = (u_int8_t *)npg + HOFFSET(npg); + data.size = len; + ind.data = P_INP(dbp, npg); + ind.size = NUM_ENT(npg) * sizeof(db_indx_t); + if ((ret = __bam_merge_log(dbp, + dbc->txn, &LSN(pg), 0, PGNO(pg), + &LSN(pg), PGNO(npg), &LSN(npg), NULL, &data, &ind)) != 0) + goto err; + } else + LSN_NOT_LOGGED(LSN(pg)); + LSN(npg) = LSN(pg); + bp = (u_int8_t *)pg + HOFFSET(pg) - len; + memcpy(bp, (u_int8_t *)npg + HOFFSET(npg), len); + + /* Copy index table offset by what was there already. 
*/ + pinp = P_INP(dbp, pg) + NUM_ENT(pg); + ninp = P_INP(dbp, npg); + for (i = 0; i < NUM_ENT(npg); i++) + *pinp++ = *ninp++ - (dbp->pgsize - HOFFSET(pg)); + HOFFSET(pg) -= len; + NUM_ENT(pg) += i; + + NUM_ENT(npg) = 0; + HOFFSET(npg) += len; + + if (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD)) { + DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); + if (TYPE(pg) == P_LBTREE) + i /= P_INDX; + if ((ret = __bam_adjust(ndbc, -i)) != 0) + goto err; + + if ((ret = __bam_adjust(dbc, i)) != 0) + goto err; + } + ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY); + +free: /* + * __bam_dpages may decide to collapse the tree. + * This can happen if we have the root and there + * are exactly 2 pointers left in it. + * If it can collapse the tree we must free the other + * stack since it will nolonger be valid. This + * must be done before hand because we cannot + * hold a page pinned if it might be truncated. + */ + if (PGNO(ncp->sp->page) == ncp->root && + NUM_ENT(ncp->sp->page) == 2) { + if ((ret = __bam_stkrel(dbc, STK_CLRDBC | STK_PGONLY)) != 0) + goto err; + level = LEVEL(ncp->sp->page); + ppgno = PGNO(ncp->csp[-1].page); + } else + level = 0; + if (c_data->compact_truncate > PGNO(npg)) + c_data->compact_truncate--; + if ((ret = __bam_dpages(ndbc, + 0, ndbc->dbtype == DB_RECNO ? 0 : 1)) != 0) + goto err; + npg = NULL; + c_data->compact_pages_free++; + c_data->compact_pages--; + if (level != 0) { + if ((ret = __memp_fget(dbmp, &ncp->root, 0, &npg)) != 0) + goto err; + if (level == LEVEL(npg)) + level = 0; + if ((ret = __memp_fput(dbmp, npg, 0)) != 0) + goto err; + npg = NULL; + if (level != 0) { + c_data->compact_levels++; + c_data->compact_pages_free++; + if (c_data->compact_truncate > ppgno) + c_data->compact_truncate--; + if (c_data->compact_pages != 0) + c_data->compact_pages--; + } + } + +err: return (ret); +} + +/* + * __bam_merge_internal -- + * Merge internal nodes of the tree. 
+ */ +static int +__bam_merge_internal(dbc, ndbc, level, c_data, merged) + DBC *dbc, *ndbc; + int level; + DB_COMPACT *c_data; + int *merged; +{ + BINTERNAL bi, *bip, *fip; + BTREE_CURSOR *cp, *ncp; + DB_MPOOLFILE *dbmp; + DB *dbp; + DBT data, hdr; + EPG *epg, *save_csp, *nsave_csp; + PAGE *pg, *npg; + RINTERNAL *rk; + db_indx_t indx, pind; + db_pgno_t ppgno; + int32_t trecs; + u_int16_t size; + u_int32_t free, pfree; + int ret; + + COMPQUIET(bip, NULL); + COMPQUIET(ppgno, PGNO_INVALID); + + /* + * ndbc will contain the the dominating parent of the subtree. + * dbc will have the tree containing the left child. + * + * The stacks descend to the leaf level. + * If this is a recno tree then both stacks will start at the root. + */ + dbp = dbc->dbp; + dbmp = dbp->mpf; + cp = (BTREE_CURSOR *)dbc->internal; + ncp = (BTREE_CURSOR *)ndbc->internal; + *merged = 0; + ret = 0; + + /* + * Set the stacks to the level requested. + * Save the old value to restore when we exit. + */ + save_csp = cp->csp; + epg = &cp->csp[-level + 1]; + cp->csp = epg; + pg = epg->page; + pind = NUM_ENT(pg); + + nsave_csp = ncp->csp; + epg = &ncp->csp[-level + 1]; + ncp->csp = epg; + npg = epg->page; + indx = NUM_ENT(npg); + + /* + * The caller may have two stacks that include common ancestors, we + * check here for convenience. + */ + if (npg == pg) + goto done; + + if (TYPE(pg) == P_IBTREE) { + /* + * Check for overflow keys on both pages while we have + * them locked. + */ + if ((ret = + __bam_truncate_internal_overflow(dbc, pg, c_data)) != 0) + goto err; + if ((ret = + __bam_truncate_internal_overflow(dbc, npg, c_data)) != 0) + goto err; + } + + /* + * If we are about to move data off the left most page of an + * internal node we will need to update its parents, make sure there + * will be room for the new key on all the parents in the stack. + * If not, move less data. + */ + fip = NULL; + if (TYPE(pg) == P_IBTREE) { + /* See where we run out of space. 
*/ + free = P_FREESPACE(dbp, pg); + /* + * The leftmost key of an internal page is not accurate. + * Go up the tree to find a non-leftmost parent. + */ + while (--epg >= ncp->sp && epg->indx == 0) + continue; + fip = bip = GET_BINTERNAL(dbp, epg->page, epg->indx); + epg = ncp->csp; + + for (indx = 0;;) { + size = BINTERNAL_PSIZE(bip->len); + if (size > free) + break; + free -= size; + if (++indx >= NUM_ENT(npg)) + break; + bip = GET_BINTERNAL(dbp, npg, indx); + } + + /* See if we are deleting the page and we are not left most. */ + if (indx == NUM_ENT(npg) && epg[-1].indx != 0) + goto fits; + + pfree = dbp->pgsize; + for (epg--; epg >= ncp->sp; epg--) + if ((free = P_FREESPACE(dbp, epg->page)) < pfree) { + bip = GET_BINTERNAL(dbp, epg->page, epg->indx); + /* Add back in the key we will be deleting. */ + free += BINTERNAL_PSIZE(bip->len); + if (free < pfree) + pfree = free; + if (epg->indx != 0) + break; + } + epg = ncp->csp; + + /* If we are at the end of the page we will delete it. */ + if (indx == NUM_ENT(npg)) + bip = + GET_BINTERNAL(dbp, epg[-1].page, epg[-1].indx + 1); + else + bip = GET_BINTERNAL(dbp, npg, indx); + + /* Back up until we have a key that fits. */ + while (indx != 0 && BINTERNAL_PSIZE(bip->len) > pfree) { + indx--; + bip = GET_BINTERNAL(dbp, npg, indx); + } + if (indx == 0) + goto done; + } + +fits: memset(&bi, 0, sizeof(bi)); + memset(&hdr, 0, sizeof(hdr)); + memset(&data, 0, sizeof(data)); + trecs = 0; + + /* + * Copy data between internal nodes till one is full + * or the other is empty. + */ + do { + if (dbc->dbtype == DB_BTREE) { + bip = GET_BINTERNAL(dbp, npg, 0); + size = fip == NULL ? 
+ BINTERNAL_SIZE(bip->len) : + BINTERNAL_SIZE(fip->len); + if (P_FREESPACE(dbp, pg) < size + sizeof(db_indx_t)) + break; + + if (fip == NULL) { + data.size = bip->len; + data.data = bip->data; + } else { + data.size = fip->len; + data.data = fip->data; + } + bi.len = data.size; + B_TSET(bi.type, bip->type, 0); + bi.pgno = bip->pgno; + bi.nrecs = bip->nrecs; + hdr.data = &bi; + hdr.size = SSZA(BINTERNAL, data); + if (F_ISSET(cp, C_RECNUM) || F_ISSET(dbc, DBC_OPD)) + trecs += (int32_t)bip->nrecs; + } else { + rk = GET_RINTERNAL(dbp, npg, 0); + size = RINTERNAL_SIZE; + if (P_FREESPACE(dbp, pg) < size + sizeof(db_indx_t)) + break; + + hdr.data = rk; + hdr.size = size; + trecs += (int32_t)rk->nrecs; + } + if ((ret = __db_pitem(dbc, pg, pind, size, &hdr, &data)) != 0) + goto err; + pind++; + if (fip != NULL) { + /* reset size to be for the record being deleted. */ + size = BINTERNAL_SIZE(bip->len); + fip = NULL; + } + if ((ret = __db_ditem(ndbc, npg, 0, size)) != 0) + goto err; + *merged = 1; + } while (--indx != 0); + + if (c_data->compact_truncate != PGNO_INVALID && + PGNO(pg) > c_data->compact_truncate && cp->csp != cp->sp) { + if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) + goto err; + } + + if (NUM_ENT(npg) != 0 && c_data->compact_truncate != PGNO_INVALID && + PGNO(npg) > c_data->compact_truncate && ncp->csp != ncp->sp) { + if ((ret = __bam_truncate_page(ndbc, &npg, 1)) != 0) + goto err; + } + + if (!*merged) + goto done; + + if ((ret = __memp_fset(dbmp, pg, DB_MPOOL_DIRTY)) != 0) + goto err; + if ((ret = __memp_fset(dbmp, npg, DB_MPOOL_DIRTY)) != 0) + goto err; + + if (trecs != 0) { + DB_ASSERT(cp->csp - cp->sp == ncp->csp - ncp->sp); + cp->csp--; + if ((ret = __bam_adjust(dbc, trecs)) != 0) + goto err; + + ncp->csp--; + if ((ret = __bam_adjust(ndbc, -trecs)) != 0) + goto err; + ncp->csp++; + } + cp->csp = save_csp; + + /* + * Either we emptied the page or we need to update its + * parent to reflect the first page we now point to. 
+ * First get rid of the bottom of the stack, + * bam_dpages will clear the stack. We can drop + * the locks on those pages as we have not done + * anything to them. + */ + do { + if ((ret = __memp_fput(dbmp, nsave_csp->page, 0)) != 0) + goto err; + if ((ret = __LPUT(dbc, nsave_csp->lock)) != 0) + goto err; + nsave_csp--; + } while (nsave_csp != ncp->csp); + + if (NUM_ENT(npg) == 0) { + /* + * __bam_dpages may decide to collapse the tree + * so we need to free our other stack. The tree + * will change in hight and our stack will nolonger + * be valid. + */ + if (PGNO(ncp->sp->page) == ncp->root && + NUM_ENT(ncp->sp->page) == 2) { + if ((ret = __bam_stkrel(dbc, STK_CLRDBC)) != 0) + goto err; + level = LEVEL(ncp->sp->page); + ppgno = PGNO(ncp->csp[-1].page); + } else + level = 0; + + if (c_data->compact_truncate > PGNO(npg)) + c_data->compact_truncate--; + ret = __bam_dpages(ndbc, + 0, ndbc->dbtype == DB_RECNO ? 0 : 1); + c_data->compact_pages_free++; + if (ret == 0 && level != 0) { + if ((ret = __memp_fget(dbmp, &ncp->root, 0, &npg)) != 0) + goto err; + if (level == LEVEL(npg)) + level = 0; + if ((ret = __memp_fput(dbmp, npg, 0)) != 0) + goto err; + npg = NULL; + if (level != 0) { + c_data->compact_levels++; + c_data->compact_pages_free++; + if (c_data->compact_truncate > ppgno) + c_data->compact_truncate--; + if (c_data->compact_pages != 0) + c_data->compact_pages--; + } + } + } else + ret = __bam_pupdate(ndbc, npg); + return (ret); + +done: +err: cp->csp = save_csp; + ncp->csp = nsave_csp; + + return (ret); +} + +/* + * __bam_compact_dups -- try to compress off page dup trees. + * We may or may not have a write lock on this page. 
+ */ +static int +__bam_compact_dups(dbc, pg, factor, have_lock, c_data, donep) + DBC *dbc; + PAGE *pg; + u_int32_t factor; + int have_lock; + DB_COMPACT *c_data; + int *donep; +{ + BTREE_CURSOR *cp; + BOVERFLOW *bo; + DB *dbp; + DBC *opd; + DBT start; + DB_MPOOLFILE *dbmp; + PAGE *dpg; + db_indx_t i; + int done, level, ret, span, t_ret; + + span = 0; + ret = 0; + opd = NULL; + + dbp = dbc->dbp; + dbmp = dbp->mpf; + cp = (BTREE_CURSOR *)dbc->internal; + + for (i = 0; i < NUM_ENT(pg); i++) { + bo = GET_BOVERFLOW(dbp, pg, i); + if (B_TYPE(bo->type) == B_KEYDATA) + continue; + c_data->compact_pages_examine++; + if (bo->pgno > c_data->compact_truncate) { + (*donep)++; + if (!have_lock) { + if ((ret = __db_lget(dbc, 0, PGNO(pg), + DB_LOCK_WRITE, 0, &cp->csp->lock)) != 0) + goto err; + have_lock = 1; + } + if ((ret = + __bam_truncate_root_page(dbc, pg, i, c_data)) != 0) + goto err; + /* Just in case it should move. Could it? */ + bo = GET_BOVERFLOW(dbp, pg, i); + } + + if (B_TYPE(bo->type) == B_OVERFLOW) { + if ((ret = __bam_truncate_overflow(dbc, bo->pgno, + have_lock ? PGNO_INVALID : PGNO(pg), c_data)) != 0) + goto err; + (*donep)++; + continue; + } + /* + * Take a peek at the root. If it's a leaf then + * there is no tree here, avoid all the trouble. 
+ */ + if ((ret = __memp_fget(dbmp, &bo->pgno, 0, &dpg)) != 0) + goto err; + + level = dpg->level; + if ((ret = __memp_fput(dbmp, dpg, 0)) != 0) + goto err; + if (level == LEAFLEVEL) + continue; + if ((ret = __db_c_newopd(dbc, bo->pgno, NULL, &opd)) != 0) + return (ret); + if (!have_lock) { + if ((ret = __db_lget(dbc, 0, + PGNO(pg), DB_LOCK_WRITE, 0, &cp->csp->lock)) != 0) + goto err; + have_lock = 1; + } + (*donep)++; + memset(&start, 0, sizeof(start)); + do { + if ((ret = __bam_compact_int(opd, &start, + NULL, factor, &span, c_data, &done)) != 0) + break; + } while (!done); + + if (start.data != NULL) + __os_free(dbp->dbenv, start.data); + + if (ret != 0) + goto err; + + ret = __db_c_close(opd); + opd = NULL; + if (ret != 0) + goto err; + } + +err: if (opd != NULL && (t_ret = __db_c_close(opd)) != 0 && ret == 0) + ret = t_ret; + return (ret); +} + +/* + * __bam_truncate_page -- swap a page with a lower numbered page. + * The cusor has a stack which includes at least the + * immediate parent of this page. + */ +static int +__bam_truncate_page(dbc, pgp, update_parent) + DBC *dbc; + PAGE **pgp; + int update_parent; +{ + BTREE_CURSOR *cp; + DB *dbp; + DBT data, hdr, ind; + DB_LSN lsn; + EPG *epg; + PAGE *newpage; + db_pgno_t newpgno, *pgnop; + int ret; + + dbp = dbc->dbp; + + /* + * We want to free a page that lives in the part of the file that + * can be truncated, so we're going to move it onto a free page + * that is in the part of the file that need not be truncated. + * Since the freelist is ordered now, we can simply call __db_new + * which will grab the first element off the freelist; we know this + * is the lowest numbered free page. + */ + + if ((ret = __db_new(dbc, P_DONTEXTEND | TYPE(*pgp), &newpage)) != 0) + return (ret); + + /* + * If newpage is null then __db_new would have had to allocate + * a new page from the filesystem, so there is no reason + * to continue this action. 
+ */ + if (newpage == NULL) + return (0); + + /* + * It is possible that a higher page is allocated if other threads + * are allocating at the same time, if so, just put it back. + */ + if (PGNO(newpage) > PGNO(*pgp)) { + /* Its unfortunate but you can't just free a new overflow. */ + if (TYPE(newpage) == P_OVERFLOW) + OV_LEN(newpage) = 0; + return (__db_free(dbc, newpage)); + } + + /* Log if necessary. */ + if (DBC_LOGGING(dbc)) { + hdr.data = *pgp; + hdr.size = P_OVERHEAD(dbp); + if (TYPE(*pgp) == P_OVERFLOW) { + data.data = (u_int8_t *)*pgp + P_OVERHEAD(dbp); + data.size = OV_LEN(*pgp); + ind.size = 0; + } else { + data.data = (u_int8_t *)*pgp + HOFFSET(*pgp); + data.size = dbp->pgsize - HOFFSET(*pgp); + ind.data = P_INP(dbp, *pgp); + ind.size = NUM_ENT(*pgp) * sizeof(db_indx_t); + } + if ((ret = __bam_merge_log(dbp, dbc->txn, + &LSN(newpage), 0, PGNO(newpage), &LSN(newpage), + PGNO(*pgp), &LSN(*pgp), &hdr, &data, &ind)) != 0) + goto err; + } else + LSN_NOT_LOGGED(LSN(newpage)); + + newpgno = PGNO(newpage); + lsn = LSN(newpage); + memcpy(newpage, *pgp, dbp->pgsize); + PGNO(newpage) = newpgno; + LSN(newpage) = lsn; + + /* Empty the old page. */ + if (TYPE(*pgp) == P_OVERFLOW) + OV_LEN(*pgp) = 0; + else { + HOFFSET(*pgp) = dbp->pgsize; + NUM_ENT(*pgp) = 0; + } + LSN(*pgp) = lsn; + + if ((ret = __memp_fset(dbp->mpf, newpage, DB_MPOOL_DIRTY)) != 0) + goto err; + + /* Update siblings. */ + switch (TYPE(newpage)) { + case P_OVERFLOW: + case P_LBTREE: + case P_LRECNO: + case P_LDUP: + if (NEXT_PGNO(newpage) == PGNO_INVALID && + PREV_PGNO(newpage) == PGNO_INVALID) + break; + if ((ret = __bam_relink(dbc, *pgp, PGNO(newpage))) != 0) + goto err; + break; + default: + break; + } + cp = (BTREE_CURSOR*)dbc->internal; + + /* + * Now, if we free this page, it will get truncated, when we free + * all the pages after it in the file. + */ + ret = __db_free(dbc, *pgp); + /* db_free always puts the page. 
*/ + *pgp = newpage; + + if (ret != 0) + return (ret); + + if (!update_parent) + goto done; + + /* Update the parent. */ + epg = &cp->csp[-1]; + switch (TYPE(epg->page)) { + case P_IBTREE: + pgnop = &GET_BINTERNAL(dbp, epg->page, epg->indx)->pgno; + break; + case P_IRECNO: + pgnop = &GET_RINTERNAL(dbp, epg->page, epg->indx)->pgno; + break; + default: + pgnop = &GET_BOVERFLOW(dbp, epg->page, epg->indx)->pgno; + break; + } + if (DBC_LOGGING(dbc)) { + if ((ret = __bam_pgno_log(dbp, dbc->txn, &LSN(epg->page), + 0, PGNO(epg->page), &LSN(epg->page), (u_int32_t)epg->indx, + *pgnop, PGNO(newpage))) != 0) + return (ret); + } else + LSN_NOT_LOGGED(LSN(epg->page)); + + *pgnop = PGNO(newpage); + cp->csp->page = newpage; + if ((ret = __memp_fset(dbp->mpf, epg->page, DB_MPOOL_DIRTY)) != 0) + return (ret); + +done: return (0); + +err: (void)__memp_fput(dbp->mpf, newpage, 0); + return (ret); +} + +/* + * __bam_truncate_overflow -- find overflow pages to truncate. + * Walk the pages of an overflow chain and swap out + * high numbered pages. We are passed the first page + * but only deal with the second and subsequent pages. 
+ */ + +static int +__bam_truncate_overflow(dbc, pgno, pg_lock, c_data) + DBC *dbc; + db_pgno_t pgno; + db_pgno_t pg_lock; + DB_COMPACT *c_data; +{ + DB *dbp; + DB_LOCK lock; + PAGE *page; + int ret, t_ret; + + dbp = dbc->dbp; + page = NULL; + LOCK_INIT(lock); + + if ((ret = __memp_fget(dbp->mpf, &pgno, 0, &page)) != 0) + return (ret); + + while ((pgno = NEXT_PGNO(page)) != PGNO_INVALID) { + if ((ret = __memp_fput(dbp->mpf, page, 0)) != 0) + return (ret); + if ((ret = __memp_fget(dbp->mpf, &pgno, 0, &page)) != 0) + return (ret); + if (pgno <= c_data->compact_truncate) + continue; + if (pg_lock != PGNO_INVALID) { + if ((ret = __db_lget(dbc, + 0, pg_lock, DB_LOCK_WRITE, 0, &lock)) != 0) + break; + pg_lock = PGNO_INVALID; + } + if ((ret = __bam_truncate_page(dbc, &page, 0)) != 0) + break; + } + + if (page != NULL && + (t_ret = __memp_fput(dbp->mpf, page, 0)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + return (ret); +} + +/* + * __bam_truncate_root_page -- swap a page which is + * the root of an off page dup tree or the head of an overflow. + * The page is reference by the pg/indx passed in. + */ +static int +__bam_truncate_root_page(dbc, pg, indx, c_data) + DBC *dbc; + PAGE *pg; + u_int32_t indx; + DB_COMPACT *c_data; +{ + BINTERNAL *bi; + BOVERFLOW *bo; + DB *dbp; + DBT orig; + PAGE *page; + db_pgno_t newpgno, *pgnop; + int ret, t_ret; + + COMPQUIET(c_data, NULL); + COMPQUIET(bo, NULL); + COMPQUIET(newpgno, PGNO_INVALID); + dbp = dbc->dbp; + page = NULL; + if (TYPE(pg) == P_IBTREE) { + bi = GET_BINTERNAL(dbp, pg, indx); + if (B_TYPE(bi->type) == B_OVERFLOW) { + bo = (BOVERFLOW *)(bi->data); + pgnop = &bo->pgno; + } else + pgnop = &bi->pgno; + } else { + bo = GET_BOVERFLOW(dbp, pg, indx); + pgnop = &bo->pgno; + } + + if ((ret = __memp_fget(dbp->mpf, pgnop, 0, &page)) != 0) + goto err; + + /* + * If this is a multiply reference overflow key, then we will just + * copy it and decrement the reference count. 
This is part of a + * fix to get rid of multiple references. + */ + if (TYPE(page) == P_OVERFLOW && OV_REF(page) > 1) { + if ((ret = __db_ovref(dbc, bo->pgno, -1)) != 0) + goto err; + memset(&orig, 0, sizeof(orig)); + if ((ret = __db_goff(dbp, &orig, + bo->tlen, bo->pgno, &orig.data, &orig.size)) == 0) + ret = __db_poff(dbc, &orig, &newpgno); + if (orig.data != NULL) + __os_free(dbp->dbenv, orig.data); + if (ret != 0) + goto err; + } else { + if ((ret = __bam_truncate_page(dbc, &page, 0)) != 0) + goto err; + newpgno = PGNO(page); + /* If we could not allocate from the free list, give up.*/ + if (newpgno == *pgnop) + goto err; + } + + /* Update the reference. */ + if (DBC_LOGGING(dbc)) { + if ((ret = __bam_pgno_log(dbp, + dbc->txn, &LSN(pg), 0, PGNO(pg), + &LSN(pg), (u_int32_t)indx, *pgnop, newpgno)) != 0) + goto err; + } else + LSN_NOT_LOGGED(LSN(pg)); + + *pgnop = newpgno; + if ((ret = __memp_fset(dbp->mpf, pg, DB_MPOOL_DIRTY)) != 0) + goto err; + +err: if (page != NULL && (t_ret = + __memp_fput(dbp->mpf, page, DB_MPOOL_DIRTY)) != 0 && ret == 0) + ret = t_ret; + return (ret); +} + +/* + * -- bam_truncate_internal_overflow -- find overflow keys + * on internal pages and if they have high page + * numbers swap them with lower pages and truncate them. + * Note that if there are overflow keys in the internal + * nodes they will get copied adding pages to the database. 
+ */ +static int +__bam_truncate_internal_overflow(dbc, page, c_data) + DBC *dbc; + PAGE *page; + DB_COMPACT *c_data; +{ + BINTERNAL *bi; + BOVERFLOW *bo; + db_indx_t indx; + int ret; + + COMPQUIET(bo, NULL); + ret = 0; + for (indx = 0; indx < NUM_ENT(page); indx++) { + bi = GET_BINTERNAL(dbc->dbp, page, indx); + if (B_TYPE(bi->type) != B_OVERFLOW) + continue; + bo = (BOVERFLOW *)(bi->data); + if (bo->pgno > c_data->compact_truncate && (ret = + __bam_truncate_root_page(dbc, page, indx, c_data)) != 0) + break; + if ((ret = __bam_truncate_overflow( + dbc, bo->pgno, PGNO_INVALID, c_data)) != 0) + break; + } + return (ret); +} + +#ifdef HAVE_FTRUNCATE +/* + * __bam_savekey -- save the key from an internal page. + * We need to save information so that we can + * fetch then next internal node of the tree. This means + * we need the btree key on this current page, or the + * next record number. + */ +static int +__bam_savekey(dbc, next, start) + DBC *dbc; + int next; + DBT *start; +{ + BINTERNAL *bi; + BOVERFLOW *bo; + BTREE_CURSOR *cp; + DB *dbp; + DB_ENV *dbenv; + PAGE *pg; + RINTERNAL *ri; + db_indx_t indx, top; + + dbp = dbc->dbp; + dbenv = dbp->dbenv; + cp = (BTREE_CURSOR *)dbc->internal; + pg = cp->csp->page; + + if (dbc->dbtype == DB_RECNO) { + if (next) + for (indx = 0, top = NUM_ENT(pg); indx != top; indx++) { + ri = GET_RINTERNAL(dbp, pg, indx); + cp->recno += ri->nrecs; + } + return (__db_retcopy(dbenv, start, &cp->recno, + sizeof(cp->recno), &start->data, &start->ulen)); + + } + bi = GET_BINTERNAL(dbp, pg, NUM_ENT(pg) - 1); + if (B_TYPE(bi->type) == B_OVERFLOW) { + bo = (BOVERFLOW *)(bi->data); + return (__db_goff(dbp, start, + bo->tlen, bo->pgno, &start->data, &start->ulen)); + } + return (__db_retcopy(dbenv, + start, bi->data, bi->len, &start->data, &start->ulen)); +} + +/* + * bam_truncate_internal -- + * Find high numbered pages in the internal nodes of a tree and + * swap them. 
+ */ +static int +__bam_truncate_internal(dbp, txn, c_data) + DB *dbp; + DB_TXN *txn; + DB_COMPACT *c_data; +{ + BTREE_CURSOR *cp; + DBC *dbc; + DBT start; + PAGE *pg; + db_pgno_t pgno; + u_int32_t sflag; + int level, local_txn, ret, t_ret; + + dbc = NULL; + memset(&start, 0, sizeof(start)); + + if (IS_DB_AUTO_COMMIT(dbp, txn)) { + local_txn = 1; + txn = NULL; + } else + local_txn = 0; + + level = LEAFLEVEL + 1; + sflag = CS_READ | CS_GETRECNO; + +new_txn: + if (local_txn && (ret = __txn_begin(dbp->dbenv, NULL, &txn, 0)) != 0) + goto err; + + if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0) + goto err; + cp = (BTREE_CURSOR *)dbc->internal; + + pgno = PGNO_INVALID; + do { + if ((ret = __bam_csearch(dbc, &start, sflag, level)) != 0) { + /* No more at this level, go up one. */ + if (ret == DB_NOTFOUND) { + level++; + if (start.data != NULL) + __os_free(dbp->dbenv, start.data); + memset(&start, 0, sizeof(start)); + sflag = CS_READ | CS_GETRECNO; + continue; + } + goto err; + } + c_data->compact_pages_examine++; + + pg = cp->csp->page; + pgno = PGNO(pg); + + sflag = CS_NEXT | CS_GETRECNO; + /* Grab info about the page and drop the stack. */ + if (pgno != cp->root && (ret = __bam_savekey(dbc, + pgno <= c_data->compact_truncate, &start)) != 0) + goto err; + + if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) + goto err; + if (pgno == cp->root) + break; + + if (pgno <= c_data->compact_truncate) + continue; + + /* Reget the page with a write lock, and its parent too. */ + if ((ret = __bam_csearch(dbc, + &start, CS_PARENT | CS_GETRECNO, level)) != 0) + goto err; + pg = cp->csp->page; + pgno = PGNO(pg); + + if (pgno > c_data->compact_truncate) { + if ((ret = __bam_truncate_page(dbc, &pg, 1)) != 0) + goto err; + } + if ((ret = __bam_stkrel(dbc, + pgno > c_data->compact_truncate ? 0 : STK_NOLOCK)) != 0) + goto err; + + /* We are locking subtrees, so drop the write locks asap. 
*/ + if (local_txn && pgno > c_data->compact_truncate) + break; + } while (pgno != cp->root); + + if ((ret = __db_c_close(dbc)) != 0) + goto err; + dbc = NULL; + if (local_txn) { + if ((ret = __txn_commit(txn, DB_TXN_NOSYNC)) != 0) + goto err; + txn = NULL; + } + if (pgno != ((BTREE *)dbp->bt_internal)->bt_root) + goto new_txn; + +err: if (dbc != NULL && (t_ret = __bam_stkrel(dbc, 0)) != 0 && ret == 0) + ret = t_ret; + if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + if (local_txn && + txn != NULL && (t_ret = __txn_abort(txn)) != 0 && ret == 0) + ret = t_ret; + if (start.data != NULL) + __os_free(dbp->dbenv, start.data); + return (ret); +} + +static int +__bam_setup_freelist(dbp, list, nelems) + DB *dbp; + struct pglist *list; + u_int32_t nelems; +{ + DB_MPOOLFILE *mpf; + db_pgno_t *plist; + int ret; + + mpf = dbp->mpf; + + if ((ret = __memp_alloc_freelist(mpf, nelems, &plist)) != 0) + return (ret); + + while (nelems-- != 0) + *plist++ = list++->pgno; + + return (0); +} + +static int +__bam_free_freelist(dbp, txn) + DB *dbp; + DB_TXN *txn; +{ + DBC *dbc; + DB_LOCK lock; + int ret, t_ret; + + LOCK_INIT(lock); + ret = 0; + + /* + * If we are not in a transaction then we need to get + * a lock on the meta page, otherwise we should already + * have the lock. + */ + + dbc = NULL; + if (IS_DB_AUTO_COMMIT(dbp, txn)) { + /* Get a cursor so we can call __db_lget. 
*/ + if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) + return (ret); + + if ((ret = __db_lget(dbc, + 0, PGNO_BASE_MD, DB_LOCK_WRITE, 0, &lock)) != 0) + goto err; + } + + __memp_free_freelist(dbp->mpf); + +err: if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + + if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0) + ret = t_ret; + + return (ret); +} +#endif diff --git a/storage/bdb/btree/bt_compare.c b/storage/bdb/btree/bt_compare.c index 81ffe098b10..126788f3100 100644 --- a/storage/bdb/btree/bt_compare.c +++ b/storage/bdb/btree/bt_compare.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_compare.c,v 11.20 2004/02/21 15:54:44 bostic Exp $ + * $Id: bt_compare.c,v 12.1 2005/06/16 20:20:13 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/btree/bt_conv.c b/storage/bdb/btree/bt_conv.c index 39a9d825388..74bf823088a 100644 --- a/storage/bdb/btree/bt_conv.c +++ b/storage/bdb/btree/bt_conv.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_conv.c,v 11.15 2004/01/28 03:35:48 bostic Exp $ + * $Id: bt_conv.c,v 12.2 2005/06/16 20:20:13 bostic Exp $ */ #include "db_config.h" @@ -88,7 +88,7 @@ __bam_mswap(pg) p = (u_int8_t *)pg + sizeof(DBMETA); - SWAP32(p); /* maxkey */ + p += sizeof(u_int32_t); /* unused */ SWAP32(p); /* minkey */ SWAP32(p); /* re_len */ SWAP32(p); /* re_pad */ diff --git a/storage/bdb/btree/bt_curadj.c b/storage/bdb/btree/bt_curadj.c index 477f00b8ff8..e2128666cec 100644 --- a/storage/bdb/btree/bt_curadj.c +++ b/storage/bdb/btree/bt_curadj.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_curadj.c,v 11.37 2004/03/13 14:11:33 bostic Exp $ + * $Id: bt_curadj.c,v 12.3 2005/07/20 16:50:45 bostic Exp $ */ #include "db_config.h" @@ -35,14 +35,14 @@ static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t)); * Update the cursors when items are deleted and when already deleted * items are overwritten. Return the number of relevant cursors found. * - * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int)); + * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int, int *)); */ int -__bam_ca_delete(dbp, pgno, indx, delete) +__bam_ca_delete(dbp, pgno, indx, delete, countp) DB *dbp; db_pgno_t pgno; u_int32_t indx; - int delete; + int delete, *countp; { BTREE_CURSOR *cp; DB *ldbp; @@ -63,11 +63,11 @@ __bam_ca_delete(dbp, pgno, indx, delete) * Each cursor is single-threaded, so we only need to lock the * list of DBs and then the list of cursors in each DB. */ - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { cp = (BTREE_CURSOR *)dbc->internal; @@ -92,23 +92,26 @@ __bam_ca_delete(dbp, pgno, indx, delete) ++count; } } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); - return (count); + if (countp != NULL) + *countp = count; + return (0); } /* * __ram_ca_delete -- - * Return the number of relevant cursors. + * Return if any relevant cursors found. 
* - * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t)); + * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t, int *)); */ int -__ram_ca_delete(dbp, root_pgno) +__ram_ca_delete(dbp, root_pgno, foundp) DB *dbp; db_pgno_t root_pgno; + int *foundp; { DB *ldbp; DBC *dbc; @@ -121,19 +124,21 @@ __ram_ca_delete(dbp, root_pgno) /* * Review the cursors. See the comment in __bam_ca_delete(). */ - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) if (dbc->internal->root == root_pgno) found = 1; - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); - return (found); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); + + *foundp = found; + return (0); } /* @@ -166,11 +171,11 @@ __bam_ca_di(my_dbc, pgno, indx, adjust) * Adjust the cursors. See the comment in __bam_ca_delete(). 
*/ found = 0; - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { if (dbc->dbtype == DB_RECNO) @@ -188,9 +193,9 @@ __bam_ca_di(my_dbc, pgno, indx, adjust) found = 1; } } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); if (found != 0 && DBC_LOGGING(my_dbc)) { if ((ret = __bam_curadj_log(dbp, my_dbc->txn, &lsn, 0, @@ -289,11 +294,11 @@ __bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti) * Adjust the cursors. See the comment in __bam_ca_delete(). */ found = 0; - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { -loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); +loop: MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { /* Find cursors pointing to this record. */ @@ -308,7 +313,7 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); if (orig_cp->opd != NULL) continue; - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); /* [#8032] DB_ASSERT(!STD_LOCKING(dbc) || orig_cp->lock_mode != DB_LOCK_NG); @@ -321,9 +326,9 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); /* We released the mutex to get a cursor, start over. 
*/ goto loop; } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); if (found != 0 && DBC_LOGGING(my_dbc)) { if ((ret = __bam_curadj_log(dbp, my_dbc->txn, @@ -359,11 +364,11 @@ __bam_ca_undodup(dbp, first, fpgno, fi, ti) /* * Adjust the cursors. See the comment in __bam_ca_delete(). */ - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { -loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); +loop: MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { orig_cp = (BTREE_CURSOR *)dbc->internal; @@ -377,11 +382,10 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); */ if (orig_cp->pgno != fpgno || orig_cp->indx != first || - orig_cp->opd == NULL || - ((BTREE_CURSOR *)orig_cp->opd->internal)->indx - != ti) + orig_cp->opd == NULL || ((BTREE_CURSOR *) + orig_cp->opd->internal)->indx != ti) continue; - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); if ((ret = __db_c_close(orig_cp->opd)) != 0) return (ret); orig_cp->opd = NULL; @@ -392,9 +396,9 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); */ goto loop; } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); return (0); } @@ -425,11 +429,11 @@ __bam_ca_rsplit(my_dbc, fpgno, tpgno) * Adjust the cursors. See the comment in __bam_ca_delete(). 
*/ found = 0; - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { if (dbc->dbtype == DB_RECNO) @@ -444,9 +448,9 @@ __bam_ca_rsplit(my_dbc, fpgno, tpgno) found = 1; } } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); if (found != 0 && DBC_LOGGING(my_dbc)) { if ((ret = __bam_curadj_log(dbp, my_dbc->txn, @@ -493,11 +497,11 @@ __bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft) * records split to the left page. */ found = 0; - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { if (dbc->dbtype == DB_RECNO) @@ -519,9 +523,9 @@ __bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft) } } } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); if (found != 0 && DBC_LOGGING(my_dbc)) { if ((ret = __bam_curadj_log(dbp, @@ -540,10 +544,10 @@ __bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft) * left and the right pages. * Called only during undo processing. 
* - * PUBLIC: void __bam_ca_undosplit __P((DB *, + * PUBLIC: int __bam_ca_undosplit __P((DB *, * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t)); */ -void +int __bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx) DB *dbp; db_pgno_t frompgno, topgno, lpgno; @@ -562,11 +566,11 @@ __bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx) * When backing out a split, we move the cursor back * to the original offset and bump it by the split_indx. */ - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { if (dbc->dbtype == DB_RECNO) @@ -578,7 +582,9 @@ __bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx) } else if (cp->pgno == lpgno) cp->pgno = frompgno; } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); + + return (0); } diff --git a/storage/bdb/btree/bt_cursor.c b/storage/bdb/btree/bt_cursor.c index 82d6cc43556..808dd7aa873 100644 --- a/storage/bdb/btree/bt_cursor.c +++ b/storage/bdb/btree/bt_cursor.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: bt_cursor.c,v 11.190 2004/09/22 21:46:32 ubell Exp $ + * $Id: bt_cursor.c,v 12.7 2005/08/08 14:27:59 bostic Exp $ */ #include "db_config.h" @@ -26,10 +26,8 @@ static int __bam_bulk __P((DBC *, DBT *, u_int32_t)); static int __bam_c_close __P((DBC *, db_pgno_t, int *)); static int __bam_c_del __P((DBC *)); static int __bam_c_destroy __P((DBC *)); -static int __bam_c_first __P((DBC *)); static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *)); static int __bam_c_getstack __P((DBC *)); -static int __bam_c_last __P((DBC *)); static int __bam_c_next __P((DBC *, int, int)); static int __bam_c_physdel __P((DBC *)); static int __bam_c_prev __P((DBC *)); @@ -68,21 +66,6 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); ret = __memp_fget(__mpf, &(fpgno), 0, &(pagep)); \ } while (0) -#undef ACQUIRE_COUPLE -#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) do { \ - DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \ - if ((pagep) != NULL) { \ - ret = __memp_fput(__mpf, pagep, 0); \ - pagep = NULL; \ - } else \ - ret = 0; \ - if ((ret) == 0 && STD_LOCKING(dbc)) \ - ret = __db_lget(dbc, \ - LCK_COUPLE_ALWAYS, lpgno, mode, 0, &(lock)); \ - if ((ret) == 0) \ - ret = __memp_fget(__mpf, &(fpgno), 0, &(pagep)); \ -} while (0) - /* Acquire a new page/lock for a cursor. */ #undef ACQUIRE_CUR #define ACQUIRE_CUR(dbc, mode, p, ret) do { \ @@ -96,23 +79,6 @@ static int __bam_isopd __P((DBC *, db_pgno_t *)); } \ } while (0) -/* - * Acquire a new page/lock for a cursor and release the previous. - * This is typically used when descending a tree and we do not - * want to hold the interior nodes locked. 
- */ -#undef ACQUIRE_CUR_COUPLE -#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) do { \ - BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \ - if (p != __cp->pgno) \ - __cp->pgno = PGNO_INVALID; \ - ACQUIRE_COUPLE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \ - if ((ret) == 0) { \ - __cp->pgno = p; \ - __cp->lock_mode = (mode); \ - } \ -} while (0) - /* * Acquire a write lock if we don't already have one. * @@ -196,11 +162,11 @@ __bam_c_init(dbc, dbtype) /* Allocate/initialize the internal structure. */ if (dbc->internal == NULL && (ret = - __os_malloc(dbenv, sizeof(BTREE_CURSOR), &dbc->internal)) != 0) + __os_calloc(dbenv, 1, sizeof(BTREE_CURSOR), &dbc->internal)) != 0) return (ret); /* Initialize methods. */ - dbc->c_close = __db_c_close; + dbc->c_close = __db_c_close_pp; dbc->c_count = __db_c_count_pp; dbc->c_del = __db_c_del_pp; dbc->c_dup = __db_c_dup_pp; @@ -257,8 +223,11 @@ __bam_c_refresh(dbc) LOCK_INIT(cp->lock); cp->lock_mode = DB_LOCK_NG; - cp->sp = cp->csp = cp->stack; - cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]); + if (cp->sp == NULL) { + cp->sp = cp->stack; + cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]); + } + BT_STK_CLR(cp); /* * The btree leaf page data structures require that two key/data pairs @@ -308,7 +277,7 @@ __bam_c_close(dbc, root_pgno, rmroot) DBC *dbc_opd, *dbc_c; DB_MPOOLFILE *mpf; PAGE *h; - int cdb_lock, ret; + int cdb_lock, count, ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -378,22 +347,28 @@ __bam_c_close(dbc, root_pgno, rmroot) dbc_c = dbc; switch (dbc->dbtype) { case DB_BTREE: /* Case #1, #3. */ - if (__bam_ca_delete(dbp, cp->pgno, cp->indx, 1) == 0) + if ((ret = __bam_ca_delete( + dbp, cp->pgno, cp->indx, 1, &count)) != 0) + goto err; + if (count == 0) goto lock; goto done; case DB_RECNO: if (!F_ISSET(dbc, DBC_OPD)) /* Case #1. */ goto done; /* Case #3. 
*/ - if (__ram_ca_delete(dbp, cp->root) == 0) + if ((ret = __ram_ca_delete(dbp, cp->root, &count)) != 0) + goto err; + if (count == 0) goto lock; goto done; case DB_HASH: case DB_QUEUE: case DB_UNKNOWN: default: - return (__db_unknown_type(dbp->dbenv, - "__bam_c_close", dbc->dbtype)); + ret = __db_unknown_type(dbp->dbenv, + "__bam_c_close", dbc->dbtype); + goto err; } } @@ -414,20 +389,26 @@ __bam_c_close(dbc, root_pgno, rmroot) dbc_c = dbc_opd; switch (dbc_opd->dbtype) { case DB_BTREE: - if (__bam_ca_delete( - dbp, cp_opd->pgno, cp_opd->indx, 1) == 0) + if ((ret = __bam_ca_delete( + dbp, cp_opd->pgno, cp_opd->indx, 1, &count)) != 0) + goto err; + if (count == 0) goto lock; goto done; case DB_RECNO: - if (__ram_ca_delete(dbp, cp_opd->root) == 0) + if ((ret = + __ram_ca_delete(dbp, cp_opd->root, &count)) != 0) + goto err; + if (count == 0) goto lock; goto done; case DB_HASH: case DB_QUEUE: case DB_UNKNOWN: default: - return (__db_unknown_type( - dbp->dbenv, "__bam_c_close", dbc->dbtype)); + ret = __db_unknown_type( + dbp->dbenv, "__bam_c_close", dbc->dbtype); + goto err; } } goto done; @@ -588,8 +569,14 @@ static int __bam_c_destroy(dbc) DBC *dbc; { + BTREE_CURSOR *cp; + + cp = (BTREE_CURSOR *)dbc->internal; + /* Discard the structures. */ - __os_free(dbc->dbp->dbenv, dbc->internal); + if (cp->sp != cp->stack) + __os_free(dbc->dbp->dbenv, cp->sp); + __os_free(dbc->dbp->dbenv, cp); return (0); } @@ -693,7 +680,7 @@ __bam_c_del(dbc) BTREE_CURSOR *cp; DB *dbp; DB_MPOOLFILE *mpf; - int ret, t_ret; + int count, ret, t_ret; dbp = dbc->dbp; mpf = dbp->mpf; @@ -760,9 +747,12 @@ err: /* cp->page = NULL; - /* Update the cursors last, after all chance of failure is past. */ + /* + * Update the cursors last, after all chance of recoverable failure + * is past. 
+ */ if (ret == 0) - (void)__bam_ca_delete(dbp, cp->pgno, cp->indx, 1); + ret = __bam_ca_delete(dbp, cp->pgno, cp->indx, 1, &count); return (ret); } @@ -846,7 +836,8 @@ __bam_c_get(dbc, key, data, flags, pgnop) break; case DB_FIRST: newopd = 1; - if ((ret = __bam_c_first(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, flags, &exact)) != 0) goto err; break; case DB_GET_BOTH: @@ -910,13 +901,15 @@ __bam_c_get(dbc, key, data, flags, pgnop) break; case DB_LAST: newopd = 1; - if ((ret = __bam_c_last(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, flags, &exact)) != 0) goto err; break; case DB_NEXT: newopd = 1; if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_first(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, DB_FIRST, &exact)) != 0) goto err; } else if ((ret = __bam_c_next(dbc, 1, 0)) != 0) @@ -933,7 +926,8 @@ __bam_c_get(dbc, key, data, flags, pgnop) case DB_NEXT_NODUP: newopd = 1; if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_first(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, DB_FIRST, &exact)) != 0) goto err; } else do { @@ -944,7 +938,8 @@ __bam_c_get(dbc, key, data, flags, pgnop) case DB_PREV: newopd = 1; if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_last(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, DB_LAST, &exact)) != 0) goto err; } else if ((ret = __bam_c_prev(dbc)) != 0) @@ -953,7 +948,8 @@ __bam_c_get(dbc, key, data, flags, pgnop) case DB_PREV_NODUP: newopd = 1; if (cp->pgno == PGNO_INVALID) { - if ((ret = __bam_c_last(dbc)) != 0) + if ((ret = __bam_c_search(dbc, + PGNO_INVALID, NULL, DB_LAST, &exact)) != 0) goto err; } else do { @@ -2136,99 +2132,6 @@ __bam_c_writelock(dbc) return (ret); } -/* - * __bam_c_first -- - * Return the first record. - */ -static int -__bam_c_first(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - db_pgno_t pgno; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* Walk down the left-hand side of the tree. 
*/ - for (pgno = cp->root;;) { - ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret); - if (ret != 0) - return (ret); - - /* If we find a leaf page, we're done. */ - if (ISLEAF(cp->page)) - break; - - pgno = GET_BINTERNAL(dbc->dbp, cp->page, 0)->pgno; - } - - /* If we want a write lock instead of a read lock, get it now. */ - if (F_ISSET(dbc, DBC_RMW)) { - ACQUIRE_WRITE_LOCK(dbc, ret); - if (ret != 0) - return (ret); - } - - cp->indx = 0; - - /* If on an empty page or a deleted record, move to the next one. */ - if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc)) - if ((ret = __bam_c_next(dbc, 0, 0)) != 0) - return (ret); - - return (0); -} - -/* - * __bam_c_last -- - * Return the last record. - */ -static int -__bam_c_last(dbc) - DBC *dbc; -{ - BTREE_CURSOR *cp; - db_pgno_t pgno; - int ret; - - cp = (BTREE_CURSOR *)dbc->internal; - ret = 0; - - /* Walk down the right-hand side of the tree. */ - for (pgno = cp->root;;) { - ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret); - if (ret != 0) - return (ret); - - /* If we find a leaf page, we're done. */ - if (ISLEAF(cp->page)) - break; - - pgno = GET_BINTERNAL(dbc->dbp, cp->page, - NUM_ENT(cp->page) - O_INDX)->pgno; - } - - /* If we want a write lock instead of a read lock, get it now. */ - if (F_ISSET(dbc, DBC_RMW)) { - ACQUIRE_WRITE_LOCK(dbc, ret); - if (ret != 0) - return (ret); - } - - cp->indx = NUM_ENT(cp->page) == 0 ? 0 : - NUM_ENT(cp->page) - - (TYPE(cp->page) == P_LBTREE ? P_INDX : O_INDX); - - /* If on an empty page or a deleted record, move to the previous one. */ - if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc)) - if ((ret = __bam_c_prev(dbc)) != 0) - return (ret); - - return (0); -} - /* * __bam_c_next -- * Move to the next record. @@ -2398,6 +2301,12 @@ __bam_c_search(dbc, root_pgno, key, flags, exactp) return (ret); switch (flags) { + case DB_FIRST: + sflags = (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_MIN; + goto search; + case DB_LAST: + sflags = (F_ISSET(dbc, DBC_RMW) ? 
S_WRITE : S_READ) | S_MAX; + goto search; case DB_SET_RECNO: if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0) return (ret); @@ -2575,7 +2484,6 @@ search: if ((ret = __bam_search(dbc, root_pgno, default: return (__db_unknown_flag(dbp->dbenv, "__bam_c_search", flags)); } - /* Initialize the cursor from the stack. */ cp->page = cp->csp->page; cp->pgno = cp->csp->page->pgno; @@ -2583,6 +2491,16 @@ search: if ((ret = __bam_search(dbc, root_pgno, cp->lock = cp->csp->lock; cp->lock_mode = cp->csp->lock_mode; + /* If on an empty page or a deleted record, move to the next one. */ + if (flags == DB_FIRST && + (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))) + if ((ret = __bam_c_next(dbc, 0, 0)) != 0) + return (ret); + if (flags == DB_LAST && + (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))) + if ((ret = __bam_c_prev(dbc)) != 0) + return (ret); + return (0); } @@ -2597,15 +2515,10 @@ __bam_c_physdel(dbc) BTREE_CURSOR *cp; DB *dbp; DBT key; - DB_LOCK lock; - DB_MPOOLFILE *mpf; - PAGE *h; - db_pgno_t pgno; - int delete_page, empty_page, exact, level, ret; + int delete_page, empty_page, exact, ret; dbp = dbc->dbp; memset(&key, 0, sizeof(DBT)); - mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; delete_page = empty_page = ret = 0; @@ -2683,91 +2596,7 @@ __bam_c_physdel(dbc) if (!delete_page) return (0); - /* - * Call __bam_search to reacquire the empty leaf page, but this time - * get both the leaf page and it's parent, locked. Jump back up the - * tree, until we have the top pair of pages that we want to delete. - * Once we have the top page that we want to delete locked, lock the - * underlying pages and check to make sure they're still empty. If - * they are, delete them. - */ - for (level = LEAFLEVEL;; ++level) { - /* Acquire a page and its parent, locked. 
*/ - if ((ret = __bam_search(dbc, PGNO_INVALID, - &key, S_WRPAIR, level, NULL, &exact)) != 0) - return (ret); - - /* - * If we reach the root or the parent page isn't going to be - * empty when we delete one record, stop. - */ - h = cp->csp[-1].page; - if (h->pgno == cp->root || NUM_ENT(h) != 1) - break; - - /* Discard the stack, retaining no locks. */ - (void)__bam_stkrel(dbc, STK_NOLOCK); - } - - /* - * Move the stack pointer one after the last entry, we may be about - * to push more items onto the page stack. - */ - ++cp->csp; - - /* - * cp->csp[-2].page is now the parent page, which we may or may not be - * going to delete, and cp->csp[-1].page is the first page we know we - * are going to delete. Walk down the chain of pages, acquiring pages - * until we've acquired a leaf page. Generally, this shouldn't happen; - * we should only see a single internal page with one item and a single - * leaf page with no items. The scenario where we could see something - * else is if reverse splits were turned off for awhile and then turned - * back on. That could result in all sorts of strangeness, e.g., empty - * pages in the tree, trees that looked like linked lists, and so on. - * - * !!! - * Sheer paranoia: if we find any pages that aren't going to be emptied - * by the delete, someone else added an item while we were walking the - * tree, and we discontinue the delete. Shouldn't be possible, but we - * check regardless. - */ - for (h = cp->csp[-1].page;;) { - if (ISLEAF(h)) { - if (NUM_ENT(h) != 0) - break; - break; - } else - if (NUM_ENT(h) != 1) - break; - - /* - * Get the next page, write lock it and push it onto the stack. - * We know it's index 0, because it can only have one element. 
- */ - switch (TYPE(h)) { - case P_IBTREE: - pgno = GET_BINTERNAL(dbp, h, 0)->pgno; - break; - case P_IRECNO: - pgno = GET_RINTERNAL(dbp, h, 0)->pgno; - break; - default: - return (__db_pgfmt(dbp->dbenv, PGNO(h))); - } - - if ((ret = - __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &lock)) != 0) - break; - if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) - break; - BT_STK_PUSH(dbp->dbenv, cp, h, 0, lock, DB_LOCK_WRITE, ret); - if (ret != 0) - break; - } - - /* Adjust the cursor stack to reference the last page on the stack. */ - BT_STK_POP(cp); + ret = __bam_search(dbc, PGNO_INVALID, &key, S_DEL, 0, NULL, &exact); /* * If everything worked, delete the stack, otherwise, release the @@ -2776,7 +2605,7 @@ __bam_c_physdel(dbc) if (ret == 0) DISCARD_CUR(dbc, ret); if (ret == 0) - ret = __bam_dpages(dbc, cp->sp); + ret = __bam_dpages(dbc, 1, 0); else (void)__bam_stkrel(dbc, 0); diff --git a/storage/bdb/btree/bt_delete.c b/storage/bdb/btree/bt_delete.c index 018c8ef496b..1e54687453f 100644 --- a/storage/bdb/btree/bt_delete.c +++ b/storage/bdb/btree/bt_delete.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_delete.c,v 11.49 2004/02/27 12:38:28 bostic Exp $ + * $Id: bt_delete.c,v 12.13 2005/10/20 18:14:59 bostic Exp $ */ #include "db_config.h" @@ -220,12 +220,13 @@ __bam_adjindx(dbc, h, indx, indx_copy, is_insert) * __bam_dpages -- * Delete a set of locked pages. 
* - * PUBLIC: int __bam_dpages __P((DBC *, EPG *)); + * PUBLIC: int __bam_dpages __P((DBC *, int, int)); */ int -__bam_dpages(dbc, stack_epg) +__bam_dpages(dbc, use_top, update) DBC *dbc; - EPG *stack_epg; + int use_top; + int update; { BTREE_CURSOR *cp; BINTERNAL *bi; @@ -233,7 +234,7 @@ __bam_dpages(dbc, stack_epg) DBT a, b; DB_LOCK c_lock, p_lock; DB_MPOOLFILE *mpf; - EPG *epg; + EPG *epg, *save_sp, *stack_epg; PAGE *child, *parent; db_indx_t nitems; db_pgno_t pgno, root_pgno; @@ -243,30 +244,27 @@ __bam_dpages(dbc, stack_epg) dbp = dbc->dbp; mpf = dbp->mpf; cp = (BTREE_CURSOR *)dbc->internal; + nitems = 0; + pgno = PGNO_INVALID; /* * We have the entire stack of deletable pages locked. * - * Btree calls us with a pointer to the beginning of a stack, where - * the first page in the stack is to have a single item deleted, and - * the rest of the pages are to be removed. + * Btree calls us with the first page in the stack is to have a + * single item deleted, and the rest of the pages are to be removed. * - * Recno calls us with a pointer into the middle of the stack, where - * the referenced page is to have a single item deleted, and pages - * after the stack reference are to be removed. - * - * First, discard any pages that we don't care about. + * Recno always has a stack to the root and __bam_merge operations + * may have unneeded items in the sack. We find the lowest page + * in the stack that has more than one record in it and start there. */ ret = 0; - for (epg = cp->sp; epg < stack_epg; ++epg) { - if ((t_ret = __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0) - ret = t_ret; - if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) - ret = t_ret; - } - if (ret != 0) - goto err; - + if (use_top) + stack_epg = cp->sp; + else + for (stack_epg = cp->csp; stack_epg > cp->sp; --stack_epg) + if (NUM_ENT(stack_epg->page) > 1) + break; + epg = stack_epg; /* * !!! * There is an interesting deadlock situation here. 
We have to relink @@ -276,8 +274,9 @@ __bam_dpages(dbc, stack_epg) * It will deadlock here. Before we unlink the subtree, we relink the * leaf page chain. */ - if ((ret = __bam_relink(dbc, cp->csp->page, NULL)) != 0) - goto err; + if (LEVEL(cp->csp->page) == 1 && + (ret = __bam_relink(dbc, cp->csp->page, PGNO_INVALID)) != 0) + goto discard; /* * Delete the last item that references the underlying pages that are @@ -288,9 +287,18 @@ __bam_dpages(dbc, stack_epg) * immediately. */ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) - goto err; + goto discard; if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0) - goto err; + goto discard; + + if (update && epg->indx == 0) { + save_sp = cp->csp; + cp->csp = epg; + ret = __bam_pupdate(dbc, epg->page); + cp->csp = save_sp; + if (ret != 0) + goto discard; + } pgno = PGNO(epg->page); nitems = NUM_ENT(epg->page); @@ -301,6 +309,17 @@ __bam_dpages(dbc, stack_epg) if (ret != 0) goto err_inc; + /* Then, discard any pages that we don't care about. */ +discard: for (epg = cp->sp; epg < stack_epg; ++epg) { + if ((t_ret = __memp_fput(mpf, epg->page, 0)) != 0 && ret == 0) + ret = t_ret; + epg->page = NULL; + if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) + ret = t_ret; + } + if (ret != 0) + goto err; + /* Free the rest of the pages in the stack. */ while (++epg <= cp->csp) { /* @@ -310,13 +329,24 @@ __bam_dpages(dbc, stack_epg) * be referenced by a cursor. */ if (NUM_ENT(epg->page) != 0) { - DB_ASSERT(NUM_ENT(epg->page) == 1); + DB_ASSERT(LEVEL(epg->page) != 1); if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) goto err; + /* + * Sheer paranoia: if we find any pages that aren't + * emptied by the delete, someone else added an item + * while we were walking the tree, and we discontinue + * the delete. Shouldn't be possible, but we check + * regardless. 
+ */ + if (NUM_ENT(epg->page) != 0) + goto err; } ret = __db_free(dbc, epg->page); + if (cp->page == epg->page) + cp->page = NULL; epg->page = NULL; if ((t_ret = __TLPUT(dbc, epg->lock)) != 0 && ret == 0) ret = t_ret; @@ -468,12 +498,13 @@ stop: done = 1; * __bam_relink -- * Relink around a deleted page. * - * PUBLIC: int __bam_relink __P((DBC *, PAGE *, PAGE **)); + * PUBLIC: int __bam_relink __P((DBC *, PAGE *, db_pgno_t)); */ int -__bam_relink(dbc, pagep, new_next) +__bam_relink(dbc, pagep, new_pgno) DBC *dbc; - PAGE *pagep, **new_next; + PAGE *pagep; + db_pgno_t new_pgno; { DB *dbp; PAGE *np, *pp; @@ -519,7 +550,7 @@ __bam_relink(dbc, pagep, new_next) /* Log the change. */ if (DBC_LOGGING(dbc)) { if ((ret = __bam_relink_log(dbp, dbc->txn, &ret_lsn, 0, - pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp, + pagep->pgno, new_pgno, pagep->prev_pgno, plsnp, pagep->next_pgno, nlsnp)) != 0) goto err; } else @@ -528,33 +559,27 @@ __bam_relink(dbc, pagep, new_next) np->lsn = ret_lsn; if (pp != NULL) pp->lsn = ret_lsn; - pagep->lsn = ret_lsn; /* * Modify and release the two pages. - * - * !!! - * The parameter new_next gets set to the page following the page we - * are removing. If there is no following page, then new_next gets - * set to NULL. 
*/ if (np != NULL) { - np->prev_pgno = pagep->prev_pgno; - if (new_next == NULL) - ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY); - else { - *new_next = np; - ret = __memp_fset(mpf, np, DB_MPOOL_DIRTY); - } + if (new_pgno == PGNO_INVALID) + np->prev_pgno = pagep->prev_pgno; + else + np->prev_pgno = new_pgno; + ret = __memp_fput(mpf, np, DB_MPOOL_DIRTY); if ((t_ret = __TLPUT(dbc, npl)) != 0 && ret == 0) ret = t_ret; if (ret != 0) goto err; - } else if (new_next != NULL) - *new_next = NULL; + } if (pp != NULL) { - pp->next_pgno = pagep->next_pgno; + if (new_pgno == PGNO_INVALID) + pp->next_pgno = pagep->next_pgno; + else + pp->next_pgno = new_pgno; ret = __memp_fput(mpf, pp, DB_MPOOL_DIRTY); if ((t_ret = __TLPUT(dbc, ppl)) != 0 && ret == 0) ret = t_ret; @@ -571,3 +596,48 @@ err: if (np != NULL) (void)__TLPUT(dbc, ppl); return (ret); } + +/* + * __bam_pupdate -- + * Update parent key pointers up the tree. + * + * PUBLIC: int __bam_pupdate __P((DBC *, PAGE *)); + */ +int +__bam_pupdate(dbc, lpg) + DBC *dbc; + PAGE *lpg; +{ + BTREE_CURSOR *cp; + DB_ENV *dbenv; + EPG *epg; + int ret; + + dbenv = dbc->dbp->dbenv; + cp = (BTREE_CURSOR *)dbc->internal; + ret = 0; + + /* + * Update the parents up the tree. __bam_pinsert only looks at the + * left child if is a leaf page, so we don't need to change it. We + * just do a delete and insert; a replace is possible but reusing + * pinsert is better. + */ + for (epg = &cp->csp[-1]; epg >= cp->sp; epg--) { + if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0) + return (ret); + epg->indx--; + if ((ret = __bam_pinsert(dbc, epg, + lpg, epg[1].page, BPI_NORECNUM)) != 0) { + if (ret == DB_NEEDSPLIT) { + /* This should not happen. 
*/ + __db_err(dbenv, + "Not enough room in parent: %s: page %lu", + dbc->dbp->fname, (u_long)PGNO(epg->page)); + ret = __db_panic(dbenv, EINVAL); + } + return (ret); + } + } + return (ret); +} diff --git a/storage/bdb/btree/bt_method.c b/storage/bdb/btree/bt_method.c index 0b67da91efe..c6bfa869fd1 100644 --- a/storage/bdb/btree/bt_method.c +++ b/storage/bdb/btree/bt_method.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2004 + * Copyright (c) 1999-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_method.c,v 11.38 2004/09/22 03:31:26 bostic Exp $ + * $Id: bt_method.c,v 12.2 2005/06/16 20:20:16 bostic Exp $ */ #include "db_config.h" @@ -18,7 +18,6 @@ #include "dbinc/btree.h" #include "dbinc/qam.h" -static int __bam_set_bt_maxkey __P((DB *, u_int32_t)); static int __bam_set_bt_minkey __P((DB *, u_int32_t)); static int __bam_set_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *, const DBT *))); @@ -52,7 +51,6 @@ __bam_db_create(dbp) t->bt_prefix = __bam_defpfx; dbp->set_bt_compare = __bam_set_bt_compare; - dbp->set_bt_maxkey = __bam_set_bt_maxkey; dbp->get_bt_minkey = __bam_get_bt_minkey; dbp->set_bt_minkey = __bam_set_bt_minkey; dbp->set_bt_prefix = __bam_set_bt_prefix; @@ -208,31 +206,6 @@ __bam_set_bt_compare(dbp, func) return (0); } -/* - * __bam_set_bt_maxkey -- - * Set the maximum keys per page. - */ -static int -__bam_set_bt_maxkey(dbp, bt_maxkey) - DB *dbp; - u_int32_t bt_maxkey; -{ - BTREE *t; - - DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_bt_maxkey"); - DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE); - - t = dbp->bt_internal; - - if (bt_maxkey < 1) { - __db_err(dbp->dbenv, "minimum bt_maxkey value is 1"); - return (EINVAL); - } - - t->bt_maxkey = bt_maxkey; - return (0); -} - /* * __db_get_bt_minkey -- * Get the minimum keys per page. 
diff --git a/storage/bdb/btree/bt_open.c b/storage/bdb/btree/bt_open.c index e890c5dd75d..d1fcaa76597 100644 --- a/storage/bdb/btree/bt_open.c +++ b/storage/bdb/btree/bt_open.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_open.c,v 11.92 2004/04/29 14:39:47 ubell Exp $ + * $Id: bt_open.c,v 12.5 2005/09/28 17:44:17 margo Exp $ */ #include "db_config.h" @@ -311,7 +311,6 @@ __bam_read_root(dbp, txn, base_pgno, flags) * metadata page will be created/initialized elsewhere. */ if (meta->dbmeta.magic == DB_BTREEMAGIC) { - t->bt_maxkey = meta->maxkey; t->bt_minkey = meta->minkey; t->re_pad = (int)meta->re_pad; t->re_len = meta->re_len; @@ -395,7 +394,6 @@ __bam_init_meta(dbp, meta, pgno, lsnp) memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN); t = dbp->bt_internal; - meta->maxkey = t->bt_maxkey; meta->minkey = t->bt_minkey; meta->re_len = t->re_len; meta->re_pad = (u_int32_t)t->re_pad; @@ -428,7 +426,7 @@ __bam_new_file(dbp, txn, fhp, name) DBT pdbt; PAGE *root; db_pgno_t pgno; - int ret; + int ret, t_ret; void *buf; dbenv = dbp->dbenv; @@ -437,7 +435,7 @@ __bam_new_file(dbp, txn, fhp, name) meta = NULL; buf = NULL; - if (name == NULL) { + if (F_ISSET(dbp, DB_AM_INMEM)) { /* Build the meta-data page. */ pgno = PGNO_BASE_MD; if ((ret = @@ -447,6 +445,9 @@ __bam_new_file(dbp, txn, fhp, name) __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn); meta->root = 1; meta->dbmeta.last_pgno = 1; + if ((ret = + __db_log_page(dbp, txn, &lsn, pgno, (PAGE *)meta)) != 0) + goto err; ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY); meta = NULL; if (ret != 0) @@ -460,6 +461,9 @@ __bam_new_file(dbp, txn, fhp, name) P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, dbp->type == DB_RECNO ? 
P_LRECNO : P_LBTREE); LSN_NOT_LOGGED(root->lsn); + if ((ret = + __db_log_page(dbp, txn, &root->lsn, pgno, root)) != 0) + goto err; ret = __memp_fput(mpf, root, DB_MPOOL_DIRTY); root = NULL; if (ret != 0) @@ -509,10 +513,12 @@ __bam_new_file(dbp, txn, fhp, name) err: if (buf != NULL) __os_free(dbenv, buf); else { - if (meta != NULL) - (void)__memp_fput(mpf, meta, 0); - if (root != NULL) - (void)__memp_fput(mpf, root, 0); + if (meta != NULL && + (t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0) + ret = t_ret; + if (root != NULL && + (t_ret = __memp_fput(mpf, root, 0)) != 0 && ret == 0) + ret = t_ret; } return (ret); } diff --git a/storage/bdb/btree/bt_put.c b/storage/bdb/btree/bt_put.c index 128b16a82f0..dd56f9d3523 100644 --- a/storage/bdb/btree/bt_put.c +++ b/storage/bdb/btree/bt_put.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: bt_put.c,v 11.80 2004/10/29 17:33:25 ubell Exp $ + * $Id: bt_put.c,v 12.10 2005/10/20 18:57:00 bostic Exp $ */ #include "db_config.h" @@ -58,7 +58,9 @@ static int __bam_build __P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t)); -static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t)); +static int __bam_dup_check __P((DBC *, u_int32_t, + PAGE *, u_int32_t, u_int32_t, db_indx_t *)); +static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t, u_int32_t)); static int __bam_ovput __P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *)); static u_int32_t @@ -84,11 +86,12 @@ __bam_iitem(dbc, key, data, op, flags) DBT bk_hdr, tdbt; DB_MPOOLFILE *mpf; PAGE *h; - db_indx_t indx; - u_int32_t data_size, have_bytes, need_bytes, needed; + db_indx_t cnt, indx; + u_int32_t data_size, have_bytes, need_bytes, needed, pages, pagespace; int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted; COMPQUIET(bk, NULL); + COMPQUIET(cnt, 0); dbp = dbc->dbp; dbenv = dbp->dbenv; @@ -217,13 +220,39 @@ __bam_iitem(dbc, key, data, op, flags) return (__db_unknown_flag(dbenv, "DB->put", op)); } - /* - * If there's not enough room, or the user has put a ceiling on the - * number of keys permitted in the page, split the page. - */ + /* Split the page if there's not enough room. */ if (P_FREESPACE(dbp, h) < needed) return (DB_NEEDSPLIT); + /* + * Check to see if we will convert to off page duplicates -- if + * so, we'll need a page. + */ + if (F_ISSET(dbp, DB_AM_DUP) && + TYPE(h) == P_LBTREE && op != DB_KEYFIRST && + P_FREESPACE(dbp, h) - needed <= dbp->pgsize / 2 && + __bam_dup_check(dbc, op, h, indx, needed, &cnt)) { + pages = 1; + dupadjust = 1; + } else + pages = 0; + + /* + * If we are not using transactions and there is a page limit + * set on the file, then figure out if things will fit before + * taking action. 
+ */ + if (dbc->txn == NULL && dbp->mpf->mfp->maxpgno != 0) { + pagespace = P_MAXSPACE(dbp, dbp->pgsize); + if (bigdata) + pages += ((data_size - 1) / pagespace) + 1; + if (bigkey) + pages += ((key->size - 1) / pagespace) + 1; + + if (pages > (dbp->mpf->mfp->maxpgno - dbp->mpf->mfp->last_pgno)) + return (__db_space_err(dbp)); + } + /* * The code breaks it up into five cases: * @@ -259,7 +288,6 @@ __bam_iitem(dbc, key, data, op, flags) return (ret); indx += 3; - dupadjust = 1; cp->indx += 2; } else { @@ -276,7 +304,6 @@ __bam_iitem(dbc, key, data, op, flags) return (ret); ++indx; - dupadjust = 1; } break; case DB_CURRENT: @@ -287,11 +314,11 @@ __bam_iitem(dbc, key, data, op, flags) * will try and remove the item because the cursor's delete * flag is set. */ - (void)__bam_ca_delete(dbp, PGNO(h), indx, 0); + if ((ret = __bam_ca_delete(dbp, PGNO(h), indx, 0, NULL)) != 0) + return (ret); if (TYPE(h) == P_LBTREE) { ++indx; - dupadjust = 1; } /* @@ -380,10 +407,9 @@ __bam_iitem(dbc, key, data, op, flags) * up at least 25% of the space on the page. If it does, move it onto * its own page. */ - if (dupadjust && P_FREESPACE(dbp, h) <= dbp->pgsize / 2) { - if ((ret = __bam_dup_convert(dbc, h, indx - O_INDX)) != 0) - return (ret); - } + if (dupadjust && + (ret = __bam_dup_convert(dbc, h, indx - O_INDX, cnt)) != 0) + return (ret); /* If we've modified a recno file, set the flag. */ if (dbc->dbtype == DB_RECNO) @@ -664,26 +690,22 @@ __bam_ritem(dbc, h, indx, data) } /* - * __bam_dup_convert -- + * __bam_dup_check -- * Check to see if the duplicate set at indx should have its own page. - * If it should, create it. 
*/ static int -__bam_dup_convert(dbc, h, indx) +__bam_dup_check(dbc, op, h, indx, sz, cntp) DBC *dbc; + u_int32_t op; PAGE *h; - u_int32_t indx; + u_int32_t indx, sz; + db_indx_t *cntp; { BKEYDATA *bk; DB *dbp; - DBT hdr; - DB_MPOOLFILE *mpf; - PAGE *dp; - db_indx_t cnt, cpindx, dindx, first, *inp, sz; - int ret; + db_indx_t cnt, first, *inp; dbp = dbc->dbp; - mpf = dbp->mpf; inp = P_INP(dbp, h); /* @@ -695,11 +717,21 @@ __bam_dup_convert(dbc, h, indx) /* Count the key once. */ bk = GET_BKEYDATA(dbp, h, indx); - sz = B_TYPE(bk->type) == B_KEYDATA ? + sz += B_TYPE(bk->type) == B_KEYDATA ? BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE; /* Sum up all the data items. */ - for (cnt = 0, first = indx; + first = indx; + + /* + * Account for the record being inserted. If we are replacing it, + * don't count it twice. + * + * We execute the loop with first == indx to get the size of the + * first record. + */ + cnt = op == DB_CURRENT ? 0 : 1; + for (first = indx; indx < NUM_ENT(h) && inp[first] == inp[indx]; ++cnt, indx += P_INDX) { bk = GET_BKEYDATA(dbp, h, indx + O_INDX); @@ -726,6 +758,36 @@ __bam_dup_convert(dbc, h, indx) if (sz < dbp->pgsize / 4) return (0); + *cntp = cnt; + return (1); +} + +/* + * __bam_dup_convert -- + * Move a set of duplicates off-page and into their own tree. + */ +static int +__bam_dup_convert(dbc, h, indx, cnt) + DBC *dbc; + PAGE *h; + u_int32_t indx, cnt; +{ + BKEYDATA *bk; + DB *dbp; + DBT hdr; + DB_MPOOLFILE *mpf; + PAGE *dp; + db_indx_t cpindx, dindx, first, *inp; + int ret; + + dbp = dbc->dbp; + mpf = dbp->mpf; + inp = P_INP(dbp, h); + + /* Move to the beginning of the dup set. */ + while (indx > 0 && inp[indx] == inp[indx - P_INDX]) + indx -= P_INDX; + /* Get a new page. */ if ((ret = __db_new(dbc, dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0) @@ -739,8 +801,8 @@ __bam_dup_convert(dbc, h, indx) * we're dealing with. 
*/ memset(&hdr, 0, sizeof(hdr)); - dindx = first; - indx = first; + first = indx; + dindx = indx; cpindx = 0; do { /* Move cursors referencing the old entry to the new entry. */ diff --git a/storage/bdb/btree/bt_rec.c b/storage/bdb/btree/bt_rec.c index e3fa7363c1d..3667ee12c58 100644 --- a/storage/bdb/btree/bt_rec.c +++ b/storage/bdb/btree/bt_rec.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_rec.c,v 11.70 2004/09/24 00:43:12 bostic Exp $ + * $Id: bt_rec.c,v 12.11 2005/10/20 18:57:01 bostic Exp $ */ #include "db_config.h" @@ -58,7 +58,7 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info) _lp = lp = np = pp = _rp = rp = NULL; sp = NULL; - REC_INTRO(__bam_split_read, 1); + REC_INTRO(__bam_split_read, 1, 0); /* * There are two kinds of splits that we have to recover from. The @@ -96,21 +96,21 @@ redo: if (DB_REDO(op)) { REC_FGET(mpf, root_pgno, &pp, do_left); cmp = log_compare(&LSN(pp), &LSN(argp->pg.data)); - CHECK_LSN(op, + CHECK_LSN(dbenv, op, cmp, &LSN(pp), &LSN(argp->pg.data)); p_update = cmp == 0; } do_left: if (lp != NULL) { cmp = log_compare(&LSN(lp), &argp->llsn); - CHECK_LSN(op, cmp, &LSN(lp), &argp->llsn); + CHECK_LSN(dbenv, op, cmp, &LSN(lp), &argp->llsn); if (cmp == 0) l_update = 1; } if (rp != NULL) { cmp = log_compare(&LSN(rp), &argp->rlsn); - CHECK_LSN(op, cmp, &LSN(rp), &argp->rlsn); + CHECK_LSN(dbenv, op, cmp, &LSN(rp), &argp->rlsn); if (cmp == 0) r_update = 1; } @@ -211,7 +211,7 @@ check_next: /* goto done; } cmp = log_compare(&LSN(np), &argp->nlsn); - CHECK_LSN(op, cmp, &LSN(np), &argp->nlsn); + CHECK_LSN(dbenv, op, cmp, &LSN(np), &argp->nlsn); if (cmp == 0) { PREV_PGNO(np) = argp->right; np->lsn = *lsnp; @@ -337,12 +337,13 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) DB_MPOOLFILE *mpf; PAGE *pagep; db_pgno_t pgno, root_pgno; + db_recno_t rcnt; int cmp_n, cmp_p, modified, ret; pagep = NULL; 
COMPQUIET(info, NULL); REC_PRINT(__bam_rsplit_print); - REC_INTRO(__bam_rsplit_read, 1); + REC_INTRO(__bam_rsplit_read, 1, 1); /* Fix the root page. */ pgno = root_pgno = argp->root_pgno; @@ -361,10 +362,19 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->rootlsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->rootlsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->rootlsn); if (cmp_p == 0 && DB_REDO(op)) { - /* Need to redo update described. */ + /* + * Copy the new data to the root page. If it is not now a + * leaf page we need to restore the record number. We could + * try to determine if C_RECNUM was set in the btree, but + * that's not really necessary since the field is not used + * otherwise. + */ + rcnt = RE_NREC(pagep); memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size); + if (LEVEL(pagep) > LEAFLEVEL) + RE_NREC_SET(pagep, rcnt); pagep->pgno = root_pgno; pagep->lsn = *lsnp; modified = 1; @@ -403,7 +413,7 @@ do_page: (void)__ua_memcpy(©_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN)); cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), ©_lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), ©_lsn); if (cmp_p == 0 && DB_REDO(op)) { /* Need to redo update described. */ pagep->lsn = *lsnp; @@ -450,7 +460,7 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_adj_print); - REC_INTRO(__bam_adj_read, 1); + REC_INTRO(__bam_adj_read, 1, 1); /* Get the page; if it never existed and we're undoing, we're done. 
*/ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { @@ -468,7 +478,7 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); if (cmp_p == 0 && DB_REDO(op)) { /* Need to redo update described. */ if ((ret = __bam_adjindx(dbc, @@ -524,7 +534,7 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_cadjust_print); - REC_INTRO(__bam_cadjust_read, 1); + REC_INTRO(__bam_cadjust_read, 1, 0); /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { @@ -542,7 +552,7 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); if (cmp_p == 0 && DB_REDO(op)) { /* Need to redo update described. */ if (IS_BTREE_PAGE(pagep)) { @@ -613,7 +623,7 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_cdel_print); - REC_INTRO(__bam_cdel_read, 1); + REC_INTRO(__bam_cdel_read, 1, 0); /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { @@ -631,7 +641,7 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); if (cmp_p == 0 && DB_REDO(op)) { /* Need to redo update described. */ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? 
O_INDX : 0); @@ -644,7 +654,9 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info) indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0); B_DCLR(GET_BKEYDATA(file_dbp, pagep, indx)->type); - (void)__bam_ca_delete(file_dbp, argp->pgno, argp->indx, 0); + if ((ret = __bam_ca_delete( + file_dbp, argp->pgno, argp->indx, 0, NULL)) != 0) + goto out; LSN(pagep) = argp->lsn; modified = 1; @@ -689,7 +701,7 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_repl_print); - REC_INTRO(__bam_repl_read, 1); + REC_INTRO(__bam_repl_read, 1, 1); /* Get the page; if it never existed and we're undoing, we're done. */ if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { @@ -708,7 +720,7 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); if (cmp_p == 0 && DB_REDO(op)) { /* * Need to redo update described. @@ -799,7 +811,7 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info) meta = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_root_print); - REC_INTRO(__bam_root_read, 0); + REC_INTRO(__bam_root_read, 0, 0); if ((ret = __memp_fget(mpf, &argp->meta_pgno, 0, &meta)) != 0) { if (ret != DB_PAGE_NOTFOUND @@ -816,7 +828,7 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info) modified = 0; cmp_n = log_compare(lsnp, &LSN(meta)); cmp_p = log_compare(&LSN(meta), &argp->meta_lsn); - CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn); + CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn); if (cmp_p == 0 && DB_REDO(op)) { /* Need to redo update described. 
*/ meta->root = argp->root_pgno; @@ -866,7 +878,7 @@ __bam_curadj_recover(dbenv, dbtp, lsnp, op, info) COMPQUIET(mpf, NULL); REC_PRINT(__bam_curadj_print); - REC_INTRO(__bam_curadj_read, 0); + REC_INTRO(__bam_curadj_read, 0, 1); ret = 0; if (op != DB_TXN_ABORT) @@ -891,8 +903,9 @@ __bam_curadj_recover(dbenv, dbtp, lsnp, op, info) break; case DB_CA_SPLIT: - __bam_ca_undosplit(file_dbp, argp->from_pgno, - argp->to_pgno, argp->left_pgno, argp->from_indx); + if ((ret = __bam_ca_undosplit(file_dbp, argp->from_pgno, + argp->to_pgno, argp->left_pgno, argp->from_indx)) != 0) + goto out; break; } @@ -928,7 +941,7 @@ __bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info) rdbc = NULL; REC_PRINT(__bam_rcuradj_print); - REC_INTRO(__bam_rcuradj_read, 0); + REC_INTRO(__bam_rcuradj_read, 0, 1); ret = t_ret = 0; @@ -1008,7 +1021,7 @@ __bam_relink_recover(dbenv, dbtp, lsnp, op, info) pagep = NULL; COMPQUIET(info, NULL); REC_PRINT(__bam_relink_print); - REC_INTRO(__bam_relink_read, 1); + REC_INTRO(__bam_relink_read, 1, 0); /* * There are up to three pages we need to check -- the page, and the @@ -1016,38 +1029,7 @@ __bam_relink_recover(dbenv, dbtp, lsnp, op, info) * the current page is the result of a split and is being recovered * elsewhere, so all we need do is recover the next page. */ - if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { - if (ret != DB_PAGE_NOTFOUND -#ifndef HAVE_FTRUNCATE - || DB_REDO(op) -#endif - ) { - ret = __db_pgerr(file_dbp, argp->pgno, ret); - goto out; - } else - goto next2; - } - modified = 0; - - cmp_p = log_compare(&LSN(pagep), &argp->lsn); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn); - if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the relink. */ - pagep->lsn = *lsnp; - modified = 1; - } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { - /* Undo the relink. */ - pagep->next_pgno = argp->next; - pagep->prev_pgno = argp->prev; - - pagep->lsn = argp->lsn; - modified = 1; - } - if ((ret = __memp_fput(mpf, pagep, modified ? 
DB_MPOOL_DIRTY : 0)) != 0) - goto out; - pagep = NULL; - -next2: if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { + if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { if (ret != DB_PAGE_NOTFOUND #ifndef HAVE_FTRUNCATE || DB_REDO(op) @@ -1062,24 +1044,24 @@ next2: if ((ret = __memp_fget(mpf, &argp->next, 0, &pagep)) != 0) { modified = 0; cmp_n = log_compare(lsnp, &LSN(pagep)); cmp_p = log_compare(&LSN(pagep), &argp->lsn_next); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn_next); if (cmp_p == 0 && DB_REDO(op)) { - /* Redo the remove or undo the add. */ - pagep->prev_pgno = argp->prev; + /* Redo the remove or replace. */ + if (argp->new_pgno == PGNO_INVALID) + pagep->prev_pgno = argp->prev; + else + pagep->prev_pgno = argp->new_pgno; + pagep->lsn = *lsnp; modified = 1; } else if (cmp_n == 0 && DB_UNDO(op)) { - /* Undo the remove or redo the add. */ + /* Undo the remove or replace. */ pagep->prev_pgno = argp->pgno; + pagep->lsn = argp->lsn_next; modified = 1; } - if (modified == 1) { - if (DB_UNDO(op)) - pagep->lsn = argp->lsn_next; - else - pagep->lsn = *lsnp; - } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) goto out; pagep = NULL; @@ -1098,24 +1080,24 @@ prev: if ((ret = __memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) { modified = 0; cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev); - CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev); + CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn_prev); if (cmp_p == 0 && DB_REDO(op)) { /* Redo the relink. */ - pagep->next_pgno = argp->next; + if (argp->new_pgno == PGNO_INVALID) + pagep->next_pgno = argp->next; + else + pagep->next_pgno = argp->new_pgno; + pagep->lsn = *lsnp; modified = 1; } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) { /* Undo the relink. 
*/ pagep->next_pgno = argp->pgno; + pagep->lsn = argp->lsn_prev; modified = 1; } - if (modified == 1) { - if (DB_UNDO(op)) - pagep->lsn = argp->lsn_prev; - else - pagep->lsn = *lsnp; - } + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) goto out; pagep = NULL; @@ -1127,3 +1109,281 @@ out: if (pagep != NULL) (void)__memp_fput(mpf, pagep, 0); REC_CLOSE; } + +/* + * __bam_merge_recover -- + * Recovery function for merge. + * + * PUBLIC: int __bam_merge_recover + * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + */ +int +__bam_merge_recover(dbenv, dbtp, lsnp, op, info) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops op; + void *info; +{ + __bam_merge_args *argp; + BKEYDATA *bk; + DB *file_dbp; + DBC *dbc; + DB_MPOOLFILE *mpf; + PAGE *pagep; + db_indx_t indx, *ninp, *pinp; + u_int32_t size; + u_int8_t *bp; + int cmp_n, cmp_p, i, modified, ret; + + COMPQUIET(info, NULL); + + REC_PRINT(__bam_merge_print); + REC_INTRO(__bam_merge_read, 1, 1); + + if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else + goto next; + } + + modified = 0; + cmp_n = log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), &argp->lsn); + CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); + + if (cmp_p == 0 && DB_REDO(op)) { + /* + * If the header is provided the page is empty, copy the + * needed data. 
+ */ + DB_ASSERT(argp->hdr.size == 0 || NUM_ENT(pagep) == 0); + if (argp->hdr.size != 0) { + P_INIT(pagep, file_dbp->pgsize, pagep->pgno, + PREV_PGNO(argp->hdr.data), + NEXT_PGNO(argp->hdr.data), + LEVEL(argp->hdr.data), TYPE(argp->hdr.data)); + } + if (TYPE(pagep) == P_OVERFLOW) { + OV_REF(pagep) = OV_REF(argp->hdr.data); + OV_LEN(pagep) = OV_LEN(argp->hdr.data); + bp = (u_int8_t *) pagep + P_OVERHEAD(file_dbp); + memcpy(bp, argp->data.data, argp->data.size); + } else { + /* Copy the data segment. */ + bp = (u_int8_t *)pagep + + (db_indx_t)(HOFFSET(pagep) - argp->data.size); + memcpy(bp, argp->data.data, argp->data.size); + + /* Copy index table offset past the current entries. */ + pinp = P_INP(file_dbp, pagep) + NUM_ENT(pagep); + ninp = argp->ind.data; + for (i = 0; + i < (int)(argp->ind.size / sizeof(*ninp)); i++) + *pinp++ = *ninp++ + - (file_dbp->pgsize - HOFFSET(pagep)); + HOFFSET(pagep) -= argp->data.size; + NUM_ENT(pagep) += i; + } + pagep->lsn = *lsnp; + modified = 1; + } else if (cmp_n == 0 && !DB_REDO(op)) { + /* + * Since logging is logical at the page level + * we cannot just truncate the data space. Delete + * the proper number of items from the logical end + * of the page. 
+ */ + for (i = 0; i < (int)(argp->ind.size / sizeof(*ninp)); i++) { + indx = NUM_ENT(pagep) - 1; + if (P_INP(file_dbp, pagep)[indx] == + P_INP(file_dbp, pagep)[indx - P_INDX]) { + NUM_ENT(pagep)--; + continue; + } + switch (TYPE(pagep)) { + case P_LBTREE: + case P_LRECNO: + case P_LDUP: + bk = GET_BKEYDATA(file_dbp, pagep, indx); + size = BITEM_SIZE(bk); + break; + + case P_IBTREE: + size = BINTERNAL_SIZE( + GET_BINTERNAL(file_dbp, pagep, indx)->len); + break; + case P_IRECNO: + size = RINTERNAL_SIZE; + break; + + default: + ret = __db_pgfmt(dbenv, PGNO(pagep)); + goto out; + } + if ((ret = + __db_ditem(dbc, pagep, indx, size)) != 0) + goto out; + } + if (argp->ind.size == 0) + HOFFSET(pagep) = file_dbp->pgsize; + pagep->lsn = argp->lsn; + modified = 1; + } + + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + +next: if ((ret = __memp_fget(mpf, &argp->npgno, 0, &pagep)) != 0) { + if (ret != DB_PAGE_NOTFOUND +#ifndef HAVE_FTRUNCATE + || DB_REDO(op) +#endif + ) { + ret = __db_pgerr(file_dbp, argp->pgno, ret); + goto out; + } else + goto done; + } + + modified = 0; + cmp_n = log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), &argp->nlsn); + CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->nlsn); + + if (cmp_p == 0 && DB_REDO(op)) { + /* Need to truncate the page. */ + HOFFSET(pagep) = file_dbp->pgsize; + NUM_ENT(pagep) = 0; + pagep->lsn = *lsnp; + modified = 1; + } else if (cmp_n == 0 && !DB_REDO(op)) { + /* Need to put the data back on the page. */ + if (TYPE(pagep) == P_OVERFLOW) { + OV_REF(pagep) = OV_REF(argp->hdr.data); + OV_LEN(pagep) = OV_LEN(argp->hdr.data); + bp = (u_int8_t *) pagep + P_OVERHEAD(file_dbp); + memcpy(bp, argp->data.data, argp->data.size); + } else { + bp = (u_int8_t *)pagep + + (db_indx_t)(HOFFSET(pagep) - argp->data.size); + memcpy(bp, argp->data.data, argp->data.size); + + /* Copy index table. 
*/ + pinp = P_INP(file_dbp, pagep) + NUM_ENT(pagep); + ninp = argp->ind.data; + for (i = 0; + i < (int)(argp->ind.size / sizeof(*ninp)); i++) + *pinp++ = *ninp++; + HOFFSET(pagep) -= argp->data.size; + NUM_ENT(pagep) = i; + } + pagep->lsn = argp->nlsn; + modified = 1; + } + + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; +done: + *lsnp = argp->prev_lsn; + ret = 0; + +out: REC_CLOSE; +} + +/* + * __bam_pgno_recover -- + * Recovery function for page number replacment. + * + * PUBLIC: int __bam_pgno_recover + * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)); + */ +int +__bam_pgno_recover(dbenv, dbtp, lsnp, op, info) + DB_ENV *dbenv; + DBT *dbtp; + DB_LSN *lsnp; + db_recops op; + void *info; +{ + BINTERNAL *bi; + __bam_pgno_args *argp; + DB *file_dbp; + DBC *dbc; + DB_MPOOLFILE *mpf; + PAGE *pagep, *npagep; + db_pgno_t *pgnop; + int cmp_n, cmp_p, modified, ret; + + COMPQUIET(info, NULL); + + REC_PRINT(__bam_pgno_print); + REC_INTRO(__bam_pgno_read, 1, 0); + + REC_FGET(mpf, argp->pgno, &pagep, done); + + modified = 0; + cmp_n = log_compare(lsnp, &LSN(pagep)); + cmp_p = log_compare(&LSN(pagep), &argp->lsn); + CHECK_LSN(file_dbp->dbenv, op, cmp_p, &LSN(pagep), &argp->lsn); + + if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && !DB_REDO(op))) { + switch (TYPE(pagep)) { + case P_IBTREE: + /* + * An internal record can have both a overflow + * and child pointer. Fetch the page to see + * which it is. 
+ */ + bi = GET_BINTERNAL(file_dbp, pagep, argp->indx); + if (B_TYPE(bi->type) == B_OVERFLOW) { + REC_FGET(mpf, argp->npgno, &npagep, out); + + if (TYPE(npagep) == P_OVERFLOW) + pgnop = + &((BOVERFLOW *)(bi->data))->pgno; + else + pgnop = &bi->pgno; + if ((ret = __memp_fput(mpf, npagep, 0)) != 0) + goto out; + break; + } + pgnop = &bi->pgno; + break; + case P_IRECNO: + pgnop = + &GET_RINTERNAL(file_dbp, pagep, argp->indx)->pgno; + break; + default: + pgnop = + &GET_BOVERFLOW(file_dbp, pagep, argp->indx)->pgno; + break; + } + + if (DB_REDO(op)) { + /* Need to redo update described. */ + *pgnop = argp->npgno; + pagep->lsn = *lsnp; + modified = 1; + } else { + *pgnop = argp->opgno; + pagep->lsn = argp->lsn; + modified = 1; + } + } + + if ((ret = __memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) + goto out; + +done: + *lsnp = argp->prev_lsn; + ret = 0; + +out: REC_CLOSE; +} diff --git a/storage/bdb/btree/bt_reclaim.c b/storage/bdb/btree/bt_reclaim.c index ee722a30f15..d7884a79e0c 100644 --- a/storage/bdb/btree/bt_reclaim.c +++ b/storage/bdb/btree/bt_reclaim.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2004 + * Copyright (c) 1998-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_reclaim.c,v 11.15 2004/01/28 03:35:49 bostic Exp $ + * $Id: bt_reclaim.c,v 12.2 2005/06/16 20:20:19 bostic Exp $ */ #include "db_config.h" @@ -69,7 +69,8 @@ __bam_truncate(dbc, countp) ret = __bam_traverse(dbc, DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc); - *countp = trunc.count; + if (countp != NULL) + *countp = trunc.count; return (ret); } diff --git a/storage/bdb/btree/bt_recno.c b/storage/bdb/btree/bt_recno.c index 78f149dd61c..a7da96ded4d 100644 --- a/storage/bdb/btree/bt_recno.c +++ b/storage/bdb/btree/bt_recno.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2004 + * Copyright (c) 1997-2005 * Sleepycat Software. 
All rights reserved. * - * $Id: bt_recno.c,v 11.117 2004/03/28 17:01:01 bostic Exp $ + * $Id: bt_recno.c,v 12.6 2005/08/08 14:27:59 bostic Exp $ */ #include "db_config.h" @@ -203,7 +203,6 @@ __ram_c_del(dbc) DB *dbp; DB_LSN lsn; DBT hdr, data; - EPG *epg; int exact, ret, stack, t_ret; dbp = dbc->dbp; @@ -280,21 +279,11 @@ __ram_c_del(dbc) * are closed, and then clean it up. */ if (NUM_ENT(cp->page) == 0 && PGNO(cp->page) != cp->root) { - /* - * We already have a locked stack of pages. However, - * there are likely entries in the stack that aren't - * going to be emptied by removing the single reference - * to the emptied page (or one of its parents). - */ - for (epg = cp->csp; epg >= cp->sp; --epg) - if (NUM_ENT(epg->page) > 1) - break; - /* * We want to delete a single item out of the last page * that we're not deleting. */ - ret = __bam_dpages(dbc, epg); + ret = __bam_dpages(dbc, 0, 0); /* * Regardless of the return from __bam_dpages, it will @@ -764,7 +753,7 @@ __ram_ca(dbc_arg, op) */ DB_ASSERT(F_ISSET(cp_arg, C_RENUMBER)); - MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_LOCK(dbenv, dbenv->mtx_dblist); /* * Adjust the cursors. See the comment in __bam_ca_delete(). 
*/ @@ -780,7 +769,7 @@ __ram_ca(dbc_arg, op) for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { cp = (BTREE_CURSOR *)dbc->internal; @@ -789,7 +778,7 @@ __ram_ca(dbc_arg, op) order <= cp->order) order = cp->order + 1; } - MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbenv, dbp->mutex); } } else order = INVALID_ORDER; @@ -798,7 +787,7 @@ __ram_ca(dbc_arg, op) for (ldbp = __dblist_get(dbenv, dbp->adj_fileid); ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid; ldbp = LIST_NEXT(ldbp, dblistlinks)) { - MUTEX_THREAD_LOCK(dbenv, dbp->mutexp); + MUTEX_LOCK(dbenv, dbp->mutex); for (dbc = TAILQ_FIRST(&ldbp->active_queue); dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) { cp = (BTREE_CURSOR *)dbc->internal; @@ -868,9 +857,9 @@ iafter: if (!adjusted && C_LESSTHAN(cp_arg, cp)) { break; } } - MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp); + MUTEX_UNLOCK(dbp->dbenv, dbp->mutex); } - MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp); + MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist); return (found); } @@ -1037,6 +1026,18 @@ __ram_writeback(dbp) return (0); } + /* + * We step through the records, writing each one out. Use the record + * number and the dbp->get() function, instead of a cursor, so we find + * and write out "deleted" or non-existent records. The DB handle may + * be threaded, so allocate memory as we go. + */ + memset(&key, 0, sizeof(key)); + key.size = sizeof(db_recno_t); + key.data = &keyno; + memset(&data, 0, sizeof(data)); + F_SET(&data, DB_DBT_REALLOC); + /* Allocate a cursor. 
*/ if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0) return (ret); @@ -1064,7 +1065,7 @@ __ram_writeback(dbp) */ if ((ret = __ram_update(dbc, DB_MAX_RECORDS, 0)) != 0 && ret != DB_NOTFOUND) - return (ret); + goto err; /* * Close any existing file handle and re-open the file, truncating it. @@ -1082,18 +1083,6 @@ __ram_writeback(dbp) goto err; } - /* - * We step through the records, writing each one out. Use the record - * number and the dbp->get() function, instead of a cursor, so we find - * and write out "deleted" or non-existent records. The DB handle may - * be threaded, so allocate memory as we go. - */ - memset(&key, 0, sizeof(key)); - key.size = sizeof(db_recno_t); - key.data = &keyno; - memset(&data, 0, sizeof(data)); - F_SET(&data, DB_DBT_REALLOC); - /* * We'll need the delimiter if we're doing variable-length records, * and the pad character if we're doing fixed-length records. diff --git a/storage/bdb/btree/bt_rsearch.c b/storage/bdb/btree/bt_rsearch.c index 0027ec9e4f3..8e93ee213dd 100644 --- a/storage/bdb/btree/bt_rsearch.c +++ b/storage/bdb/btree/bt_rsearch.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -36,7 +36,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_rsearch.c,v 11.40 2004/07/23 17:21:09 bostic Exp $ + * $Id: bt_rsearch.c,v 12.5 2005/08/08 03:37:05 ubell Exp $ */ #include "db_config.h" @@ -100,44 +100,14 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) * * Retrieve the root page. */ - pg = cp->root; - stack = LF_ISSET(S_STACK) ? 1 : 0; - lock_mode = stack ? 
DB_LOCK_WRITE : DB_LOCK_READ; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - /* - * Decide if we need to save this page; if we do, write lock it. - * We deliberately don't lock-couple on this call. If the tree - * is tiny, i.e., one page, and two threads are busily updating - * the root page, we're almost guaranteed deadlocks galore, as - * each one gets a read lock and then blocks the other's attempt - * for a write lock. - */ - if (!stack && - ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) || - (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - lock_mode = DB_LOCK_WRITE; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - stack = 1; - } + if ((ret = __bam_get_root(dbc, cp->root, stop, flags, &stack)) != 0) + return (ret); + lock_mode = cp->csp->lock_mode; + lock = cp->csp->lock; + h = cp->csp->page; + BT_STK_CLR(cp); /* * If appending to the tree, set the record number now -- we have the * root page locked. @@ -260,15 +230,15 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) } --indx; + /* Return if this is the lowest page wanted. */ + if (stop == LEVEL(h)) { + BT_STK_ENTER(dbp->dbenv, + cp, h, indx, lock, lock_mode, ret); + if (ret != 0) + goto err; + return (0); + } if (stack) { - /* Return if this is the lowest page wanted. 
*/ - if (LF_ISSET(S_PARENT) && stop == h->level) { - BT_STK_ENTER(dbp->dbenv, - cp, h, indx, lock, lock_mode, ret); - if (ret != 0) - goto err; - return (0); - } BT_STK_PUSH(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); if (ret != 0) @@ -286,8 +256,8 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp) * never unlock it. */ if ((LF_ISSET(S_PARENT) && - (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) || - (h->level - 1) == LEAFLEVEL) + (u_int8_t)(stop + 1) >= (u_int8_t)(LEVEL(h) - 1)) || + (LEVEL(h) - 1) == LEAFLEVEL) stack = 1; if ((ret = __memp_fput(mpf, h, 0)) != 0) diff --git a/storage/bdb/btree/bt_search.c b/storage/bdb/btree/bt_search.c index 4fb07f44694..aedd5304a91 100644 --- a/storage/bdb/btree/bt_search.c +++ b/storage/bdb/btree/bt_search.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -39,7 +39,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_search.c,v 11.50 2004/07/23 17:21:09 bostic Exp $ + * $Id: bt_search.c,v 12.17 2005/11/10 21:17:13 ubell Exp $ */ #include "db_config.h" @@ -57,6 +57,102 @@ #include "dbinc/lock.h" #include "dbinc/mp.h" +/* + * __bam_get_root -- + * Fetch the root of a tree and see if we want to keep + * it in the stack. + * + * PUBLIC: int __bam_get_root __P((DBC *, db_pgno_t, int, u_int32_t, int *)); + */ +int +__bam_get_root(dbc, pg, slevel, flags, stack) + DBC *dbc; + db_pgno_t pg; + int slevel; + u_int32_t flags; + int *stack; +{ + BTREE_CURSOR *cp; + DB *dbp; + DB_LOCK lock; + DB_MPOOLFILE *mpf; + PAGE *h; + db_lockmode_t lock_mode; + int ret, t_ret; + + dbp = dbc->dbp; + mpf = dbp->mpf; + cp = (BTREE_CURSOR *)dbc->internal; + /* + * If write-locking pages, we need to know whether or not to acquire a + * write lock on a page before getting it. 
This depends on how deep it + * is in tree, which we don't know until we acquire the root page. So, + * if we need to lock the root page we may have to upgrade it later, + * because we won't get the correct lock initially. + * + * Retrieve the root page. + */ +try_again: + *stack = LF_ISSET(S_STACK) && + (dbc->dbtype == DB_RECNO || F_ISSET(cp, C_RECNUM)); + lock_mode = DB_LOCK_READ; + if (*stack || + LF_ISSET(S_DEL) || (LF_ISSET(S_NEXT) && LF_ISSET(S_WRITE))) + lock_mode = DB_LOCK_WRITE; + if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) + return (ret); + if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { + /* Did not read it, so we can release the lock */ + (void)__LPUT(dbc, lock); + return (ret); + } + + /* + * Decide if we need to save this page; if we do, write lock it. + * We deliberately don't lock-couple on this call. If the tree + * is tiny, i.e., one page, and two threads are busily updating + * the root page, we're almost guaranteed deadlocks galore, as + * each one gets a read lock and then blocks the other's attempt + * for a write lock. + */ + if (!*stack && + ((LF_ISSET(S_PARENT) && (u_int8_t)(slevel + 1) >= LEVEL(h)) || + (LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) || + (LF_ISSET(S_START) && slevel == LEVEL(h)))) { + if (!STD_LOCKING(dbc)) + goto no_relock; + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + return (ret); + lock_mode = DB_LOCK_WRITE; + if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) + return (ret); + if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { + /* Did not read it, so we can release the lock */ + (void)__LPUT(dbc, lock); + return (ret); + } + if (!((LF_ISSET(S_PARENT) && + (u_int8_t)(slevel + 1) >= LEVEL(h)) || + (LF_ISSET(S_WRITE) && LEVEL(h) == LEAFLEVEL) || + (LF_ISSET(S_START) && slevel == LEVEL(h)))) { + /* Someone else split the root, start over. 
*/ + ret = __memp_fput(mpf, h, 0); + if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) + ret = t_ret; + if (ret != 0) + return (ret); + goto try_again; + } +no_relock: *stack = 1; + } + BT_STK_ENTER(dbp->dbenv, cp, h, 0, lock, lock_mode, ret); + + return (ret); +} + /* * __bam_search -- * Search a btree for a key. @@ -65,12 +161,12 @@ * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *)); */ int -__bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp) +__bam_search(dbc, root_pgno, key, flags, slevel, recnop, exactp) DBC *dbc; db_pgno_t root_pgno; const DBT *key; u_int32_t flags; - int stop, *exactp; + int slevel, *exactp; db_recno_t *recnop; { BTREE *t; @@ -102,64 +198,17 @@ __bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp) * deleted items, and if we are locking pairs of pages. In addition, * if we're modifying record numbers, we have to lock the entire tree * regardless. See btree.h for more details. - * - * If write-locking pages, we need to know whether or not to acquire a - * write lock on a page before getting it. This depends on how deep it - * is in tree, which we don't know until we acquire the root page. So, - * if we need to lock the root page we may have to upgrade it later, - * because we won't get the correct lock initially. - * - * Retrieve the root page. */ -try_again: - pg = root_pgno == PGNO_INVALID ? cp->root : root_pgno; - stack = LF_ISSET(S_STACK) && F_ISSET(cp, C_RECNUM); - lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - /* - * Decide if we need to save this page; if we do, write lock it. - * We deliberately don't lock-couple on this call. 
If the tree - * is tiny, i.e., one page, and two threads are busily updating - * the root page, we're almost guaranteed deadlocks galore, as - * each one gets a read lock and then blocks the other's attempt - * for a write lock. - */ - if (!stack && - ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) || - (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - lock_mode = DB_LOCK_WRITE; - if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0) - return (ret); - if ((ret = __memp_fget(mpf, &pg, 0, &h)) != 0) { - /* Did not read it, so we can release the lock */ - (void)__LPUT(dbc, lock); - return (ret); - } - if (!((LF_ISSET(S_PARENT) && - (u_int8_t)(stop + 1) >= h->level) || - (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) { - /* Someone else split the root, start over. */ - ret = __memp_fput(mpf, h, 0); - if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) - ret = t_ret; - if (ret != 0) - return (ret); - goto try_again; - } - stack = 1; - } + if (root_pgno == PGNO_INVALID) + root_pgno = cp->root; + if ((ret = __bam_get_root(dbc, root_pgno, slevel, flags, &stack)) != 0) + return (ret); + lock_mode = cp->csp->lock_mode; + lock = cp->csp->lock; + h = cp->csp->page; + + BT_STK_CLR(cp); /* Choose a comparison function. */ func = F_ISSET(dbc, DBC_OPD) ? @@ -168,6 +217,23 @@ try_again: for (;;) { inp = P_INP(dbp, h); + adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX; + if (LF_ISSET(S_MIN | S_MAX)) { + if (LF_ISSET(S_MIN) || NUM_ENT(h) == 0) + indx = 0; + else if (TYPE(h) == P_LBTREE) + indx = NUM_ENT(h) - 2; + else + indx = NUM_ENT(h) - 1; + + if (LEVEL(h) == LEAFLEVEL || + (!LF_ISSET(S_START) && LEVEL(h) == slevel)) { + if (LF_ISSET(S_NEXT)) + goto get_next; + goto found; + } + goto next; + } /* * Do a binary search on the current page. 
If we're searching * a Btree leaf page, we have to walk the indices in groups of @@ -175,7 +241,6 @@ try_again: * page, they're an index per page item. If we find an exact * match on a leaf page, we're done. */ - adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX; for (base = 0, lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) { indx = base + ((lim >> 1) * adjust); @@ -183,8 +248,13 @@ try_again: __bam_cmp(dbp, key, h, indx, func, &cmp)) != 0) goto err; if (cmp == 0) { - if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) + if (LEVEL(h) == LEAFLEVEL || + (!LF_ISSET(S_START) && + LEVEL(h) == slevel)) { + if (LF_ISSET(S_NEXT)) + goto get_next; goto found; + } goto next; } if (cmp > 0) { @@ -197,10 +267,12 @@ try_again: * No match found. Base is the smallest index greater than * key and may be zero or a last + O_INDX index. * - * If it's a leaf page, return base as the "found" value. + * If it's a leaf page or the stopping point, + * return base as the "found" value. * Delete only deletes exact matches. */ - if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) { + if (LEVEL(h) == LEAFLEVEL || + (!LF_ISSET(S_START) && LEVEL(h) == slevel)) { *exactp = 0; if (LF_ISSET(S_EXACT)) { @@ -218,6 +290,43 @@ try_again: ret = t_ret; return (ret); } + if (LF_ISSET(S_NEXT)) { +get_next: /* + * The caller could have asked for a NEXT + * at the root if the tree recently collapsed. + */ + if (PGNO(h) == root_pgno) { + ret = DB_NOTFOUND; + goto err; + } + /* + * Save the root of the subtree + * and drop the rest of the subtree + * and search down again starting at + * the next child. 
+ */ + if ((ret = __LPUT(dbc, lock)) != 0) + goto err; + if ((ret = __memp_fput(mpf, h, 0)) != 0) + goto err; + h = NULL; + LF_SET(S_MIN); + LF_CLR(S_NEXT); + indx = cp->sp->indx + 1; + if (indx == NUM_ENT(cp->sp->page)) { + ret = DB_NOTFOUND; + cp->csp++; + goto err; + } + h = cp->sp->page; + cp->sp->page = NULL; + lock = cp->sp->lock; + LOCK_INIT(cp->sp->lock); + if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) + goto err; + stack = 1; + goto next; + } /* * !!! @@ -227,6 +336,8 @@ try_again: * to find an undeleted record. This is handled by the * calling routine. */ + if (LF_ISSET(S_DEL) && cp->csp == cp->sp) + cp->csp++; BT_STK_ENTER(dbp->dbenv, cp, h, base, lock, lock_mode, ret); if (ret != 0) @@ -252,8 +363,12 @@ next: if (recnop != NULL) pg = GET_BINTERNAL(dbp, h, indx)->pgno; + /* See if we are at the level to start stacking. */ + if (LF_ISSET(S_START) && slevel == LEVEL(h)) + stack = 1; + if (LF_ISSET(S_STK_ONLY)) { - if (stop == h->level) { + if (slevel == LEVEL(h)) { BT_STK_NUM(dbp->dbenv, cp, h, indx, ret); if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0) @@ -278,13 +393,24 @@ next: if (recnop != NULL) } } else if (stack) { /* Return if this is the lowest page wanted. */ - if (LF_ISSET(S_PARENT) && stop == h->level) { + if (LF_ISSET(S_PARENT) && slevel == LEVEL(h)) { BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); if (ret != 0) goto err; return (0); } + if (LF_ISSET(S_DEL) && NUM_ENT(h) > 1) { + /* + * There was a page with a singleton pointer + * to a non-empty subtree. + */ + cp->csp--; + if ((ret = __bam_stkrel(dbc, STK_NOLOCK)) != 0) + goto err; + stack = 0; + goto do_del; + } BT_STK_PUSH(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); if (ret != 0) @@ -302,16 +428,69 @@ next: if (recnop != NULL) * unlock it. 
*/ if ((LF_ISSET(S_PARENT) && - (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) || - (h->level - 1) == LEAFLEVEL) + (u_int8_t)(slevel + 1) >= (LEVEL(h) - 1)) || + (LEVEL(h) - 1) == LEAFLEVEL) stack = 1; - if ((ret = __memp_fput(mpf, h, 0)) != 0) - goto err; - h = NULL; + /* + * Returning a subtree. See if we have hit the start + * point if so save the parent and set stack. + * Otherwise free the parent and temporarily + * save this one. + * For S_DEL we need to find a page with 1 entry. + * For S_NEXT we want find the minimal subtree + * that contains the key and the next page. + * We save pages as long as we are at the right + * edge of the subtree. When we leave the right + * edge, then drop the subtree. + */ + if (!LF_ISSET(S_DEL | S_NEXT)) { + if ((ret = __memp_fput(mpf, h, 0)) != 0) + goto err; + goto lock_next; + } - lock_mode = stack && - LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ; + if ((LF_ISSET(S_DEL) && NUM_ENT(h) == 1)) { + stack = 1; + LF_SET(S_WRITE); + /* Push the parent. */ + cp->csp++; + /* Push this node. */ + BT_STK_PUSH(dbp->dbenv, cp, h, + indx, lock, lock_mode, ret); + if (ret != 0) + goto err; + LOCK_INIT(lock); + } else { + /* + * See if we want to save the tree so far. + * If we are looking for the next key, + * then we must save this node if we are + * at the end of the page. If not then + * discard anything we have saved so far. + * For delete only keep one node until + * we find a singleton. + */ +do_del: if (cp->csp->page != NULL) { + if (LF_ISSET(S_NEXT) && + indx == NUM_ENT(h) - 1) + cp->csp++; + else if ((ret = + __bam_stkrel(dbc, STK_NOLOCK)) != 0) + goto err; + } + /* Save this node. 
*/ + BT_STK_ENTER(dbp->dbenv, cp, + h, indx, lock, lock_mode, ret); + if (ret != 0) + goto err; + LOCK_INIT(lock); + } + +lock_next: h = NULL; + + if (stack && LF_ISSET(S_WRITE)) + lock_mode = DB_LOCK_WRITE; if ((ret = __db_lget(dbc, LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) { /* @@ -320,6 +499,8 @@ next: if (recnop != NULL) * descending the tree holding read-locks. */ (void)__LPUT(dbc, lock); + if (LF_ISSET(S_DEL | S_NEXT)) + cp->csp++; goto err; } } @@ -340,12 +521,12 @@ found: *exactp = 1; * all duplicate sets that are not on overflow pages exist on a * single leaf page. */ - if (TYPE(h) == P_LBTREE) { + if (TYPE(h) == P_LBTREE && NUM_ENT(h) > P_INDX) { if (LF_ISSET(S_DUPLAST)) while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) && inp[indx] == inp[indx + P_INDX]) indx += P_INDX; - else + else if (LF_ISSET(S_DUPFIRST)) while (indx > 0 && inp[indx] == inp[indx - P_INDX]) indx -= P_INDX; @@ -406,8 +587,11 @@ found: *exactp = 1; ret = t_ret; if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0) ret = t_ret; - } else + } else { + if (LF_ISSET(S_DEL) && cp->csp == cp->sp) + cp->csp++; BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret); + } if (ret != 0) goto err; @@ -471,6 +655,13 @@ __bam_stkrel(dbc, flags) */ epg->page = NULL; } + /* + * We set this if we need to release our pins, + * but are not logically ready to have the pages + * visible. + */ + if (LF_ISSET(STK_PGONLY)) + continue; if (LF_ISSET(STK_NOLOCK)) { if ((t_ret = __LPUT(dbc, epg->lock)) != 0 && ret == 0) ret = t_ret; @@ -480,7 +671,8 @@ __bam_stkrel(dbc, flags) } /* Clear the stack, all pages have been released. */ - BT_STK_CLR(cp); + if (!LF_ISSET(STK_PGONLY)) + BT_STK_CLR(cp); return (ret); } diff --git a/storage/bdb/btree/bt_split.c b/storage/bdb/btree/bt_split.c index 3e2cb4e6dfb..fb696ebf768 100644 --- a/storage/bdb/btree/bt_split.c +++ b/storage/bdb/btree/bt_split.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -36,7 +36,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: bt_split.c,v 11.66 2004/10/01 13:00:21 bostic Exp $ + * $Id: bt_split.c,v 12.4 2005/06/16 20:20:22 bostic Exp $ */ #include "db_config.h" @@ -56,7 +56,6 @@ static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *)); static int __bam_page __P((DBC *, EPG *, EPG *)); -static int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int)); static int __bam_psplit __P((DBC *, EPG *, PAGE *, PAGE *, db_indx_t *)); static int __bam_root __P((DBC *, EPG *)); static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *)); @@ -338,7 +337,7 @@ __bam_page(dbc, pp, cp) * page can't hold the new keys, and has to be split in turn, in which * case we want to release all the locks we can. */ - if ((ret = __bam_pinsert(dbc, pp, lp, rp, 1)) != 0) + if ((ret = __bam_pinsert(dbc, pp, lp, rp, BPI_SPACEONLY)) != 0) goto err; /* @@ -349,7 +348,7 @@ __bam_page(dbc, pp, cp) * a page that's not in our direct ancestry. Consider a cursor walking * backward through the leaf pages, that has our following page locked, * and is waiting on a lock for the page we're splitting. In that case - * we're going to deadlock here . It's probably OK, stepping backward + * we're going to deadlock here. It's probably OK, stepping backward * through the tree isn't a common operation. */ if (ISLEAF(cp->page) && NEXT_PGNO(cp->page) != PGNO_INVALID) { @@ -685,13 +684,15 @@ __ram_root(dbc, rootp, lp, rp) /* * __bam_pinsert -- * Insert a new key into a parent page, completing the split. 
+ * + * PUBLIC: int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int)); */ -static int -__bam_pinsert(dbc, parent, lchild, rchild, space_check) +int +__bam_pinsert(dbc, parent, lchild, rchild, flags) DBC *dbc; EPG *parent; PAGE *lchild, *rchild; - int space_check; + int flags; { BINTERNAL bi, *child_bi; BKEYDATA *child_bk, *tmp_bk; @@ -714,7 +715,7 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) /* If handling record numbers, count records split to the right page. */ nrecs = F_ISSET(cp, C_RECNUM) && - !space_check ? __bam_total(dbp, rchild) : 0; + !LF_ISSET(BPI_SPACEONLY) ? __bam_total(dbp, rchild) : 0; /* * Now we insert the new page's first key into the parent page, which @@ -750,7 +751,7 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) if (P_FREESPACE(dbp, ppage) < nbytes) return (DB_NEEDSPLIT); - if (space_check) + if (LF_ISSET(BPI_SPACEONLY)) return (0); /* Add a new record for the right page. */ @@ -780,7 +781,11 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) child_bk = GET_BKEYDATA(dbp, rchild, 0); switch (B_TYPE(child_bk->type)) { case B_KEYDATA: + nbytes = BINTERNAL_PSIZE(child_bk->len); + nksize = child_bk->len; + /* + * Prefix compression: * We set t->bt_prefix to NULL if we have a comparison * callback but no prefix compression callback. But, * if we're splitting in an off-page duplicates tree, @@ -792,6 +797,14 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) * as there's no way for an application to specify a * prefix compression callback that corresponds to its * comparison callback. + * + * No prefix compression if we don't have a compression + * function, or the key we'd compress isn't a normal + * key (for example, it references an overflow page). + * + * Generate a parent page key for the right child page + * from a comparison of the last key on the left child + * page and the first key on the right child page. 
*/ if (F_ISSET(dbc, DBC_OPD)) { if (dbp->dup_compare == __bam_defcmp) @@ -800,13 +813,8 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) func = NULL; } else func = t->bt_prefix; - - nbytes = BINTERNAL_PSIZE(child_bk->len); - nksize = child_bk->len; if (func == NULL) goto noprefix; - if (ppage->prev_pgno == PGNO_INVALID && off <= 1) - goto noprefix; tmp_bk = GET_BKEYDATA(dbp, lchild, NUM_ENT(lchild) - (TYPE(lchild) == P_LDUP ? O_INDX : P_INDX)); if (B_TYPE(tmp_bk->type) != B_KEYDATA) @@ -821,11 +829,11 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check) if ((n = BINTERNAL_PSIZE(nksize)) < nbytes) nbytes = n; else -noprefix: nksize = child_bk->len; + nksize = child_bk->len; - if (P_FREESPACE(dbp, ppage) < nbytes) +noprefix: if (P_FREESPACE(dbp, ppage) < nbytes) return (DB_NEEDSPLIT); - if (space_check) + if (LF_ISSET(BPI_SPACEONLY)) return (0); memset(&bi, 0, sizeof(bi)); @@ -849,7 +857,7 @@ noprefix: nksize = child_bk->len; if (P_FREESPACE(dbp, ppage) < nbytes) return (DB_NEEDSPLIT); - if (space_check) + if (LF_ISSET(BPI_SPACEONLY)) return (0); memset(&bi, 0, sizeof(bi)); @@ -883,7 +891,7 @@ noprefix: nksize = child_bk->len; if (P_FREESPACE(dbp, ppage) < nbytes) return (DB_NEEDSPLIT); - if (space_check) + if (LF_ISSET(BPI_SPACEONLY)) return (0); /* Add a new record for the right page. */ @@ -904,13 +912,13 @@ noprefix: nksize = child_bk->len; * If a Recno or Btree with record numbers AM page, or an off-page * duplicates tree, adjust the parent page's left page record count. */ - if (F_ISSET(cp, C_RECNUM)) { + if (F_ISSET(cp, C_RECNUM) && !LF_ISSET(BPI_NORECNUM)) { /* Log the change. 
*/ if (DBC_LOGGING(dbc)) { - if ((ret = __bam_cadjust_log(dbp, dbc->txn, - &LSN(ppage), 0, PGNO(ppage), - &LSN(ppage), parent->indx, -(int32_t)nrecs, 0)) != 0) - return (ret); + if ((ret = __bam_cadjust_log(dbp, dbc->txn, + &LSN(ppage), 0, PGNO(ppage), &LSN(ppage), + parent->indx, -(int32_t)nrecs, 0)) != 0) + return (ret); } else LSN_NOT_LOGGED(LSN(ppage)); diff --git a/storage/bdb/btree/bt_stat.c b/storage/bdb/btree/bt_stat.c index 9d99ee2c422..98e3b9561f7 100644 --- a/storage/bdb/btree/bt_stat.c +++ b/storage/bdb/btree/bt_stat.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_stat.c,v 11.78 2004/09/22 03:31:26 bostic Exp $ + * $Id: bt_stat.c,v 12.3 2005/06/16 20:20:23 bostic Exp $ */ #include "db_config.h" @@ -155,7 +155,6 @@ meta_only: /* Get metadata page statistics. */ sp->bt_metaflags = meta->dbmeta.flags; - sp->bt_maxkey = meta->maxkey; sp->bt_minkey = meta->minkey; sp->bt_re_len = meta->re_len; sp->bt_re_pad = meta->re_pad; @@ -246,16 +245,12 @@ __bam_stat_print(dbc, flags) } __db_msg(dbenv, "%s\tByte order", s); __db_prflags(dbenv, NULL, sp->bt_metaflags, fn, NULL, "\tFlags"); - if (dbp->type == DB_BTREE) { -#ifdef NOT_IMPLEMENTED - __db_dl(dbenv, "Maximum keys per-page", (u_long)sp->bt_maxkey); -#endif + if (dbp->type == DB_BTREE) __db_dl(dbenv, "Minimum keys per-page", (u_long)sp->bt_minkey); - } if (dbp->type == DB_RECNO) { __db_dl(dbenv, "Fixed-length record size", (u_long)sp->bt_re_len); - __db_dl(dbenv, + __db_msg(dbenv, "%#x\tFixed-length record pad", (u_int)sp->bt_re_pad); } __db_dl(dbenv, diff --git a/storage/bdb/btree/bt_upgrade.c b/storage/bdb/btree/bt_upgrade.c index f899017897c..8ace2864cd3 100644 --- a/storage/bdb/btree/bt_upgrade.c +++ b/storage/bdb/btree/bt_upgrade.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_upgrade.c,v 11.30 2004/01/28 03:35:49 bostic Exp $ + * $Id: bt_upgrade.c,v 12.1 2005/06/16 20:20:23 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/btree/bt_verify.c b/storage/bdb/btree/bt_verify.c index 6b78cbd17ea..055cc46892e 100644 --- a/storage/bdb/btree/bt_verify.c +++ b/storage/bdb/btree/bt_verify.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2004 + * Copyright (c) 1999-2005 * Sleepycat Software. All rights reserved. * - * $Id: bt_verify.c,v 1.97 2004/10/11 18:47:46 bostic Exp $ + * $Id: bt_verify.c,v 12.13 2005/11/11 20:27:49 ubell Exp $ */ #include "db_config.h" @@ -89,10 +89,8 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags) } else pip->bt_minkey = meta->minkey; - /* bt_maxkey: unsupported so no constraints. */ - pip->bt_maxkey = meta->maxkey; - /* re_len: no constraints on this (may be zero or huge--we make rope) */ + pip->re_pad = meta->re_pad; pip->re_len = meta->re_len; /* @@ -1618,8 +1616,17 @@ bad_prev: isbad = 1; if (relenp) *relenp = relen; } - if (LF_ISSET(ST_RECNUM)) + if (LF_ISSET(ST_RECNUM)) { + if (child->nrecs != child_nrecs) { + isbad = 1; + EPRINT((dbenv, + "Page %lu: record count incorrect: actual %lu, in record %lu", + (u_long)child->pgno, + (u_long)child_nrecs, + (u_long)child->nrecs)); + } nrecs += child_nrecs; + } if (isbad == 0 && level != child_level + 1) { isbad = 1; EPRINT((dbenv, @@ -2037,38 +2044,36 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) DBT *key; u_int32_t flags; { - DBT dbt, unkdbt; - DB_ENV *dbenv; BKEYDATA *bk; BOVERFLOW *bo; + DBT dbt, unknown_key, unknown_data; + DB_ENV *dbenv; VRFY_ITEM *pgmap; - db_indx_t i, beg, end, *inp; + db_indx_t i, last, beg, end, *inp; u_int32_t himark; void *ovflbuf; - int t_ret, ret, err_ret; + int ret, t_ret, t2_ret; dbenv = dbp->dbenv; - - /* Shut up lint. 
*/ - COMPQUIET(end, 0); - ovflbuf = pgmap = NULL; - err_ret = ret = 0; inp = P_INP(dbp, h); memset(&dbt, 0, sizeof(DBT)); dbt.flags = DB_DBT_REALLOC; - memset(&unkdbt, 0, sizeof(DBT)); - unkdbt.size = (u_int32_t)(strlen("UNKNOWN") + 1); - unkdbt.data = "UNKNOWN"; + memset(&unknown_key, 0, sizeof(DBT)); + unknown_key.size = (u_int32_t)strlen("UNKNOWN_KEY"); + unknown_key.data = "UNKNOWN_KEY"; + memset(&unknown_data, 0, sizeof(DBT)); + unknown_data.size = (u_int32_t)strlen("UNKNOWN_DATA"); + unknown_data.data = "UNKNOWN_DATA"; /* * Allocate a buffer for overflow items. Start at one page; * __db_safe_goff will realloc as needed. */ if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0) - return (ret); + goto err; if (LF_ISSET(DB_AGGRESSIVE) && (ret = __os_calloc(dbenv, dbp->pgsize, sizeof(pgmap[0]), &pgmap)) != 0) @@ -2077,161 +2082,185 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags) /* * Loop through the inp array, spitting out key/data pairs. * - * If we're salvaging normally, loop from 0 through NUM_ENT(h). - * If we're being aggressive, loop until we hit the end of the page-- + * If we're salvaging normally, loop from 0 through NUM_ENT(h). If + * we're being aggressive, loop until we hit the end of the page -- * NUM_ENT() may be bogus. */ himark = dbp->pgsize; - for (i = 0;; i += O_INDX) { + for (i = 0, last = UINT16_MAX;; i += O_INDX) { /* If we're not aggressive, break when we hit NUM_ENT(h). */ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h)) break; /* Verify the current item. */ - ret = __db_vrfy_inpitem(dbp, - h, pgno, i, 1, flags, &himark, NULL); - /* If this returned a fatality, it's time to break. */ - if (ret == DB_VERIFY_FATAL) { + t_ret = + __db_vrfy_inpitem(dbp, h, pgno, i, 1, flags, &himark, NULL); + + if (t_ret != 0) { /* - * Don't return DB_VERIFY_FATAL; it's private - * and means only that we can't go on with this - * page, not with the whole database. 
It's - * not even an error if we've run into it - * after NUM_ENT(h). + * If this is a btree leaf and we've printed out a key + * but not its associated data item, fix this imbalance + * by printing an "UNKNOWN_DATA". */ - ret = (i < NUM_ENT(h)) ? DB_VERIFY_BAD : 0; - break; + if (pgtype == P_LBTREE && i % P_INDX == 1 && + last == i - 1 && (t2_ret = __db_vrfy_prdbt( + &unknown_data, + 0, " ", handle, callback, 0, vdp)) != 0) { + if (ret == 0) + ret = t2_ret; + goto err; + } + + /* + * Don't return DB_VERIFY_FATAL; it's private and means + * only that we can't go on with this page, not with + * the whole database. It's not even an error if we've + * run into it after NUM_ENT(h). + */ + if (t_ret == DB_VERIFY_FATAL) { + if (i < NUM_ENT(h) && ret == 0) + ret = DB_VERIFY_BAD; + break; + } + continue; } /* * If this returned 0, it's safe to print or (carefully) * try to fetch. + * + * We only print deleted items if DB_AGGRESSIVE is set. */ - if (ret == 0) { - /* - * We only want to print deleted items if - * DB_AGGRESSIVE is set. - */ - bk = GET_BKEYDATA(dbp, h, i); - if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type)) - continue; + bk = GET_BKEYDATA(dbp, h, i); + if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type)) + continue; - /* - * We're going to go try to print the next item. If - * key is non-NULL, we're a dup page, so we've got to - * print the key first, unless SA_SKIPFIRSTKEY is set - * and we're on the first entry. - */ - if (key != NULL && - (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY))) - if ((ret = __db_vrfy_prdbt(key, - 0, " ", handle, callback, 0, vdp)) != 0) - err_ret = ret; + /* + * If this is a btree leaf and we're about to print out a data + * item for which we didn't print out a key, fix this imbalance + * by printing an "UNKNOWN_KEY". 
+ */ + if (pgtype == P_LBTREE && i % P_INDX == 1 && + last != i - 1 && (t_ret = __db_vrfy_prdbt( + &unknown_key, 0, " ", handle, callback, 0, vdp)) != 0) { + if (ret == 0) + ret = t_ret; + goto err; + } + last = i; - beg = inp[i]; - switch (B_TYPE(bk->type)) { - case B_DUPLICATE: - end = beg + BOVERFLOW_SIZE - 1; - /* - * If we're not on a normal btree leaf page, - * there shouldn't be off-page - * dup sets. Something's confused; just - * drop it, and the code to pick up unlinked - * offpage dup sets will print it out - * with key "UNKNOWN" later. - */ - if (pgtype != P_LBTREE) - break; - - bo = (BOVERFLOW *)bk; - - /* - * If the page number is unreasonable, or - * if this is supposed to be a key item, - * just spit out "UNKNOWN"--the best we - * can do is run into the data items in the - * unlinked offpage dup pass. - */ - if (!IS_VALID_PGNO(bo->pgno) || - (i % P_INDX == 0)) { - /* Not much to do on failure. */ - if ((ret = - __db_vrfy_prdbt(&unkdbt, 0, " ", - handle, callback, 0, vdp)) != 0) - err_ret = ret; - break; - } - - if ((ret = __db_salvage_duptree(dbp, - vdp, bo->pgno, &dbt, handle, callback, - flags | SA_SKIPFIRSTKEY)) != 0) - err_ret = ret; - - break; - case B_KEYDATA: - end = (db_indx_t)DB_ALIGN( - beg + bk->len, sizeof(u_int32_t)) - 1; - dbt.data = bk->data; - dbt.size = bk->len; - if ((ret = __db_vrfy_prdbt(&dbt, - 0, " ", handle, callback, 0, vdp)) != 0) - err_ret = ret; - break; - case B_OVERFLOW: - end = beg + BOVERFLOW_SIZE - 1; - bo = (BOVERFLOW *)bk; - if ((ret = __db_safe_goff(dbp, vdp, - bo->pgno, &dbt, &ovflbuf, flags)) != 0) { - err_ret = ret; - /* We care about err_ret more. */ - (void)__db_vrfy_prdbt(&unkdbt, 0, " ", - handle, callback, 0, vdp); - break; - } - if ((ret = __db_vrfy_prdbt(&dbt, - 0, " ", handle, callback, 0, vdp)) != 0) - err_ret = ret; - break; - default: - /* - * We should never get here; __db_vrfy_inpitem - * should not be returning 0 if bk->type - * is unrecognizable. 
- */ - DB_ASSERT(0); - return (EINVAL); + /* + * We're going to go try to print the next item. If key is + * non-NULL, we're a dup page, so we've got to print the key + * first, unless SA_SKIPFIRSTKEY is set and we're on the first + * entry. + */ + if (key != NULL && (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY))) + if ((t_ret = __db_vrfy_prdbt(key, + 0, " ", handle, callback, 0, vdp)) != 0) { + if (ret == 0) + ret = t_ret; + goto err; } + beg = inp[i]; + switch (B_TYPE(bk->type)) { + case B_DUPLICATE: + end = beg + BOVERFLOW_SIZE - 1; /* - * If we're being aggressive, mark the beginning - * and end of the item; we'll come back and print - * whatever "junk" is in the gaps in case we had - * any bogus inp elements and thereby missed stuff. + * If we're not on a normal btree leaf page, there + * shouldn't be off-page dup sets. Something's + * confused; just drop it, and the code to pick up + * unlinked offpage dup sets will print it out + * with key "UNKNOWN" later. */ - if (LF_ISSET(DB_AGGRESSIVE)) { - pgmap[beg] = VRFY_ITEM_BEGIN; - pgmap[end] = VRFY_ITEM_END; + if (pgtype != P_LBTREE) + break; + + bo = (BOVERFLOW *)bk; + + /* + * If the page number is unreasonable, or if this is + * supposed to be a key item, output "UNKNOWN_KEY" -- + * the best we can do is run into the data items in + * the unlinked offpage dup pass. + */ + if (!IS_VALID_PGNO(bo->pgno) || (i % P_INDX == 0)) { + /* Not much to do on failure. */ + if ((t_ret = __db_vrfy_prdbt(&unknown_key, + 0, " ", handle, callback, 0, vdp)) != 0) { + if (ret == 0) + ret = t_ret; + goto err; + } + break; } + + /* Don't stop on error. 
*/ + if ((t_ret = __db_salvage_duptree(dbp, + vdp, bo->pgno, &dbt, handle, callback, + flags | SA_SKIPFIRSTKEY)) != 0 && ret == 0) + ret = t_ret; + + break; + case B_KEYDATA: + end = (db_indx_t)DB_ALIGN( + beg + bk->len, sizeof(u_int32_t)) - 1; + dbt.data = bk->data; + dbt.size = bk->len; + if ((t_ret = __db_vrfy_prdbt(&dbt, + 0, " ", handle, callback, 0, vdp)) != 0) { + if (ret == 0) + ret = t_ret; + goto err; + } + break; + case B_OVERFLOW: + end = beg + BOVERFLOW_SIZE - 1; + bo = (BOVERFLOW *)bk; + + /* Don't stop on error. */ + if ((t_ret = __db_safe_goff(dbp, vdp, + bo->pgno, &dbt, &ovflbuf, flags)) != 0 && ret == 0) + ret = t_ret; + if ((t_ret = __db_vrfy_prdbt( + t_ret == 0 ? &dbt : &unknown_key, + 0, " ", handle, callback, 0, vdp)) != 0 && ret == 0) + ret = t_ret; + break; + default: + /* + * We should never get here; __db_vrfy_inpitem should + * not be returning 0 if bk->type is unrecognizable. + */ + DB_ASSERT(0); + if (ret == 0) + ret = EINVAL; + goto err; + } + + /* + * If we're being aggressive, mark the beginning and end of + * the item; we'll come back and print whatever "junk" is in + * the gaps in case we had any bogus inp elements and thereby + * missed stuff. + */ + if (LF_ISSET(DB_AGGRESSIVE)) { + pgmap[beg] = VRFY_ITEM_BEGIN; + pgmap[end] = VRFY_ITEM_END; } } - /* - * If i is odd and this is a btree leaf, we've printed out a key but not - * a datum; fix this imbalance by printing an "UNKNOWN". - */ - if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret = - __db_vrfy_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0)) - err_ret = ret; - err: if (pgmap != NULL) __os_free(dbenv, pgmap); - __os_free(dbenv, ovflbuf); + if (ovflbuf != NULL) + __os_free(dbenv, ovflbuf); /* Mark this page as done. */ - if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0) - return (t_ret); + if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0 && ret == 0) + ret = t_ret; - return ((err_ret != 0) ? 
err_ret : ret); + return (ret); } /* diff --git a/storage/bdb/btree/btree.src b/storage/bdb/btree/btree.src index c4f761de0af..1827cffcc53 100644 --- a/storage/bdb/btree/btree.src +++ b/storage/bdb/btree/btree.src @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: btree.src,v 10.42 2004/06/17 17:35:12 bostic Exp $ + * $Id: btree.src,v 12.3 2005/08/08 03:37:05 ubell Exp $ */ PREFIX __bam @@ -210,10 +210,10 @@ END BEGIN relink 147 /* Fileid of db affected. */ DB fileid int32_t ld -/* The page being changed. */ +/* The page being removed. */ ARG pgno db_pgno_t lu -/* The page's original lsn. */ -POINTER lsn DB_LSN * lu +/* The new page number, if any. */ +ARG new_pgno db_pgno_t lu /* The previous page. */ ARG prev db_pgno_t lu /* The previous page's original lsn. */ @@ -223,3 +223,30 @@ ARG next db_pgno_t lu /* The previous page's original lsn. */ POINTER lsn_next DB_LSN * lu END + +/* + * BTREE-merge -- Handles merging of pages during a compaction. + */ +BEGIN merge 148 +DB fileid int32_t ld +ARG pgno db_pgno_t lu +POINTER lsn DB_LSN * lu +ARG npgno db_pgno_t lu +POINTER nlsn DB_LSN * lu +DBT hdr DBT s +DBT data DBT s +DBT ind DBT s +END + +/* + * BTREE-pgno -- Handles replacing a page number in the record + * refernece on pgno by indx. 
+ */ +BEGIN pgno 149 +DB fileid int32_t ld +ARG pgno db_pgno_t lu +POINTER lsn DB_LSN * lu +ARG indx u_int32_t lu +ARG opgno db_pgno_t lu +ARG npgno db_pgno_t lu +END diff --git a/storage/bdb/build_vxworks/BerkeleyDB.wsp b/storage/bdb/build_vxworks/BerkeleyDB.wsp deleted file mode 100644 index ce2e71b0eb3..00000000000 --- a/storage/bdb/build_vxworks/BerkeleyDB.wsp +++ /dev/null @@ -1,29 +0,0 @@ -Document file - DO NOT EDIT - - CORE_INFO_TYPE -Workspace - - - CORE_INFO_VERSION -2.0 - - - projectList -$(PRJ_DIR)/BerkeleyDB.wpj \ - $(PRJ_DIR)/db_archive/db_archive.wpj \ - $(PRJ_DIR)/db_checkpoint/db_checkpoint.wpj \ - $(PRJ_DIR)/db_deadlock/db_deadlock.wpj \ - $(PRJ_DIR)/db_dump/db_dump.wpj \ - $(PRJ_DIR)/db_load/db_load.wpj \ - $(PRJ_DIR)/db_printlog/db_printlog.wpj \ - $(PRJ_DIR)/db_recover/db_recover.wpj \ - $(PRJ_DIR)/db_stat/db_stat.wpj \ - $(PRJ_DIR)/db_upgrade/db_upgrade.wpj \ - $(PRJ_DIR)/db_verify/db_verify.wpj \ - $(PRJ_DIR)/dbdemo/dbdemo.wpj - - - userComments - - - diff --git a/storage/bdb/build_vxworks/db_deadlock/db_deadlock.c b/storage/bdb/build_vxworks/db_deadlock/db_deadlock.c deleted file mode 100644 index 32689d20345..00000000000 --- a/storage/bdb/build_vxworks/db_deadlock/db_deadlock.c +++ /dev/null @@ -1,251 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1996-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: db_deadlock.c,v 11.45 2004/03/24 15:13:12 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef lint -static const char copyright[] = - "Copyright (c) 1996-2004\nSleepycat Software Inc. 
All rights reserved.\n"; -#endif - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include -#include -#include -#include -#include -#endif - -#include "db_int.h" - -int db_deadlock_main __P((int, char *[])); -int db_deadlock_usage __P((void)); -int db_deadlock_version_check __P((const char *)); - -int -db_deadlock(args) - char *args; -{ - int argc; - char **argv; - - __db_util_arg("db_deadlock", args, &argc, &argv); - return (db_deadlock_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS); -} - -#include -#define ERROR_RETURN ERROR - -int -db_deadlock_main(argc, argv) - int argc; - char *argv[]; -{ - extern char *optarg; - extern int optind, __db_getopt_reset; - const char *progname = "db_deadlock"; - DB_ENV *dbenv; - u_int32_t atype; - time_t now; - u_long secs, usecs; - int ch, exitval, ret, verbose; - char *home, *logfile, *str; - - if ((ret = db_deadlock_version_check(progname)) != 0) - return (ret); - - dbenv = NULL; - atype = DB_LOCK_DEFAULT; - home = logfile = NULL; - secs = usecs = 0; - exitval = verbose = 0; - __db_getopt_reset = 1; - while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF) - switch (ch) { - case 'a': - switch (optarg[0]) { - case 'e': - atype = DB_LOCK_EXPIRE; - break; - case 'm': - atype = DB_LOCK_MAXLOCKS; - break; - case 'n': - atype = DB_LOCK_MINLOCKS; - break; - case 'o': - atype = DB_LOCK_OLDEST; - break; - case 'W': - atype = DB_LOCK_MAXWRITE; - break; - case 'w': - atype = DB_LOCK_MINWRITE; - break; - case 'y': - atype = DB_LOCK_YOUNGEST; - break; - default: - return (db_deadlock_usage()); - /* NOTREACHED */ - } - if (optarg[1] != '\0') - return (db_deadlock_usage()); - break; - case 'h': - home = optarg; - break; - case 'L': - logfile = optarg; - break; - case 't': - if ((str = strchr(optarg, '.')) != NULL) { - *str++ = '\0'; - if (*str != '\0' && __db_getulong( - NULL, progname, str, 0, LONG_MAX, &usecs)) - return 
(EXIT_FAILURE); - } - if (*optarg != '\0' && __db_getulong( - NULL, progname, optarg, 0, LONG_MAX, &secs)) - return (EXIT_FAILURE); - if (secs == 0 && usecs == 0) - return (db_deadlock_usage()); - - break; - - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - verbose = 1; - break; - case 'w': /* Undocumented. */ - /* Detect every 100ms (100000 us) when polling. */ - secs = 0; - usecs = 100000; - break; - case '?': - default: - return (db_deadlock_usage()); - } - argc -= optind; - argv += optind; - - if (argc != 0) - return (db_deadlock_usage()); - - /* Handle possible interruptions. */ - __db_util_siginit(); - - /* Log our process ID. */ - if (logfile != NULL && __db_util_logset(progname, logfile)) - goto shutdown; - - /* - * Create an environment object and initialize it for error - * reporting. - */ - if ((ret = db_env_create(&dbenv, 0)) != 0) { - fprintf(stderr, - "%s: db_env_create: %s\n", progname, db_strerror(ret)); - goto shutdown; - } - - dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, progname); - - if (verbose) { - (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1); - (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1); - } - - /* An environment is required. */ - if ((ret = - dbenv->open(dbenv, home, DB_INIT_LOCK | DB_USE_ENVIRON, 0)) != 0) { - dbenv->err(dbenv, ret, "open"); - goto shutdown; - } - - while (!__db_util_interrupted()) { - if (verbose) { - (void)time(&now); - dbenv->errx(dbenv, "running at %.24s", ctime(&now)); - } - - if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) { - dbenv->err(dbenv, ret, "DB_ENV->lock_detect"); - goto shutdown; - } - - /* Make a pass every "secs" secs and "usecs" usecs. */ - if (secs == 0 && usecs == 0) - break; - __os_sleep(dbenv, secs, usecs); - } - - if (0) { -shutdown: exitval = 1; - } - - /* Clean up the logfile. */ - if (logfile != NULL) - (void)remove(logfile); - - /* Clean up the environment. 
*/ - if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) { - exitval = 1; - fprintf(stderr, - "%s: dbenv->close: %s\n", progname, db_strerror(ret)); - } - - /* Resend any caught signal. */ - __db_util_sigresend(); - - return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE); -} - -int -db_deadlock_usage() -{ - (void)fprintf(stderr, "%s\n\t%s\n", - "usage: db_deadlock [-Vv]", - "[-a e | m | n | o | W | w | y] [-h home] [-L file] [-t sec.usec]"); - return (EXIT_FAILURE); -} - -int -db_deadlock_version_check(progname) - const char *progname; -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) { - fprintf(stderr, - "%s: version %d.%d doesn't match library version %d.%d\n", - progname, DB_VERSION_MAJOR, DB_VERSION_MINOR, - v_major, v_minor); - return (EXIT_FAILURE); - } - return (0); -} diff --git a/storage/bdb/build_vxworks/dbdemo/README b/storage/bdb/build_vxworks/dbdemo/README deleted file mode 100644 index 1a2c7c7d073..00000000000 --- a/storage/bdb/build_vxworks/dbdemo/README +++ /dev/null @@ -1,39 +0,0 @@ -This README describes the steps needed to run a demo example of BerkeleyDB. - -1. Read the pages in the Reference Guide that describe building - BerkeleyDB on VxWorks: - - $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/intro.html - $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/notes.html - $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/faq.html - -2. Launch Tornado 2.0 and open up the BerkeleyDB project. - -3. Add the demo project to that workspace: - - $(WIND_BASE)/target/src/BerkeleyDB/build_vxworks/demo/dbdemo.wpj - -4. Build BerkeleyDB as described in the Reference Guide. - -5. Build the dbdemo project. - -6. Download BerkeleyDB onto the target. - -7. Download the dbdemo project onto the target. - -8. 
Open a windsh to the target and run the demo: - - -> dbdemo "/" - - Where pathname is a pathname string pointing to a directory that the - demo can create a database in. That directory should already exist. - The dbname is the name for the database. For example: - - -> dbdemo "/tmp/demo.db" - -9. The demo program will ask for input. You can type in any string. - The program will add an entry to the database with that string as - the key and the reverse of that string as the data item for that key. - It will continue asking for input until you hit ^D or enter "quit". - Upon doing so, the demo program will display all the keys you have - entered as input and their data items. diff --git a/storage/bdb/build_win32/Berkeley_DB.dsw b/storage/bdb/build_win32/Berkeley_DB.dsw index 8e39c20eff3..91440869b83 100644 --- a/storage/bdb/build_win32/Berkeley_DB.dsw +++ b/storage/bdb/build_win32/Berkeley_DB.dsw @@ -21,12 +21,15 @@ Package=<4> Project_Dep_Name db_deadlock End Project Dependency Begin Project Dependency - Project_Dep_Name DB_DLL + Project_Dep_Name db_dll End Project Dependency Begin Project Dependency Project_Dep_Name db_dump End Project Dependency Begin Project Dependency + Project_Dep_Name db_hotbackup + End Project Dependency + Begin Project Dependency Project_Dep_Name db_load End Project Dependency Begin Project Dependency @@ -45,7 +48,7 @@ Package=<4> Project_Dep_Name db_verify End Project Dependency Begin Project Dependency - Project_Dep_Name DB_Static + Project_Dep_Name db_static End Project Dependency Begin Project Dependency Project_Dep_Name ex_access @@ -63,9 +66,18 @@ Package=<4> Project_Dep_Name ex_mpool End Project Dependency Begin Project Dependency + Project_Dep_Name ex_sequence + End Project Dependency + Begin Project Dependency Project_Dep_Name ex_tpcb End Project Dependency Begin Project Dependency + Project_Dep_Name example_database_load + End Project Dependency + Begin Project Dependency + Project_Dep_Name example_database_read + End Project 
Dependency + Begin Project Dependency Project_Dep_Name excxx_access End Project Dependency Begin Project Dependency @@ -81,10 +93,31 @@ Package=<4> Project_Dep_Name excxx_mpool End Project Dependency Begin Project Dependency + Project_Dep_Name excxx_sequence + End Project Dependency + Begin Project Dependency Project_Dep_Name excxx_tpcb End Project Dependency Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name excxx_example_database_load + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_example_database_read + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_repquote + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_txnguide + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_txnguide_inmem + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_txnguide + End Project Dependency + Begin Project Dependency + Project_Dep_Name excxx_txnguide_inmem End Project Dependency }}} @@ -99,7 +132,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -114,7 +147,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -129,7 +162,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name DB_DLL + Project_Dep_Name db_dll End Project Dependency }}} @@ -156,7 +189,22 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "db_hotbackup"=.\db_hotbackup.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll End Project Dependency }}} @@ -168,29 +216,11 @@ Package=<5> {{{ }}} -Package=<4> -{{{ - Begin Project Dependency - 
Project_Dep_Name DB_DLL - End Project Dependency -}}} - -############################################################################### - -Project: "db_lib"=.\db_lib.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - Package=<4> {{{ Begin Project Dependency Project_Dep_Name db_dll End Project Dependency - Begin Project Dependency - Project_Dep_Name db_static - End Project Dependency }}} ############################################################################### @@ -204,7 +234,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -219,7 +249,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -234,12 +264,24 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} ############################################################################### +Project: "db_small"=.\db_small.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + Project: "db_stat"=.\db_stat.dsp - Package Owner=<4> Package=<5> @@ -249,7 +291,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -276,7 +318,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name DB_DLL + Project_Dep_Name db_dll End Project Dependency }}} @@ -309,7 +351,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -324,7 +366,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -339,7 +381,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll 
End Project Dependency }}} @@ -354,7 +396,58 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "ex_csvcode"=.\ex_csvcode.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "ex_csvload"=.\ex_csvload.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_csvcode + End Project Dependency +}}} + +############################################################################### + +Project: "ex_csvquery"=.\ex_csvquery.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency + Begin Project Dependency + Project_Dep_Name ex_csvcode End Project Dependency }}} @@ -369,7 +462,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -384,7 +477,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -399,7 +492,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -414,7 +507,22 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "ex_sequence"=.\ex_sequence.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + 
Project_Dep_Name db_dll End Project Dependency }}} @@ -429,7 +537,67 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "ex_txnguide"=.\ex_txnguide.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "ex_txnguide_inmem"=.\ex_txnguide_inmem.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "example_database_load"=.\example_database_load.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "example_database_read"=.\example_database_read.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll End Project Dependency }}} @@ -444,7 +612,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -459,7 +627,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -474,7 +642,37 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_example_database_load"=.\excxx_example_database_load.dsp - Package Owner=<4> + 
+Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_example_database_read"=.\excxx_example_database_read.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll End Project Dependency }}} @@ -489,7 +687,7 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll End Project Dependency }}} @@ -504,7 +702,22 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_sequence"=.\excxx_sequence.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll End Project Dependency }}} @@ -519,7 +732,37 @@ Package=<5> Package=<4> {{{ Begin Project Dependency - Project_Dep_Name db_lib + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_txnguide"=.\excxx_txnguide.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll + End Project Dependency +}}} + +############################################################################### + +Project: "excxx_txnguide_inmem"=.\excxx_txnguide_inmem.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ + Begin Project Dependency + Project_Dep_Name db_dll End Project Dependency }}} diff --git a/storage/bdb/build_win32/app_dsp.src b/storage/bdb/build_win32/app_dsp.src index cc4d0bab41c..e8644c71299 100644 --- a/storage/bdb/build_win32/app_dsp.src +++ b/storage/bdb/build_win32/app_dsp.src @@ -4,7 +4,7 @@ # TARGTYPE "Win32 (x86) Console 
Application" 0x0103 -CFG=@project_name@ - Win32 Debug Static +CFG=@project_name@ - Win32 Debug !MESSAGE This is not a valid makefile. To build this project using NMAKE, !MESSAGE use the Export Makefile command and run !MESSAGE @@ -13,14 +13,18 @@ CFG=@project_name@ - Win32 Debug Static !MESSAGE You can specify a configuration when running NMAKE !MESSAGE by defining the macro CFG on the command line. For example: !MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" !MESSAGE !MESSAGE Possible choices for configuration are: !MESSAGE !MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application") !MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Console Application") +!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Console Application") !MESSAGE # Begin Project @@ -34,99 +38,207 @@ RSC=rc.exe # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" +# PROP BASE Output_Dir "@bin_rel_dest@" +# PROP BASE Intermediate_Dir "Release/@project_name@" # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" +# PROP 
Output_Dir "@bin_rel_dest@" +# PROP Intermediate_Dir "Release/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c # ADD BASE RSC /l 0x409 /d "NDEBUG" # ADD RSC /l 0x409 /d "NDEBUG" BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@" +@POST_BUILD@ !ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Output_Dir "@bin_debug_dest@" +# PROP BASE Intermediate_Dir "Debug/@project_name@" # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" +# PROP Output_Dir "@bin_debug_dest@" +# PROP Intermediate_Dir "Debug/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" 
/YX /FD /c -# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c # ADD BASE RSC /l 0x409 /d "_DEBUG" # ADD RSC /l 0x409 /d "_DEBUG" BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@" +@POST_BUILD@ -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static" +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@bin_debug_dest@_ASCII" +# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@bin_debug_dest@_ASCII" +# PROP Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_ASCII" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" +# PROP BASE Output_Dir "@bin_rel_dest@_ASCII" +# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" # PROP BASE Ignore_Export_Lib 0 # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_static" -# PROP Intermediate_Dir "Release_static" +# PROP Output_Dir "@bin_rel_dest@_ASCII" +# PROP Intermediate_Dir "Release_ASCII/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /FD /c # ADD BASE RSC /l 0x409 /d "NDEBUG" # ADD RSC /l 0x409 /d "NDEBUG" BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:I386 +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_ASCII" +@POST_BUILD@ -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Output_Dir "@bin_debug_dest@_AMD64" +# PROP BASE Intermediate_Dir "Debug_AMD64/@project_name@" # PROP BASE Ignore_Export_Lib 0 # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_static" -# PROP Intermediate_Dir "Debug_static" +# PROP Output_Dir "@bin_debug_dest@_AMD64" +# PROP Intermediate_Dir "Debug_AMD64/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c # ADD BASE RSC /l 0x409 /d "_DEBUG" # ADD RSC /l 0x409 /d "_DEBUG" BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no -# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:AMD64 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:AMD64 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_AMD64" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@bin_rel_dest@_AMD64" +# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@bin_rel_dest@_AMD64" +# PROP Intermediate_Dir "Release_AMD64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:AMD64 /nodefaultlib:"libcmt" +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:AMD64 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_AMD64" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@bin_debug_dest@_IA64" +# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@bin_debug_dest@_IA64" +# PROP Intermediate_Dir "Debug_IA64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "WIN32" /D "_DEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /debug /machine:IA64 /nodefaultlib:"libcmtd" /fixed:no /libpath:"@lib_debug_dest@_IA64" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@bin_rel_dest@_IA64" +# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@bin_rel_dest@_IA64" +# PROP Intermediate_Dir "Release_IA64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_CONSOLE" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" +# ADD LINK32 libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:console /machine:IA64 /nodefaultlib:"libcmt" /libpath:"@lib_rel_dest@_IA64" +@POST_BUILD@ !ENDIF @@ -134,8 +246,12 @@ LINK32=link.exe # Name "@project_name@ - Win32 Release" # Name "@project_name@ - Win32 Debug" -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" +# Name "@project_name@ - Win32 ASCII Debug" +# Name "@project_name@ - Win32 ASCII Release" +# Name "@project_name@ - Win64 Debug AMD64" +# Name "@project_name@ - Win64 Release AMD64" +# Name "@project_name@ - Win64 Debug IA64" +# Name "@project_name@ - Win64 Release IA64" @SOURCE_FILES@ # Begin Source File diff --git a/storage/bdb/build_win32/dbkill.cpp b/storage/bdb/build_win32/dbkill.cpp index 7be76135ce6..7a7082188f6 100644 --- a/storage/bdb/build_win32/dbkill.cpp +++ b/storage/bdb/build_win32/dbkill.cpp @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2004 + * Copyright (c) 1999-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: dbkill.cpp,v 11.9 2004/01/28 03:35:52 bostic Exp $ + * $Id: dbkill.cpp,v 12.1 2005/06/16 20:20:43 bostic Exp $ */ /* * Kill - diff --git a/storage/bdb/build_win32/dynamic_dsp.src b/storage/bdb/build_win32/dynamic_dsp.src index 2229edd3de6..641ef221369 100644 --- a/storage/bdb/build_win32/dynamic_dsp.src +++ b/storage/bdb/build_win32/dynamic_dsp.src @@ -19,6 +19,12 @@ CFG=@project_name@ - Win32 Debug !MESSAGE !MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library") !MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Dynamic-Link Library") +!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Dynamic-Link Library") !MESSAGE # Begin Project @@ -33,17 +39,17 @@ RSC=rc.exe # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" +# PROP BASE Output_Dir "@lib_rel_dest@" +# PROP BASE Intermediate_Dir "Release/@project_name@" # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" +# PROP Output_Dir "@lib_rel_dest@" +# PROP Intermediate_Dir "Release/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c # ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 # ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 # ADD BASE RSC /l 0x409 /d "NDEBUG" @@ -52,24 +58,25 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" +# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:windows /dll /machine:I386 +# ADD LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" +@POST_BUILD@ !ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Output_Dir "@lib_debug_dest@" +# PROP BASE Intermediate_Dir "Debug/@project_name@" # PROP BASE Target_Dir "" -# PROP Use_MFC 2 +# PROP Use_MFC 0 # PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" +# PROP Output_Dir "@lib_debug_dest@" +# PROP Intermediate_Dir "Debug/@project_name@" # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 
/GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c # SUBTRACT CPP /Fr # ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 # ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 @@ -79,8 +86,182 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no +# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept +# ADD LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_ASCII" +# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP 
Output_Dir "@lib_debug_dest@_ASCII" +# PROP Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# SUBTRACT BASE CPP /Fr +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no +# ADD LINK32 @debug_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /pdb:none /debug /machine:I386 /out:"@bin_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_ASCII" +# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_ASCII" +# PROP Intermediate_Dir "Release_ASCII/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." 
/D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" +# ADD LINK32 @release_libs@ kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"@bin_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_AMD64" +# PROP BASE Intermediate_Dir "Debug_AMD64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@lib_debug_dest@_AMD64" +# PROP Intermediate_Dir "Debug_AMD64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c +# SUBTRACT BASE CPP /Fr +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /Wp64 /c +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:AMD64 /out:"@bin_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no +# ADD LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /debug /machine:AMD64 /out:"@bin_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_AMD64" +# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_AMD64" +# PROP Intermediate_Dir "Release_AMD64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:AMD64 /out:"@bin_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" +# ADD LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:AMD64 /out:"@bin_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_IA64" +# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@lib_debug_dest@_IA64" +# PROP Intermediate_Dir "Debug_IA64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# SUBTRACT BASE CPP /Fr +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "DIAGNOSTIC" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# SUBTRACT CPP /Fr +# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /debug /machine:IA64 /out:"@bin_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no +# ADD LINK32 @debug_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /export:__db_assert /debug /machine:IA64 /out:"@bin_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no /libpath:"$(OUTDIR)" +@POST_BUILD@ + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_IA64" +# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" +# PROP BASE Ignore_Export_Lib 0 +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_IA64" +# PROP Intermediate_Dir "Release_IA64/@project_name@" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /Ob2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"@bin_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" +# ADD LINK32 @release_libs@ bufferoverflowU.lib kernel32.lib user32.lib advapi32.lib shell32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:IA64 /out:"@bin_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll" /libpath:"$(OUTDIR)" +@POST_BUILD@ !ENDIF @@ -88,6 +269,13 @@ LINK32=link.exe # Name "@project_name@ - Win32 Release" # Name "@project_name@ - Win32 Debug" +# Name "@project_name@ - Win32 ASCII Debug" +# Name "@project_name@ - Win32 ASCII Release" +# Name "@project_name@ - Win64 Debug AMD64" +# Name "@project_name@ - Win64 Release AMD64" +# Name "@project_name@ - Win64 Debug IA64" +# Name "@project_name@ - Win64 Release IA64" @SOURCE_FILES@ + # End Target # End Project diff --git a/storage/bdb/build_win32/libdb_tcl.def b/storage/bdb/build_win32/libdb_tcl.def index 5e8386a3591..01a89e44dff 100644 --- a/storage/bdb/build_win32/libdb_tcl.def +++ b/storage/bdb/build_win32/libdb_tcl.def @@ -1,4 +1,4 @@ -; $Id: libdb_tcl.def,v 11.7 2002/10/14 23:44:20 mjc Exp $ +; $Id: libdb_tcl.def,v 12.0 2004/11/17 03:48:15 bostic Exp $ DESCRIPTION 'Berkeley DB TCL interface Library' EXPORTS diff --git a/storage/bdb/build_win32/libdbrc.src b/storage/bdb/build_win32/libdbrc.src index 4c644ea9f4f..ec5ba9b3c6c 100644 --- a/storage/bdb/build_win32/libdbrc.src +++ b/storage/bdb/build_win32/libdbrc.src @@ -17,11 
+17,11 @@ BEGIN BLOCK "040904b0" BEGIN VALUE "CompanyName", "Sleepycat Software\0" - VALUE "FileDescription", "Berkeley DB 3.0 DLL\0" + VALUE "FileDescription", "Berkeley DB %MAJOR%.%MINOR% DLL\0" VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0" - VALUE "InternalName", "libdb.dll\0" - VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2004\0" - VALUE "OriginalFilename", "libdb.dll\0" + VALUE "InternalName", "libdb%MAJOR%%MINOR%.dll\0" + VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2005\0" + VALUE "OriginalFilename", "libdb%MAJOR%%MINOR%.dll\0" VALUE "ProductName", "Sleepycat Software libdb\0" VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0" END diff --git a/storage/bdb/build_win32/static_dsp.src b/storage/bdb/build_win32/static_dsp.src index 411e8df8d07..84c3d298792 100644 --- a/storage/bdb/build_win32/static_dsp.src +++ b/storage/bdb/build_win32/static_dsp.src @@ -4,7 +4,7 @@ # TARGTYPE "Win32 (x86) Static Library" 0x0104 -CFG=@project_name@ - Win32 Debug Static +CFG=@project_name@ - Win32 Debug !MESSAGE This is not a valid makefile. To build this project using NMAKE, !MESSAGE use the Export Makefile command and run !MESSAGE @@ -13,12 +13,18 @@ CFG=@project_name@ - Win32 Debug Static !MESSAGE You can specify a configuration when running NMAKE !MESSAGE by defining the macro CFG on the command line. 
For example: !MESSAGE -!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static" +!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug" !MESSAGE !MESSAGE Possible choices for configuration are: !MESSAGE -!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library") -!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 ASCII Release" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win32 ASCII Debug" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win64 Debug AMD64" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win64 Release AMD64" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win64 Debug IA64" (based on "Win32 (x86) Static Library") +!MESSAGE "@project_name@ - Win64 Release IA64" (based on "Win32 (x86) Static Library") !MESSAGE # Begin Project @@ -28,58 +34,202 @@ CFG=@project_name@ - Win32 Debug Static CPP=cl.exe RSC=rc.exe -!IF "$(CFG)" == "@project_name@ - Win32 Release Static" +!IF "$(CFG)" == "@project_name@ - Win32 Release" # PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release_static" -# PROP BASE Intermediate_Dir "Release_static" +# PROP BASE Output_Dir "@lib_rel_dest@" +# PROP BASE Intermediate_Dir "Release/@project_name@" # PROP BASE Target_Dir "" # PROP Use_MFC 0 # PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release_static" -# PROP Intermediate_Dir "Release_static" +# PROP Output_Dir "@lib_rel_dest@" +# PROP Intermediate_Dir "Release/@project_name@" # PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c # ADD BASE RSC /l 0xc09 # ADD RSC /l 0xc09 BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"@lib_rel_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" -!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static" +!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug" -# PROP BASE Use_MFC 1 +# PROP BASE Use_MFC 0 # PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug_static" -# PROP BASE Intermediate_Dir "Debug_static" +# PROP BASE Output_Dir "@lib_debug_dest@" +# PROP BASE Intermediate_Dir "Debug/@project_name@" # PROP BASE Target_Dir "" -# PROP Use_MFC 1 +# PROP Use_MFC 0 # PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug_static" -# PROP Intermediate_Dir "Debug_static" +# PROP Output_Dir "@lib_debug_dest@" +# PROP Intermediate_Dir "Debug/@project_name@" # PROP Target_Dir "" -# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c -# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." 
/I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c # ADD BASE RSC /l 0xc09 # ADD RSC /l 0xc09 BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LIB32=link.exe -lib -# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" -# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"@lib_debug_dest@/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_ASCII" +# PROP BASE Intermediate_Dir "Release_ASCII/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_ASCII" +# PROP Intermediate_Dir "Release_ASCII/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." 
/D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"@lib_rel_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win32 ASCII Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_ASCII" +# PROP BASE Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@lib_debug_dest@_ASCII" +# PROP Intermediate_Dir "Debug_ASCII/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /FD /c +# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" @extra_cppflags@ /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"@lib_debug_dest@_ASCII/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug AMD64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_AMD64" +# PROP BASE Intermediate_Dir "Debug/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@lib_debug_dest@_AMD64" +# PROP Intermediate_Dir "Debug_AMD64/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /Wp64 /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"@lib_debug_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release AMD64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_AMD64" +# PROP BASE Intermediate_Dir "Release_AMD64/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_AMD64" +# PROP Intermediate_Dir "Release_AMD64/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"@lib_rel_dest@_AMD64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Debug IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "@lib_debug_dest@_IA64" +# PROP BASE Intermediate_Dir "Debug_IA64/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "@lib_debug_dest@_IA64" +# PROP Intermediate_Dir "Debug_IA64/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." /D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MDd /W3 /EHsc /Z7 /Od /I "." /I ".." 
/D "DIAGNOSTIC" /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" +# ADD LIB32 /nologo /out:"@lib_debug_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib" + +!ELSEIF "$(CFG)" == "@project_name@ - Win64 Release IA64" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "@lib_rel_dest@_IA64" +# PROP BASE Intermediate_Dir "Release_IA64/@project_name@" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "@lib_rel_dest@_IA64" +# PROP Intermediate_Dir "Release_IA64/@project_name@" +# PROP Target_Dir "" +# ADD BASE CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." /D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD CPP /nologo /MD /W3 /EHsc /O2 /I "." /I ".." 
/D "UNICODE" /D "_UNICODE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" @extra_cppflags@ /Wp64 /FD /c +# ADD BASE RSC /l 0xc09 +# ADD RSC /l 0xc09 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +LIB32=link.exe -lib +# ADD BASE LIB32 /nologo /out:"@lib_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" +# ADD LIB32 /nologo /out:"@lib_rel_dest@_IA64/libdb@lib_suffix@@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib" !ENDIF # Begin Target -# Name "@project_name@ - Win32 Release Static" -# Name "@project_name@ - Win32 Debug Static" +# Name "@project_name@ - Win32 Release" +# Name "@project_name@ - Win32 Debug" +# Name "@project_name@ - Win32 ASCII Release" +# Name "@project_name@ - Win32 ASCII Debug" +# Name "@project_name@ - Win64 Debug AMD64" +# Name "@project_name@ - Win64 Release AMD64" +# Name "@project_name@ - Win64 Debug IA64" +# Name "@project_name@ - Win64 Release IA64" @SOURCE_FILES@ # End Target # End Project diff --git a/storage/bdb/clib/getcwd.c b/storage/bdb/clib/getcwd.c index ec28f1fb637..367950640c9 100644 --- a/storage/bdb/clib/getcwd.c +++ b/storage/bdb/clib/getcwd.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: getcwd.c,v 11.15 2004/01/28 03:35:52 bostic Exp $ + * $Id: getcwd.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/getopt.c b/storage/bdb/clib/getopt.c index 527ee69678d..54bfed16362 100644 --- a/storage/bdb/clib/getopt.c +++ b/storage/bdb/clib/getopt.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. 
*/ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: getopt.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ + * $Id: getopt.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/memcmp.c b/storage/bdb/clib/memcmp.c index 055a2f5fa9a..e7400c1c40b 100644 --- a/storage/bdb/clib/memcmp.c +++ b/storage/bdb/clib/memcmp.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: memcmp.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ + * $Id: memcmp.c,v 12.1 2005/06/16 20:20:48 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/memmove.c b/storage/bdb/clib/memmove.c index 60ece571d64..d2a505b1f35 100644 --- a/storage/bdb/clib/memmove.c +++ b/storage/bdb/clib/memmove.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: memmove.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ + * $Id: memmove.c,v 12.1 2005/06/16 20:20:49 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/raise.c b/storage/bdb/clib/raise.c index 2f9e8cb800e..043a2007162 100644 --- a/storage/bdb/clib/raise.c +++ b/storage/bdb/clib/raise.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1997-2004 + * Copyright (c) 1997-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: raise.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ + * $Id: raise.c,v 12.2 2005/06/16 20:20:50 bostic Exp $ */ #include "db_config.h" @@ -26,9 +26,5 @@ int raise(s) int s; { - /* - * Do not use __os_id(), as it may not return the process ID -- any - * system with kill(3) probably has getpid(3). - */ return (kill(getpid(), s)); } diff --git a/storage/bdb/clib/snprintf.c b/storage/bdb/clib/snprintf.c index e1bc5d11244..4fa4540b9d2 100644 --- a/storage/bdb/clib/snprintf.c +++ b/storage/bdb/clib/snprintf.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: snprintf.c,v 11.18 2004/09/22 03:32:43 bostic Exp $ + * $Id: snprintf.c,v 12.1 2005/06/16 20:20:50 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/strcasecmp.c b/storage/bdb/clib/strcasecmp.c index e8365c4519a..b83daa3ccab 100644 --- a/storage/bdb/clib/strcasecmp.c +++ b/storage/bdb/clib/strcasecmp.c @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: strcasecmp.c,v 1.8 2004/01/28 03:35:52 bostic Exp $ + * $Id: strcasecmp.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/strdup.c b/storage/bdb/clib/strdup.c index 9c451d3287a..e679f5a6ccd 100644 --- a/storage/bdb/clib/strdup.c +++ b/storage/bdb/clib/strdup.c @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: strdup.c,v 1.6 2004/01/28 03:35:52 bostic Exp $ + * $Id: strdup.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/strerror.c b/storage/bdb/clib/strerror.c index e0710add26c..db0d71ccc5f 100644 --- a/storage/bdb/clib/strerror.c +++ b/storage/bdb/clib/strerror.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1997-2004 + * Copyright (c) 1997-2005 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: strerror.c,v 11.8 2004/01/28 03:35:52 bostic Exp $ + * $Id: strerror.c,v 12.1 2005/06/16 20:20:51 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/strtol.c b/storage/bdb/clib/strtol.c index 09e952dc5f8..88b17bd3e9f 100644 --- a/storage/bdb/clib/strtol.c +++ b/storage/bdb/clib/strtol.c @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: strtol.c,v 1.3 2004/10/28 19:27:19 bostic Exp $ + * $Id: strtol.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/clib/strtoul.c b/storage/bdb/clib/strtoul.c index e4356963ba0..14eacb89f3f 100644 --- a/storage/bdb/clib/strtoul.c +++ b/storage/bdb/clib/strtoul.c @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: strtoul.c,v 1.3 2004/10/28 19:27:19 bostic Exp $ + * $Id: strtoul.c,v 12.0 2004/11/17 03:43:15 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/crypto_stub.c b/storage/bdb/common/crypto_stub.c index 68f06b4c8a6..e335b61f99a 100644 --- a/storage/bdb/common/crypto_stub.c +++ b/storage/bdb/common/crypto_stub.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: crypto_stub.c,v 1.4 2004/01/28 03:35:52 bostic Exp $ + * $Id: crypto_stub.c,v 12.2 2005/07/20 16:50:55 bostic Exp $ */ #include "db_config.h" @@ -32,9 +32,9 @@ __crypto_region_init(dbenv) infop = dbenv->reginfo; renv = infop->primary; - MUTEX_LOCK(dbenv, &renv->mutex); + MUTEX_LOCK(dbenv, renv->mtx_regenv); ret = !(renv->cipher_off == INVALID_ROFF); - MUTEX_UNLOCK(dbenv, &renv->mutex); + MUTEX_UNLOCK(dbenv, renv->mtx_regenv); if (ret == 0) return (0); diff --git a/storage/bdb/common/db_byteorder.c b/storage/bdb/common/db_byteorder.c index 0a48055c8e0..60b1d293e41 100644 --- a/storage/bdb/common/db_byteorder.c +++ b/storage/bdb/common/db_byteorder.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: db_byteorder.c,v 11.10 2004/01/28 03:35:52 bostic Exp $ + * $Id: db_byteorder.c,v 12.1 2005/06/16 20:20:52 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/db_clock.c b/storage/bdb/common/db_clock.c new file mode 100644 index 00000000000..d53b1961ada --- /dev/null +++ b/storage/bdb/common/db_clock.c @@ -0,0 +1,30 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2005 + * Sleepycat Software. All rights reserved. + * + * $Id: db_clock.c,v 1.2 2005/08/08 14:39:52 bostic Exp $ + */ + +#include "db_config.h" +#include "db_int.h" + +/* + * __db_difftime -- + * + * Compute the difference in seconds and microseconds of two timers. 
+ * + * PUBLIC: void __db_difftime __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t, + * PUBLIC: u_int32_t *, u_int32_t *)); + */ +void +__db_difftime(ssec, esec, susec, eusec, secp, usecp) + u_int32_t ssec, esec, susec, eusec, *secp, *usecp; +{ + if ((*secp = esec - ssec) != 0 && eusec < susec) { + (*secp)--; + eusec += 1000000; + } + *usecp = eusec - susec; +} diff --git a/storage/bdb/common/db_err.c b/storage/bdb/common/db_err.c index 6e49e79411c..fd9fa89a46a 100644 --- a/storage/bdb/common/db_err.c +++ b/storage/bdb/common/db_err.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: db_err.c,v 11.123 2004/09/22 03:07:50 bostic Exp $ + * $Id: db_err.c,v 12.19 2005/10/19 19:06:29 sue Exp $ */ #include "db_config.h" @@ -22,6 +22,7 @@ #include "dbinc/db_shash.h" #include "dbinc/lock.h" #include "dbinc/log.h" +#include "dbinc/mp.h" #include "dbinc/txn.h" static void __db_msgcall __P((const DB_ENV *, const char *, va_list)); @@ -88,7 +89,7 @@ __db_fnl(dbenv, name) const char *name; { __db_err(dbenv, - "%s: the DB_DIRTY_READ, DB_DEGREE_2 and DB_RMW flags require locking", + "%s: DB_READ_COMMITTED, DB_READ_UNCOMMITTED and DB_RMW require locking", name); return (EINVAL); } @@ -186,7 +187,7 @@ __db_panic(dbenv, errval) int errval; { if (dbenv != NULL) { - PANIC_SET(dbenv, 1); + __db_panic_set(dbenv, 1); __db_err(dbenv, "PANIC: %s", db_strerror(errval)); @@ -213,6 +214,22 @@ __db_panic(dbenv, errval) return (DB_RUNRECOVERY); } +/* + * __db_panic_set -- + * Set/clear unrecoverable error. + * + * PUBLIC: void __db_panic_set __P((DB_ENV *, int)); + */ +void +__db_panic_set(dbenv, on) + DB_ENV *dbenv; + int on; +{ + if (dbenv != NULL && dbenv->reginfo != NULL) + ((REGENV *) + ((REGINFO *)dbenv->reginfo)->primary)->panic = on ? 1 : 0; +} + /* * db_strerror -- * ANSI C strerror(3) for DB. 
@@ -275,8 +292,16 @@ db_strerror(error) return ("DB_REP_HANDLE_DEAD: Handle is no longer valid"); case DB_REP_HOLDELECTION: return ("DB_REP_HOLDELECTION: Need to hold an election"); + case DB_REP_IGNORE: + return ("DB_REP_IGNORE: Replication record ignored"); case DB_REP_ISPERM: return ("DB_REP_ISPERM: Permanent record written"); + case DB_REP_JOIN_FAILURE: + return + ("DB_REP_JOIN_FAILURE: Unable to join replication group"); + case DB_REP_LOCKOUT: + return + ("DB_REP_LOCKOUT: Waiting for replication recovery to complete"); case DB_REP_NEWMASTER: return ("DB_REP_NEWMASTER: A new master has declared itself"); case DB_REP_NEWSITE: @@ -502,59 +527,6 @@ __db_msgfile(dbenv, fmt, ap) (void)fflush(fp); } -/* - * __db_logmsg -- - * Write information into the DB log. - * - * PUBLIC: void __db_logmsg __P((const DB_ENV *, - * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, ...)) - * PUBLIC: __attribute__ ((__format__ (__printf__, 5, 6))); - */ -void -#ifdef STDC_HEADERS -__db_logmsg(const DB_ENV *dbenv, - DB_TXN *txnid, const char *opname, u_int32_t flags, const char *fmt, ...) -#else -__db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist) - const DB_ENV *dbenv; - DB_TXN *txnid; - const char *opname, *fmt; - u_int32_t flags; - va_dcl -#endif -{ - DBT opdbt, msgdbt; - DB_LSN lsn; - va_list ap; - char __logbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */ - - if (!LOGGING_ON(dbenv)) - return; - -#ifdef STDC_HEADERS - va_start(ap, fmt); -#else - va_start(ap); -#endif - memset(&opdbt, 0, sizeof(opdbt)); - opdbt.data = (void *)opname; - opdbt.size = (u_int32_t)(strlen(opname) + 1); - - memset(&msgdbt, 0, sizeof(msgdbt)); - msgdbt.data = __logbuf; - msgdbt.size = (u_int32_t)vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap); - - va_end(ap); - - /* - * XXX - * Explicitly discard the const. Otherwise, we have to const DB_ENV - * references throughout the logging subsystem. 
- */ - (void)__db_debug_log( - (DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0); -} - /* * __db_unknown_flag -- report internal error * @@ -620,38 +592,14 @@ __db_check_txn(dbp, txn, assoc_lid, read_op) /* * Check for common transaction errors: - * Failure to pass a transaction handle to a DB operation - * Failure to configure the DB handle in a proper environment - * Operation on a handle whose open commit hasn't completed. - * - * Read operations don't require a txn even if we've used one before - * with this handle, although if they do have a txn, we'd better be - * prepared for it. + * an operation on a handle whose open commit hasn't completed. + * a transaction handle in a non-transactional environment + * a transaction handle for a non-transactional database */ if (txn == NULL) { - if (!read_op && F_ISSET(dbp, DB_AM_TXN)) { - __db_err(dbenv, - "DB handle previously used in transaction, missing transaction handle"); - return (EINVAL); - } - if (dbp->cur_lid >= TXN_MINIMUM) goto open_err; } else { - if (F_ISSET(txn, TXN_DEADLOCK)) { - __db_err(dbenv, - "Previous deadlock return not resolved"); - return (EINVAL); - } - if (dbp->cur_lid >= TXN_MINIMUM && - dbp->cur_lid != txn->txnid) { - if ((ret = __lock_locker_is_parent(dbenv, - dbp->cur_lid, txn->txnid, &isp)) != 0) - return (ret); - if (!isp) - goto open_err; - } - if (!TXN_ON(dbenv)) return (__db_not_txn_env(dbenv)); @@ -660,6 +608,19 @@ __db_check_txn(dbp, txn, assoc_lid, read_op) "Transaction specified for a DB handle opened outside a transaction"); return (EINVAL); } + + if (F_ISSET(txn, TXN_DEADLOCK)) { + __db_err(dbenv, + "Previous deadlock return not resolved"); + return (EINVAL); + } + if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid) { + if ((ret = __lock_locker_is_parent(dbenv, + dbp->cur_lid, txn->txnid, &isp)) != 0) + return (ret); + if (!isp) + goto open_err; + } } /* @@ -684,6 +645,15 @@ __db_check_txn(dbp, txn, assoc_lid, read_op) return (EINVAL); } + /* + * 
Check the txn and dbp are from the same env. + */ + if (txn != NULL && dbenv != txn->mgrp->dbenv) { + __db_err(dbenv, + "Transaction and database from different environments"); + return (EINVAL); + } + return (0); open_err: __db_err(dbenv, @@ -738,6 +708,69 @@ __db_rec_repl(dbenv, data_size, data_dlen) return (EINVAL); } +#if defined(DIAGNOSTIC) || defined(DEBUG_ROP) || defined(DEBUG_WOP) +/* + * __dbc_logging -- + * In DIAGNOSTIC mode, check for bad replication combinations. + * + * PUBLIC: int __dbc_logging __P((DBC *)); + */ +int +__dbc_logging(dbc) + DBC *dbc; +{ + DB_ENV *dbenv; + DB_REP *db_rep; + int ret; + + dbenv = dbc->dbp->dbenv; + db_rep = dbenv->rep_handle; + + ret = LOGGING_ON(dbenv) && + !F_ISSET(dbc, DBC_RECOVER) && !IS_REP_CLIENT(dbenv); + + /* + * If we're not using replication or running recovery, return. + */ + if (db_rep == NULL || F_ISSET(dbc, DBC_RECOVER)) + return (ret); + +#ifndef DEBUG_ROP + /* + * Only check when DEBUG_ROP is not configured. People often do + * non-transactional reads, and debug_rop is going to write + * a log record. + */ + { + REP *rep; + + rep = db_rep->region; + + /* + * If we're a client and not running recovery or internally, error. + */ + if (IS_REP_CLIENT(dbenv) && !F_ISSET(dbc->dbp, DB_AM_CL_WRITER)) { + __db_err(dbenv, "Dbc_logging: Client update"); + goto err; + } + if (IS_REP_MASTER(dbenv) && dbc->txn == NULL) { + __db_err(dbenv, "Dbc_logging: Master non-txn update"); + goto err; + } + if (0) { +err: __db_err(dbenv, "Rep: flags 0x%lx msg_th %lu, start_th %d", + (u_long)rep->flags, (u_long)rep->msg_th, rep->start_th); + __db_err(dbenv, "Rep: handle %lu, opcnt %lu, in_rec %d", + (u_long)rep->handle_cnt, (u_long)rep->op_cnt, + rep->in_recovery); + abort(); + } + } +#endif + return (ret); +} +#endif + /* * __db_check_lsn -- * Display the log sequence error message. 
@@ -755,3 +788,53 @@ __db_check_lsn(dbenv, lsn, prev) (u_long)(prev)->file, (u_long)(prev)->offset); return (EINVAL); } + +/* + * __db_rdonly -- + * Common readonly message. + * PUBLIC: int __db_rdonly __P((const DB_ENV *, const char *)); + */ +int +__db_rdonly(dbenv, name) + const DB_ENV *dbenv; + const char *name; +{ + __db_err(dbenv, "%s: attempt to modify a read-only database", name); + return (EACCES); +} + +/* + * __db_space_err -- + * Common out of space message. + * PUBLIC: int __db_space_err __P((const DB *)); + */ +int +__db_space_err(dbp) + const DB *dbp; +{ + __db_err(dbp->dbenv, + "%s: file limited to %lu pages", + dbp->fname, (u_long)dbp->mpf->mfp->maxpgno); + return (ENOSPC); +} + +/* + * __db_failed -- + * Common failed thread message. + * + * PUBLIC: int __db_failed __P((const DB_ENV *, + * PUBLIC: const char *, pid_t, db_threadid_t)); + */ +int +__db_failed(dbenv, msg, pid, tid) + const DB_ENV *dbenv; + const char *msg; + pid_t pid; + db_threadid_t tid; +{ + char buf[DB_THREADID_STRLEN]; + + __db_err(dbenv, "Thread/process %s failed: %s", + dbenv->thread_id_string((DB_ENV*)dbenv, pid, tid, buf), msg); + return (DB_RUNRECOVERY); +} diff --git a/storage/bdb/common/db_getlong.c b/storage/bdb/common/db_getlong.c index be70f0d4a60..3d0183c602c 100644 --- a/storage/bdb/common/db_getlong.c +++ b/storage/bdb/common/db_getlong.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: db_getlong.c,v 11.22 2004/10/28 14:43:26 bostic Exp $ + * $Id: db_getlong.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/db_idspace.c b/storage/bdb/common/db_idspace.c index 49f2e91377b..3932a49ea2e 100644 --- a/storage/bdb/common/db_idspace.c +++ b/storage/bdb/common/db_idspace.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 2001-2004 + * Copyright (c) 2001-2005 * Sleepycat Software. All rights reserved. * - * $Id: db_idspace.c,v 1.9 2004/01/28 03:35:52 bostic Exp $ + * $Id: db_idspace.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/db_log2.c b/storage/bdb/common/db_log2.c index fcc1a603579..455340640e9 100644 --- a/storage/bdb/common/db_log2.c +++ b/storage/bdb/common/db_log2.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -35,7 +35,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: db_log2.c,v 11.9 2004/01/28 03:35:52 bostic Exp $ + * $Id: db_log2.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/util_arg.c b/storage/bdb/common/util_arg.c index 16a17ee28d6..017fda6c312 100644 --- a/storage/bdb/common/util_arg.c +++ b/storage/bdb/common/util_arg.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2004 + * Copyright (c) 2001-2005 * Sleepycat Software. All rights reserved. * - * $Id: util_arg.c,v 1.6 2004/01/28 03:35:52 bostic Exp $ + * $Id: util_arg.c,v 12.1 2005/06/16 20:20:53 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/util_cache.c b/storage/bdb/common/util_cache.c index 006c34557c9..34ff5ff008f 100644 --- a/storage/bdb/common/util_cache.c +++ b/storage/bdb/common/util_cache.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2004 + * Copyright (c) 2000-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: util_cache.c,v 1.8 2004/02/17 16:03:05 bostic Exp $ + * $Id: util_cache.c,v 12.1 2005/06/16 20:20:54 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/common/util_log.c b/storage/bdb/common/util_log.c index 98fd1951c06..5c46d6b2d4c 100644 --- a/storage/bdb/common/util_log.c +++ b/storage/bdb/common/util_log.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2004 + * Copyright (c) 2000-2005 * Sleepycat Software. All rights reserved. * - * $Id: util_log.c,v 1.14 2004/01/28 03:35:52 bostic Exp $ + * $Id: util_log.c,v 12.4 2005/10/12 17:47:17 bostic Exp $ */ #include "db_config.h" @@ -40,16 +40,18 @@ __db_util_logset(progname, fname) const char *progname; char *fname; { + pid_t pid; + db_threadid_t tid; FILE *fp; time_t now; - u_int32_t id; if ((fp = fopen(fname, "w")) == NULL) goto err; (void)time(&now); - __os_id(&id); - fprintf(fp, "%s: %lu %s", progname, (u_long)id, ctime(&now)); + + __os_id(NULL, &pid, &tid); + fprintf(fp, "%s: %lu %s", progname, (u_long)pid, ctime(&now)); if (fclose(fp) == EOF) goto err; diff --git a/storage/bdb/common/util_sig.c b/storage/bdb/common/util_sig.c index 53087360ee5..3561173166f 100644 --- a/storage/bdb/common/util_sig.c +++ b/storage/bdb/common/util_sig.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2000-2004 + * Copyright (c) 2000-2005 * Sleepycat Software. All rights reserved. * - * $Id: util_sig.c,v 1.9 2004/01/28 03:35:54 bostic Exp $ + * $Id: util_sig.c,v 12.1 2005/06/16 20:20:55 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/crypto/aes_method.c b/storage/bdb/crypto/aes_method.c index 567e6745667..f77616f3c35 100644 --- a/storage/bdb/crypto/aes_method.c +++ b/storage/bdb/crypto/aes_method.c @@ -1,13 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2004 + * Copyright (c) 2001-2005 * Sleepycat Software. All rights reserved. 
* * Some parts of this code originally written by Adam Stubblefield, * -- astubble@rice.edu. * - * $Id: aes_method.c,v 1.20 2004/09/17 22:00:25 mjc Exp $ + * $Id: aes_method.c,v 12.1 2005/06/16 20:20:55 bostic Exp $ */ #include "db_config.h" diff --git a/storage/bdb/crypto/crypto.c b/storage/bdb/crypto/crypto.c index f753ec3f0fc..63dea986fe6 100644 --- a/storage/bdb/crypto/crypto.c +++ b/storage/bdb/crypto/crypto.c @@ -1,13 +1,13 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * * Some parts of this code originally written by Adam Stubblefield * -- astubble@rice.edu * - * $Id: crypto.c,v 1.31 2004/10/15 16:59:38 bostic Exp $ + * $Id: crypto.c,v 12.5 2005/07/20 16:50:56 bostic Exp $ */ #include "db_config.h" @@ -40,35 +40,31 @@ __crypto_region_init(dbenv) ret = 0; infop = dbenv->reginfo; renv = infop->primary; - MUTEX_LOCK(dbenv, &renv->mutex); if (renv->cipher_off == INVALID_ROFF) { if (!CRYPTO_ON(dbenv)) - goto err; + return (0); if (!F_ISSET(infop, REGION_CREATE)) { __db_err(dbenv, "Joining non-encrypted environment with encryption key"); - ret = EINVAL; - goto err; + return (EINVAL); } if (F_ISSET(db_cipher, CIPHER_ANY)) { __db_err(dbenv, "Encryption algorithm not supplied"); - ret = EINVAL; - goto err; + return (EINVAL); } /* - * Must create the shared information. We need: - * Shared cipher information that contains the passwd. - * After we copy the passwd, we smash and free the one in the - * dbenv. + * Must create the shared information. We need: Shared cipher + * information that contains the passwd. After we copy the + * passwd, we smash and free the one in the dbenv. 
*/ - if ((ret = __db_shalloc( - infop, sizeof(CIPHER), MUTEX_ALIGN, &cipher)) != 0) - goto err; + if ((ret = + __db_shalloc(infop, sizeof(CIPHER), 0, &cipher)) != 0) + return (ret); memset(cipher, 0, sizeof(*cipher)); if ((ret = __db_shalloc( infop, dbenv->passwd_len, 0, &sh_passwd)) != 0) { __db_shalloc_free(infop, cipher); - goto err; + return (ret); } memset(sh_passwd, 0, dbenv->passwd_len); cipher->passwd = R_OFFSET(infop, sh_passwd); @@ -79,51 +75,44 @@ __crypto_region_init(dbenv) } else { if (!CRYPTO_ON(dbenv)) { __db_err(dbenv, - "Encrypted environment: no encryption key supplied"); - ret = EINVAL; - goto err; + "Encrypted environment: no encryption key supplied"); + return (EINVAL); } cipher = R_ADDR(infop, renv->cipher_off); sh_passwd = R_ADDR(infop, cipher->passwd); if ((cipher->passwd_len != dbenv->passwd_len) || memcmp(dbenv->passwd, sh_passwd, cipher->passwd_len) != 0) { __db_err(dbenv, "Invalid password"); - ret = EPERM; - goto err; + return (EPERM); } if (!F_ISSET(db_cipher, CIPHER_ANY) && db_cipher->alg != cipher->flags) { __db_err(dbenv, "Environment encrypted using a different algorithm"); - ret = EINVAL; - goto err; + return (EINVAL); } if (F_ISSET(db_cipher, CIPHER_ANY)) /* - * We have CIPHER_ANY and we are joining the - * existing env. Setup our cipher structure - * for whatever algorithm this env has. + * We have CIPHER_ANY and we are joining the existing + * env. Setup our cipher structure for whatever + * algorithm this env has. */ if ((ret = __crypto_algsetup(dbenv, db_cipher, cipher->flags, 0)) != 0) - goto err; + return (ret); } - MUTEX_UNLOCK(dbenv, &renv->mutex); ret = db_cipher->init(dbenv, db_cipher); /* - * On success, no matter if we allocated it or are using the - * already existing one, we are done with the passwd in the dbenv. - * We smash N-1 bytes so that we don't overwrite the nul. + * On success, no matter if we allocated it or are using the already + * existing one, we are done with the passwd in the dbenv. 
We smash + * N-1 bytes so that we don't overwrite the nul. */ memset(dbenv->passwd, 0xff, dbenv->passwd_len-1); __os_free(dbenv, dbenv->passwd); dbenv->passwd = NULL; dbenv->passwd_len = 0; - if (0) { -err: MUTEX_UNLOCK(dbenv, &renv->mutex); - } return (ret); } @@ -236,33 +225,54 @@ __crypto_decrypt_meta(dbenv, dbp, mbuf, do_metachk) u_int8_t *iv; /* - * If we weren't given a dbp, we just want to decrypt the page - * on behalf of some internal subsystem, not on behalf of a user - * with a dbp. Therefore, set up a dummy dbp so that the call - * to P_OVERHEAD below works. + * If we weren't given a dbp, we just want to decrypt the page on + * behalf of some internal subsystem, not on behalf of a user with + * a dbp. Therefore, set up a dummy dbp so that the call to + * P_OVERHEAD below works. */ if (dbp == NULL) { memset(&dummydb, 0, sizeof(DB)); dbp = &dummydb; } - /* - * Meta-pages may be encrypted for DBMETASIZE bytes. If - * we have a non-zero IV (that is written after encryption) - * then we decrypt (or error if the user isn't set up for - * security). We guarantee that the IV space on non-encrypted - * pages will be zero and a zero-IV is illegal for encryption. - * Therefore any non-zero IV means an encrypted database. - * This basically checks the passwd on the file - * if we cannot find a good magic number. - * We walk through all the algorithms we know about attempting - * to decrypt (and possibly byteswap). - * - * !!! - * All method meta pages have the IV and checksum at the - * exact same location, but not in DBMETA, use BTMETA. - */ + ret = 0; meta = (DBMETA *)mbuf; + + /* + * !!! + * We used an "unused" field in the meta-data page to flag whether or + * not the database is encrypted. Unfortunately, that unused field + * was used in Berkeley DB releases before 3.0 (for example, 2.7.7). 
+ * It would have been OK, except encryption doesn't follow the usual + * rules of "upgrade before doing anything else", we check encryption + * before checking for old versions of the database. + * + * We don't have to check Btree databases -- before 3.0, the field of + * interest was the bt_maxkey field (which was never supported and has + * since been removed). + * + * Ugly check to jump out if this format is older than what we support. + * It assumes no encrypted page will have an unencrypted magic number, + * but that seems relatively safe. [#10920] + */ + if (meta->magic == DB_HASHMAGIC && meta->version <= 5) + return (0); + + /* + * Meta-pages may be encrypted for DBMETASIZE bytes. If we have a + * non-zero IV (that is written after encryption) then we decrypt (or + * error if the user isn't set up for security). We guarantee that + * the IV space on non-encrypted pages will be zero and a zero-IV is + * illegal for encryption. Therefore any non-zero IV means an + * encrypted database. This basically checks the passwd on the file + * if we cannot find a good magic number. We walk through all the + * algorithms we know about attempting to decrypt (and possibly + * byteswap). + * + * !!! + * All method meta pages have the IV and checksum at the exact same + * location, but not in DBMETA, use BTMETA. + */ if (meta->encrypt_alg != 0) { db_cipher = (DB_CIPHER *)dbenv->crypto_handle; if (!F_ISSET(dbp, DB_AM_ENCRYPT)) { @@ -272,11 +282,10 @@ __crypto_decrypt_meta(dbenv, dbp, mbuf, do_metachk) return (EINVAL); } /* - * User has a correct, secure env, but has - * encountered a database in that env that is - * secure, but user didn't dbp->set_flags. Since - * it is existing, use encryption if it is that - * way already. + * User has a correct, secure env, but has encountered + * a database in that env that is secure, but user + * didn't dbp->set_flags. Since it is existing, use + * encryption if it is that way already. 
*/ F_SET(dbp, DB_AM_ENCRYPT|DB_AM_CHKSUM); } @@ -294,9 +303,9 @@ __crypto_decrypt_meta(dbenv, dbp, mbuf, do_metachk) DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM)); iv = ((BTMETA *)mbuf)->iv; /* - * For ALL pages, we do not encrypt the beginning - * of the page that contains overhead information. - * This is true of meta and all other pages. + * For ALL pages, we do not encrypt the beginning of the page + * that contains overhead information. This is true of meta + * and all other pages. */ pg_off = P_OVERHEAD(dbp); alg_retry: @@ -330,10 +339,10 @@ alg_retry: goto alg_retry; } else if (F_ISSET(dbp, DB_AM_ENCRYPT)) { /* - * They gave us a passwd, but the database is not - * encrypted. This is an error. We do NOT want to - * silently allow them to write data in the clear when - * the user set up and expects encrypted data. + * They gave us a passwd, but the database is not encrypted. + * This is an error. We do NOT want to silently allow them + * to write data in the clear when the user set up and expects + * encrypted data. * * This covers at least the following scenario. * 1. User creates and sets up an encrypted database. @@ -381,5 +390,5 @@ __crypto_set_passwd(dbenv_src, dbenv_dest) cipher = R_ADDR(infop, renv->cipher_off); sh_passwd = R_ADDR(infop, cipher->passwd); - return (__dbenv_set_encrypt(dbenv_dest, sh_passwd, DB_ENCRYPT_AES)); + return (__env_set_encrypt(dbenv_dest, sh_passwd, DB_ENCRYPT_AES)); } diff --git a/storage/bdb/crypto/crypto.html b/storage/bdb/crypto/crypto.html index 9475beb2a2d..7d2804b43b4 100644 --- a/storage/bdb/crypto/crypto.html +++ b/storage/bdb/crypto/crypto.html @@ -108,7 +108,7 @@ The setup of the security subsystem will be similar to replication initializatio since it is a sort of subsystem, but it does not have its own region.  When the environment handle is created via db_env_create, we initialize our set_encrypt method to be the RPC or local version.  
Therefore -the __dbenv structure needs a new pointer: +the DB_ENV structure needs a new pointer:
    void    *crypto_handle;   /* Security handle */
The crypto handle will really point to a new __db_cipher structure that will contain a set of functions and a pointer to the in-memory information @@ -134,21 +134,21 @@ this is set up, it is read-only forever.

During dbenv->set_encrypt, we set the encryption, decryption and checksumming methods to the appropriate functions based on the flags.  This function will allocate us a crypto -handle that we store in the __dbenv structure just like all the +handle that we store in the DB_ENV structure just like all the other subsystems.  For now, only AES ciphering functions and SHA1 checksumming functions are supported.  Also we will copy the password -into the __dbenv structure.  We ultimately need to keep the +into the DB_ENV structure.  We ultimately need to keep the password in the environment's shared memory region or compare this one against the one that is there, if we are joining an existing environment, but we do not have it yet because open has not yet been called.  We will allocate a structure that will be used in initialization and set up the function pointers to point to the algorithm-specific functions. -

In the  __dbenv_open path, in __db_e_attach, if we +

In the  __env_open path, in __db_e_attach, if we are creating the region and the dbenv->passwd field is set, we need to use the length of the password in the initial computation of the environment's size.  This guarantees sufficient space for storing the password in shared memory.  Then we will call a new function to initialize the -security region, __crypto_region_init in __dbenv_open.  +security region, __crypto_region_init in __env_open.  If we are the creator, we will allocate space in the shared region to store the password and copy the password into that space.  Or, if we are not the creator we will compare the password stored in the dbenv with the @@ -304,7 +304,7 @@ We will have per-process state vectors that are set up when a process begins.&nb That way we minimize the contention and only multi-threaded processes need acquire locks for the IV.  We will have the state vector in the environment handle in heap memory, as well as the index and there will be a mutex protecting -it for threaded access.  This will be added to the __dbenv +it for threaded access.  This will be added to the DB_ENV structure:

    DB_MUTEX    *mt_mutexp;   /* Mersenne Twister mutex */
     int         *mti;         /* MT index */
diff --git a/storage/bdb/crypto/mersenne/mt19937db.c b/storage/bdb/crypto/mersenne/mt19937db.c
index 1c1699db5c3..1dad5f6ad12 100644
--- a/storage/bdb/crypto/mersenne/mt19937db.c
+++ b/storage/bdb/crypto/mersenne/mt19937db.c
@@ -1,5 +1,5 @@
 /*
- * $Id: mt19937db.c,v 1.12 2004/06/14 16:54:27 mjc Exp $
+ * $Id: mt19937db.c,v 12.1 2005/07/20 16:50:57 bostic Exp $
  */
 #include "db_config.h"
 
@@ -69,7 +69,7 @@ __db_generate_iv(dbenv, iv)
 
 	ret = 0;
 	n = DB_IV_BYTES / sizeof(u_int32_t);
-	MUTEX_THREAD_LOCK(dbenv, dbenv->mt_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_mt);
 	if (dbenv->mt == NULL) {
 		if ((ret = __os_calloc(dbenv, 1, N*sizeof(unsigned long),
 		    &dbenv->mt)) != 0)
@@ -77,17 +77,16 @@ __db_generate_iv(dbenv, iv)
 		/* mti==N+1 means mt[N] is not initialized */
 		dbenv->mti = N + 1;
 	}
-	for (i = 0; i < n; i++)
-{
+	for (i = 0; i < n; i++) {
 		/*
 		 * We do not allow 0.  If we get one just try again.
 		 */
 		do {
 			iv[i] = (u_int32_t)__db_genrand(dbenv);
 		} while (iv[i] == 0);
-}
+	}
 
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->mt_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_mt);
 	return (0);
 }
 
@@ -137,7 +136,7 @@ __db_lsgenrand(seed_array, mt, mtip)
 
 static unsigned long 
 __db_genrand(dbenv)
-	DB_ENV *dbenv;
+    DB_ENV *dbenv;
 {
     unsigned long y;
     static unsigned long mag01[2]={0x0, MATRIX_A};
@@ -145,7 +144,7 @@ __db_genrand(dbenv)
     u_int32_t secs, seed, usecs;
 
     /*
-     * We are called with the mt_mutexp locked
+     * We are called with DB_ENV->mtx_mt locked.
      */
     if (dbenv->mti >= N) { /* generate N words at one time */
         int kk;
diff --git a/storage/bdb/crypto/rijndael/rijndael-alg-fst.h b/storage/bdb/crypto/rijndael/rijndael-alg-fst.h
index fe9ce381471..60c01212764 100644
--- a/storage/bdb/crypto/rijndael/rijndael-alg-fst.h
+++ b/storage/bdb/crypto/rijndael/rijndael-alg-fst.h
@@ -1,5 +1,5 @@
 /*
- * $Id: rijndael-alg-fst.h,v 1.2 2002/01/08 18:53:37 sue Exp $
+ * $Id: rijndael-alg-fst.h,v 12.0 2004/11/17 03:43:17 bostic Exp $
  */
 /**
  * rijndael-alg-fst.h
diff --git a/storage/bdb/crypto/rijndael/rijndael-api-fst.h b/storage/bdb/crypto/rijndael/rijndael-api-fst.h
index 4137aa25edc..caf0abc4aa7 100644
--- a/storage/bdb/crypto/rijndael/rijndael-api-fst.h
+++ b/storage/bdb/crypto/rijndael/rijndael-api-fst.h
@@ -1,5 +1,5 @@
 /*
- * $Id: rijndael-api-fst.h,v 1.5 2003/03/17 19:42:18 bostic Exp $
+ * $Id: rijndael-api-fst.h,v 12.0 2004/11/17 03:43:17 bostic Exp $
  */
 /**
  * rijndael-api-fst.h
diff --git a/storage/bdb/cxx/cxx_db.cpp b/storage/bdb/cxx/cxx_db.cpp
index 22f1b135d18..03e07f4d238 100644
--- a/storage/bdb/cxx/cxx_db.cpp
+++ b/storage/bdb/cxx/cxx_db.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_db.cpp,v 11.87 2004/07/15 18:26:48 ubell Exp $
+ * $Id: cxx_db.cpp,v 12.4 2005/10/18 14:25:53 mjc Exp $
  */
 
 #include "db_config.h"
@@ -219,6 +219,9 @@ int Db::error_policy()
 }
 
 DB_DESTRUCTOR(close, (u_int32_t flags), (db, flags), DB_RETOK_STD)
+DB_METHOD(compact, (DbTxn *txnid, Dbt *start, Dbt *stop,
+    DB_COMPACT *c_data, u_int32_t flags, Dbt *end),
+    (db, unwrap(txnid), start, stop, c_data, flags, end), DB_RETOK_STD)
 
 // The following cast implies that Dbc can be no larger than DBC
 DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags),
@@ -331,7 +334,7 @@ int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags)
 
 	/* The logic here is identical to Db::get - reuse the macro. */
 	if (!DB_RETOK_DBGET(ret)) {
-		if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+		if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(value))
 			DB_ERROR_DBT(env_, "Db::pget", value, error_policy());
 		else
 			DB_ERROR(env_, "Db::pget", ret, error_policy());
@@ -536,8 +539,6 @@ int Db::verify(const char *name, const char *subdb,
 
 DB_METHOD(set_bt_compare, (bt_compare_fcn_type func),
     (db, func), DB_RETOK_STD)
-DB_METHOD(set_bt_maxkey, (u_int32_t bt_maxkey),
-    (db, bt_maxkey), DB_RETOK_STD)
 DB_METHOD(get_bt_minkey, (u_int32_t *bt_minkeyp),
     (db, bt_minkeyp), DB_RETOK_STD)
 DB_METHOD(set_bt_minkey, (u_int32_t bt_minkey),
diff --git a/storage/bdb/cxx/cxx_dbc.cpp b/storage/bdb/cxx/cxx_dbc.cpp
index 0ca59735f99..8f73557222a 100644
--- a/storage/bdb/cxx/cxx_dbc.cpp
+++ b/storage/bdb/cxx/cxx_dbc.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_dbc.cpp,v 11.59 2004/01/28 03:35:56 bostic Exp $
+ * $Id: cxx_dbc.cpp,v 12.2 2005/09/30 07:38:25 mjc Exp $
  */
 
 #include "db_config.h"
@@ -80,10 +80,10 @@ int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags)
 	ret = dbc->c_get(dbc, key, data, _flags);
 
 	if (!DB_RETOK_DBCGET(ret)) {
-		if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+		if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(key))
 			DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv),
 				"Dbc::get", key, ON_ERROR_UNKNOWN);
-		else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+		else if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(data))
 			DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv),
 				"Dbc::get", data, ON_ERROR_UNKNOWN);
 		else
@@ -103,10 +103,10 @@ int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags)
 
 	/* Logic is the same as for Dbc::get - reusing macro. */
 	if (!DB_RETOK_DBCGET(ret)) {
-		if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+		if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(key))
 			DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv),
 				"Dbc::pget", key, ON_ERROR_UNKNOWN);
-		else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+		else if (ret == DB_BUFFER_SMALL && DB_OVERFLOWED_DBT(data))
 			DB_ERROR_DBT(DbEnv::get_DbEnv(dbc->dbp->dbenv),
 				"Dbc::pget", data, ON_ERROR_UNKNOWN);
 		else
diff --git a/storage/bdb/cxx/cxx_dbt.cpp b/storage/bdb/cxx/cxx_dbt.cpp
index ab894249533..8062c255be5 100644
--- a/storage/bdb/cxx/cxx_dbt.cpp
+++ b/storage/bdb/cxx/cxx_dbt.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_dbt.cpp,v 11.55 2004/01/28 03:35:56 bostic Exp $
+ * $Id: cxx_dbt.cpp,v 12.1 2005/06/16 20:20:58 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/cxx/cxx_env.cpp b/storage/bdb/cxx/cxx_env.cpp
index 988cecb6c97..62bbb0de381 100644
--- a/storage/bdb/cxx/cxx_env.cpp
+++ b/storage/bdb/cxx/cxx_env.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_env.cpp,v 11.105 2004/09/22 22:20:31 mjc Exp $
+ * $Id: cxx_env.cpp,v 12.14 2005/10/18 14:49:27 mjc Exp $
  */
 
 #include "db_config.h"
@@ -17,7 +17,11 @@
 #include "dbinc/cxx_int.h"
 
 #include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
 #include "dbinc_auto/common_ext.h"
+#include "dbinc_auto/log_ext.h"
 
 #ifdef HAVE_CXX_STDHEADERS
 using std::cerr;
@@ -91,7 +95,7 @@ void _paniccall_intercept_c(DB_ENV *env, int errval)
 
 extern "C"
 void _stream_error_function_c(const DB_ENV *env,
-			      const char *prefix, const char *message)
+    const char *prefix, const char *message)
 {
 	DbEnv::_stream_error_function(env, prefix, message);
 }
@@ -103,21 +107,38 @@ void _stream_message_function_c(const DB_ENV *env, const char *message)
 }
 
 extern "C"
-int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt,
-			      DB_LSN *lsn, db_recops op)
+int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt, DB_LSN *lsn, db_recops op)
 {
 	return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op));
 }
 
 extern "C"
-int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl,
-			  const DBT *data, const DB_LSN *lsn, int id,
-			  u_int32_t flags)
+int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl, const DBT *data,
+    const DB_LSN *lsn, int id, u_int32_t flags)
 {
 	return (DbEnv::_rep_send_intercept(env,
 	    cntrl, data, lsn, id, flags));
 }
 
+extern "C"
+int _isalive_intercept_c(DB_ENV *env, pid_t pid, db_threadid_t thrid)
+{
+	return (DbEnv::_isalive_intercept(env, pid, thrid));
+}
+
+extern "C"
+void _thread_id_intercept_c(DB_ENV *env, pid_t *pidp, db_threadid_t *thridp)
+{
+	DbEnv::_thread_id_intercept(env, pidp, thridp);
+}
+
+extern "C"
+char *_thread_id_string_intercept_c(DB_ENV *env, pid_t pid,
+    db_threadid_t thrid, char *buf)
+{
+	return (DbEnv::_thread_id_string_intercept(env, pid, thrid, buf));
+}
+
 void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
 {
 	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
@@ -150,8 +171,8 @@ void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
 	(*cxxenv->paniccall_callback_)(cxxenv, errval);
 }
 
-int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt,
-				   DB_LSN *lsn, db_recops op)
+int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+    db_recops op)
 {
 	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
 	if (cxxenv == 0) {
@@ -170,9 +191,19 @@ int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt,
 	return ((*cxxenv->app_dispatch_callback_)(cxxenv, cxxdbt, cxxlsn, op));
 }
 
-int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl,
-			       const DBT *data, const DB_LSN *lsn,
-			       int id, u_int32_t flags)
+int DbEnv::_isalive_intercept(DB_ENV *env, pid_t pid, db_threadid_t thrid)
+{
+	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
+	if (cxxenv == 0) {
+		DB_ERROR(DbEnv::get_DbEnv(env),
+			"DbEnv::isalive_callback", EINVAL, ON_ERROR_UNKNOWN);
+		return (0);
+	}
+	return ((*cxxenv->isalive_callback_)(cxxenv, pid, thrid));
+}
+
+int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl, const DBT *data,
+    const DB_LSN *lsn, int id, u_int32_t flags)
 {
 	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
 	if (cxxenv == 0) {
@@ -187,6 +218,28 @@ int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl,
 	    cxxcntrl, cxxdata, cxxlsn, id, flags));
 }
 
+void DbEnv::_thread_id_intercept(DB_ENV *env, pid_t *pidp, db_threadid_t *thridp)
+{
+	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
+	if (cxxenv == 0) {
+		DB_ERROR(DbEnv::get_DbEnv(env),
+			"DbEnv::thread_id_callback", EINVAL, ON_ERROR_UNKNOWN);
+	} else
+		cxxenv->thread_id_callback_(cxxenv, pidp, thridp);
+}
+
+char *DbEnv::_thread_id_string_intercept(DB_ENV *env, pid_t pid,
+    db_threadid_t thrid, char *buf)
+{
+	DbEnv *cxxenv = DbEnv::get_DbEnv(env);
+	if (cxxenv == 0) {
+		DB_ERROR(DbEnv::get_DbEnv(env),
+			"DbEnv::thread_id_string_callback", EINVAL, ON_ERROR_UNKNOWN);
+		return (NULL);
+	}
+	return (cxxenv->thread_id_string_callback_(cxxenv, pid, thrid, buf));
+}
+
 // A truism for the DbEnv object is that there is a valid
 // DB_ENV handle from the constructor until close().
 // After the close, the DB_ENV handle is invalid and
@@ -208,8 +261,6 @@ DbEnv::DbEnv(u_int32_t flags)
 ,	app_dispatch_callback_(0)
 ,	feedback_callback_(0)
 ,	paniccall_callback_(0)
-,	pgin_callback_(0)
-,	pgout_callback_(0)
 ,	rep_send_callback_(0)
 {
 	if ((construct_error_ = initialize(0)) != 0)
@@ -226,8 +277,6 @@ DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
 ,	app_dispatch_callback_(0)
 ,	feedback_callback_(0)
 ,	paniccall_callback_(0)
-,	pgin_callback_(0)
-,	pgout_callback_(0)
 ,	rep_send_callback_(0)
 {
 	if ((construct_error_ = initialize(env)) != 0)
@@ -326,10 +375,20 @@ void *DbEnv::get_app_private() const
 	return unwrapConst(this)->app_private;
 }
 
+DBENV_METHOD(failchk, (u_int32_t flags), (dbenv, flags))
+DBENV_METHOD(fileid_reset, (const char *file, u_int32_t flags),
+    (dbenv, file, flags))
 DBENV_METHOD(get_home, (const char **homep), (dbenv, homep))
 DBENV_METHOD(get_open_flags, (u_int32_t *flagsp), (dbenv, flagsp))
 DBENV_METHOD(get_data_dirs, (const char ***dirspp), (dbenv, dirspp))
 
+bool DbEnv::is_bigendian()
+{
+	return unwrap(this)->is_bigendian() ? true : false;
+}
+
+DBENV_METHOD(set_thread_count, (u_int32_t count), (dbenv, count))
+
 // used internally during constructor
 // to associate an existing DB_ENV with this DbEnv,
 // or create a new one.
@@ -392,10 +451,27 @@ DBENV_METHOD(log_file, (DbLsn *lsn, char *namep, size_t len),
 DBENV_METHOD(log_flush, (const DbLsn *lsn), (dbenv, lsn))
 DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags),
     (dbenv, lsn, data, flags))
+
+int DbEnv::log_printf(DbTxn *txn, const char *fmt, ...)
+{
+	DB_ENV *env = unwrap(this);
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = __log_printf_pp(env, unwrap(txn), fmt, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
 DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags),
     (dbenv, spp, flags))
 DBENV_METHOD(log_stat_print, (u_int32_t flags), (dbenv, flags))
 
+DBENV_METHOD(lsn_reset, (const char *file, u_int32_t flags),
+    (dbenv, file, flags))
+
 int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags)
 {
 	DB_ENV *env = unwrap(this);
@@ -500,6 +576,12 @@ void DbEnv::runtime_error(DbEnv *env,
 				throw lng_except;
 			}
 			break;
+		case DB_REP_HANDLE_DEAD:
+			{
+				DbRepHandleDeadException dl_except(caller);
+				dl_except.set_env(env);
+				throw dl_except;
+			}
 		default:
 			{
 				DbException except(caller, error);
@@ -612,10 +694,14 @@ DBENV_METHOD_VOID(get_errfile, (FILE **errfilep), (dbenv, errfilep))
 DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile))
 DBENV_METHOD_VOID(get_errpfx, (const char **errpfxp), (dbenv, errpfxp))
 DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx))
+DBENV_METHOD(set_intermediate_dir, (int mode, u_int32_t flags),
+    (dbenv, mode, flags))
 DBENV_METHOD(get_lg_bsize, (u_int32_t *bsizep), (dbenv, bsizep))
 DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize))
 DBENV_METHOD(get_lg_dir, (const char **dirp), (dbenv, dirp))
 DBENV_METHOD(set_lg_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(get_lg_filemode, (int *modep), (dbenv, modep))
+DBENV_METHOD(set_lg_filemode, (int mode), (dbenv, mode))
 DBENV_METHOD(get_lg_max, (u_int32_t *maxp), (dbenv, maxp))
 DBENV_METHOD(set_lg_max, (u_int32_t max), (dbenv, max))
 DBENV_METHOD(get_lg_regionmax, (u_int32_t *regionmaxp), (dbenv, regionmaxp))
@@ -635,6 +721,10 @@ DBENV_METHOD(set_lk_max_locks, (u_int32_t max_locks), (dbenv, max_locks))
 DBENV_METHOD(get_lk_max_objects, (u_int32_t *max_objectsp),
     (dbenv, max_objectsp))
 DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects))
+DBENV_METHOD(get_mp_max_openfd, (int *maxopenfdp), (dbenv, maxopenfdp))
+DBENV_METHOD(set_mp_max_openfd, (int maxopenfd), (dbenv, maxopenfd))
+DBENV_METHOD(get_mp_max_write, (int *maxwritep, int *maxwrite_sleepp), (dbenv, maxwritep, maxwrite_sleepp))
+DBENV_METHOD(set_mp_max_write, (int maxwrite, int maxwrite_sleep), (dbenv, maxwrite, maxwrite_sleep))
 DBENV_METHOD(get_mp_mmapsize, (size_t *mmapsizep), (dbenv, mmapsizep))
 DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize))
 DBENV_METHOD_VOID(get_msgfile, (FILE **msgfilep), (dbenv, msgfilep))
@@ -644,6 +734,8 @@ DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir))
 DBENV_METHOD(get_tx_max, (u_int32_t *tx_maxp), (dbenv, tx_maxp))
 DBENV_METHOD(set_tx_max, (u_int32_t tx_max), (dbenv, tx_max))
 
+DBENV_METHOD(stat_print, (u_int32_t flags), (dbenv, flags))
+
 DBENV_METHOD_QUIET(set_alloc,
     (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn,
     db_free_fcn_type free_fcn),
@@ -694,7 +786,8 @@ int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
 
 	feedback_callback_ = arg;
 
-	return (dbenv->set_feedback(dbenv, _feedback_intercept_c));
+	return (dbenv->set_feedback(dbenv,
+	    arg == 0 ? 0 : _feedback_intercept_c));
 }
 
 DBENV_METHOD(get_flags, (u_int32_t *flagsp), (dbenv, flagsp))
@@ -708,7 +801,7 @@ void DbEnv::set_msgcall(void (*arg)(const DbEnv *, const char *))
 	message_stream_ = 0;
 
 	dbenv->set_msgcall(dbenv, (arg == 0) ? 0 :
-			   _stream_message_function_c);
+	    _stream_message_function_c);
 }
 
 __DB_STD(ostream) *DbEnv::get_message_stream()
@@ -733,7 +826,8 @@ int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
 
 	paniccall_callback_ = arg;
 
-	return (dbenv->set_paniccall(dbenv, _paniccall_intercept_c));
+	return (dbenv->set_paniccall(dbenv,
+	    arg == 0 ? 0 : _paniccall_intercept_c));
 }
 
 DBENV_METHOD(set_rpc_server,
@@ -741,9 +835,6 @@ DBENV_METHOD(set_rpc_server,
     (dbenv, cl, host, tsec, ssec, flags))
 DBENV_METHOD(get_shm_key, (long *shm_keyp), (dbenv, shm_keyp))
 DBENV_METHOD(set_shm_key, (long shm_key), (dbenv, shm_key))
-// Note: this changes from last_known_error_policy to error_policy()
-DBENV_METHOD(get_tas_spins, (u_int32_t *argp), (dbenv, argp))
-DBENV_METHOD(set_tas_spins, (u_int32_t arg), (dbenv, arg))
 
 int DbEnv::set_app_dispatch
     (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
@@ -753,18 +844,77 @@ int DbEnv::set_app_dispatch
 
 	app_dispatch_callback_ = arg;
 	if ((ret = dbenv->set_app_dispatch(dbenv,
-	    _app_dispatch_intercept_c)) != 0)
+	    arg == 0 ? 0 : _app_dispatch_intercept_c)) != 0)
 		DB_ERROR(this, "DbEnv::set_app_dispatch", ret, error_policy());
 
 	return (ret);
 }
 
+int DbEnv::set_isalive
+    (int (*arg)(DbEnv *, pid_t, db_threadid_t))
+{
+	DB_ENV *dbenv = unwrap(this);
+	int ret;
+
+	isalive_callback_ = arg;
+	if ((ret = dbenv->set_isalive(dbenv,
+	    arg == 0 ? 0 : _isalive_intercept_c)) != 0)
+		DB_ERROR(this, "DbEnv::set_isalive", ret, error_policy());
+
+	return (ret);
+}
+
 DBENV_METHOD(get_tx_timestamp, (time_t *timestamp), (dbenv, timestamp))
 DBENV_METHOD(set_tx_timestamp, (time_t *timestamp), (dbenv, timestamp))
 DBENV_METHOD(get_verbose, (u_int32_t which, int *onoffp),
     (dbenv, which, onoffp))
 DBENV_METHOD(set_verbose, (u_int32_t which, int onoff), (dbenv, which, onoff))
 
+DBENV_METHOD(mutex_alloc,
+    (u_int32_t flags, db_mutex_t *mutexp), (dbenv, flags, mutexp))
+DBENV_METHOD(mutex_free, (db_mutex_t mutex), (dbenv, mutex))
+DBENV_METHOD(mutex_get_align, (u_int32_t *argp), (dbenv, argp))
+DBENV_METHOD(mutex_get_increment, (u_int32_t *argp), (dbenv, argp))
+DBENV_METHOD(mutex_get_max, (u_int32_t *argp), (dbenv, argp))
+DBENV_METHOD(mutex_get_tas_spins, (u_int32_t *argp), (dbenv, argp))
+DBENV_METHOD(mutex_lock, (db_mutex_t mutex), (dbenv, mutex))
+DBENV_METHOD(mutex_set_align, (u_int32_t arg), (dbenv, arg))
+DBENV_METHOD(mutex_set_increment, (u_int32_t arg), (dbenv, arg))
+DBENV_METHOD(mutex_set_max, (u_int32_t arg), (dbenv, arg))
+DBENV_METHOD(mutex_set_tas_spins, (u_int32_t arg), (dbenv, arg))
+DBENV_METHOD(mutex_stat,
+    (DB_MUTEX_STAT **statp, u_int32_t flags), (dbenv, statp, flags))
+DBENV_METHOD(mutex_stat_print, (u_int32_t flags), (dbenv, flags))
+DBENV_METHOD(mutex_unlock, (db_mutex_t mutex), (dbenv, mutex))
+
+int DbEnv::set_thread_id(void (*arg)(DbEnv *, pid_t *, db_threadid_t *))
+{
+	DB_ENV *dbenv = unwrap(this);
+	int ret;
+
+	thread_id_callback_ = arg;
+	if ((ret = dbenv->set_thread_id(dbenv,
+	    arg == 0 ? 0 : _thread_id_intercept_c)) != 0)
+		DB_ERROR(this, "DbEnv::set_thread_id", ret, error_policy());
+
+	return (ret);
+}
+
+int DbEnv::set_thread_id_string(
+    char *(*arg)(DbEnv *, pid_t, db_threadid_t, char *))
+{
+	DB_ENV *dbenv = unwrap(this);
+	int ret;
+
+	thread_id_string_callback_ = arg;
+	if ((ret = dbenv->set_thread_id_string(dbenv,
+	    arg == 0 ? 0 : _thread_id_string_intercept_c)) != 0)
+		DB_ERROR(this, "DbEnv::set_thread_id_string", ret,
+		    error_policy());
+
+	return (ret);
+}
+
 int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
 {
 	DB_ENV *env = unwrap(this);
@@ -831,15 +981,14 @@ DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags),
 DBENV_METHOD(txn_stat_print, (u_int32_t flags), (dbenv, flags))
 
 int DbEnv::set_rep_transport(int myid,
-    int (*f_send)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int,
-		  u_int32_t))
+    int (*arg)(DbEnv *, const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t))
 {
 	DB_ENV *dbenv = unwrap(this);
 	int ret;
 
-	rep_send_callback_ = f_send;
-	if ((ret = dbenv->set_rep_transport(dbenv,
-	    myid, _rep_send_intercept_c)) != 0)
+	rep_send_callback_ = arg;
+	if ((ret = dbenv->set_rep_transport(dbenv, myid,
+	    arg == 0 ? 0 : _rep_send_intercept_c)) != 0)
 		DB_ERROR(this, "DbEnv::set_rep_transport", ret, error_policy());
 
 	return (ret);
@@ -849,6 +998,10 @@ DBENV_METHOD(rep_elect,
     (int nsites,
     int nvotes, int priority, u_int32_t timeout, int *eidp, u_int32_t flags),
     (dbenv, nsites, nvotes, priority, timeout, eidp, flags))
+DBENV_METHOD(rep_flush, (), (dbenv))
+DBENV_METHOD(rep_get_config, (u_int32_t which, int *onoffp),
+    (dbenv, which, onoffp))
+DBENV_METHOD(set_rep_request, (u_int32_t min, u_int32_t max), (dbenv, min, max))
 
 int DbEnv::rep_process_message(Dbt *control,
     Dbt *rec, int *idp, DbLsn *ret_lsnp)
@@ -864,6 +1017,8 @@ int DbEnv::rep_process_message(Dbt *control,
 	return (ret);
 }
 
+DBENV_METHOD(rep_set_config,
+    (u_int32_t which, int onoff), (dbenv, which, onoff))
 DBENV_METHOD(rep_start,
     (Dbt *cookie, u_int32_t flags),
     (dbenv, (DBT *)cookie, flags))
@@ -871,6 +1026,7 @@ DBENV_METHOD(rep_start,
 DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags),
     (dbenv, statp, flags))
 DBENV_METHOD(rep_stat_print, (u_int32_t flags), (dbenv, flags))
+DBENV_METHOD(rep_sync, (u_int32_t flags), (dbenv, flags))
 
 DBENV_METHOD(get_rep_limit, (u_int32_t *gbytesp, u_int32_t *bytesp),
     (dbenv, gbytesp, bytesp))
diff --git a/storage/bdb/cxx/cxx_except.cpp b/storage/bdb/cxx/cxx_except.cpp
index 22eb0eae98b..b0bf7c0690e 100644
--- a/storage/bdb/cxx/cxx_except.cpp
+++ b/storage/bdb/cxx/cxx_except.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_except.cpp,v 11.28 2004/09/22 03:34:48 bostic Exp $
+ * $Id: cxx_except.cpp,v 12.2 2005/10/14 12:20:04 mjc Exp $
  */
 
 #include "db_config.h"
@@ -295,6 +295,35 @@ int DbLockNotGrantedException::get_index() const
 	return index_;
 }
 
+////////////////////////////////////////////////////////////////////////
+//                                                                    //
+//                            DbRepHandleDeadException                //
+//                                                                    //
+////////////////////////////////////////////////////////////////////////
+
+DbRepHandleDeadException::~DbRepHandleDeadException() throw()
+{
+}
+
+DbRepHandleDeadException::DbRepHandleDeadException(const char *description)
+:	DbException(description, DB_REP_HANDLE_DEAD)
+{
+}
+
+DbRepHandleDeadException::DbRepHandleDeadException
+    (const DbRepHandleDeadException &that)
+:	DbException(that)
+{
+}
+
+DbRepHandleDeadException
+&DbRepHandleDeadException::operator =(const DbRepHandleDeadException &that)
+{
+	if (this != &that)
+		DbException::operator=(that);
+	return (*this);
+}
+
 ////////////////////////////////////////////////////////////////////////
 //                                                                    //
 //                            DbRunRecoveryException                  //
diff --git a/storage/bdb/cxx/cxx_lock.cpp b/storage/bdb/cxx/cxx_lock.cpp
index d22cf662be7..47f27ae3504 100644
--- a/storage/bdb/cxx/cxx_lock.cpp
+++ b/storage/bdb/cxx/cxx_lock.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_lock.cpp,v 11.19 2004/01/28 03:35:56 bostic Exp $
+ * $Id: cxx_lock.cpp,v 12.1 2005/06/16 20:20:59 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/cxx/cxx_logc.cpp b/storage/bdb/cxx/cxx_logc.cpp
index c5399b531a8..63d7fd9fe17 100644
--- a/storage/bdb/cxx/cxx_logc.cpp
+++ b/storage/bdb/cxx/cxx_logc.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_logc.cpp,v 11.13 2004/02/05 02:25:12 mjc Exp $
+ * $Id: cxx_logc.cpp,v 12.1 2005/06/16 20:21:00 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/cxx/cxx_mpool.cpp b/storage/bdb/cxx/cxx_mpool.cpp
index 54747b0926c..475a18b3e3f 100644
--- a/storage/bdb/cxx/cxx_mpool.cpp
+++ b/storage/bdb/cxx/cxx_mpool.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_mpool.cpp,v 11.28 2004/01/28 03:35:56 bostic Exp $
+ * $Id: cxx_mpool.cpp,v 12.1 2005/06/16 20:21:02 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/cxx/cxx_multi.cpp b/storage/bdb/cxx/cxx_multi.cpp
index 0961f2921b0..ca80bbafbc4 100644
--- a/storage/bdb/cxx/cxx_multi.cpp
+++ b/storage/bdb/cxx/cxx_multi.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_multi.cpp,v 1.4 2004/01/28 03:35:56 bostic Exp $
+ * $Id: cxx_multi.cpp,v 12.3 2005/09/30 07:40:20 mjc Exp $
  */
 
 #include "db_config.h"
@@ -29,7 +29,7 @@ bool DbMultipleDataIterator::next(Dbt &data)
 		if (data.get_size() == 0 && data.get_data() == data_)
 			data.set_data(0);
 	}
-	return (data.get_data() != 0);
+	return (p_ != 0);
 }
 
 bool DbMultipleKeyDataIterator::next(Dbt &key, Dbt &data)
@@ -46,7 +46,7 @@ bool DbMultipleKeyDataIterator::next(Dbt &key, Dbt &data)
 		data.set_data(data_ + *p_--);
 		data.set_size(*p_--);
 	}
-	return (data.get_data() != 0);
+	return (p_ != 0);
 }
 
 bool DbMultipleRecnoDataIterator::next(db_recno_t &recno, Dbt &data)
@@ -61,5 +61,5 @@ bool DbMultipleRecnoDataIterator::next(db_recno_t &recno, Dbt &data)
 		data.set_data(data_ + *p_--);
 		data.set_size(*p_--);
 	}
-	return (recno != 0);
+	return (p_ != 0);
 }
diff --git a/storage/bdb/cxx/cxx_seq.cpp b/storage/bdb/cxx/cxx_seq.cpp
index 60bc7455b36..ed8997f0340 100644
--- a/storage/bdb/cxx/cxx_seq.cpp
+++ b/storage/bdb/cxx/cxx_seq.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_seq.cpp,v 11.3 2004/09/23 20:05:08 mjc Exp $
+ * $Id: cxx_seq.cpp,v 12.2 2005/10/13 20:49:47 bostic Exp $
  */
 
 #include "db_config.h"
@@ -90,20 +90,20 @@ DBSEQ_METHOD(get_range, (db_seq_t *minp, db_seq_t *maxp), (seq, minp, maxp), 0)
 DBSEQ_METHOD(set_range, (db_seq_t min, db_seq_t max), (seq, min, max), 0)
 
 Db *DbSequence::get_db()
-{       
+{     
 	DB_SEQUENCE *seq = unwrap(this);
 	DB *db;
 	(void)seq->get_db(seq, &db);
 	return Db::get_Db(db);
-}   
+} 
 
 Dbt *DbSequence::get_key()
-{       
+{     
 	DB_SEQUENCE *seq = unwrap(this);
 	memset(&key_, 0, sizeof (DBT));
 	(void)seq->get_key(seq, &key_);
 	return Dbt::get_Dbt(&key_);
-}   
+} 
 
 // static method
 DbSequence *DbSequence::wrap_DB_SEQUENCE(DB_SEQUENCE *seq)
diff --git a/storage/bdb/cxx/cxx_txn.cpp b/storage/bdb/cxx/cxx_txn.cpp
index 89ad7f02b4f..64ee3b9fff5 100644
--- a/storage/bdb/cxx/cxx_txn.cpp
+++ b/storage/bdb/cxx/cxx_txn.cpp
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_txn.cpp,v 11.33 2004/09/22 22:20:31 mjc Exp $
+ * $Id: cxx_txn.cpp,v 12.2 2005/06/16 20:21:03 bostic Exp $
  */
 
 #include "db_config.h"
@@ -67,7 +67,9 @@ u_int32_t DbTxn::id()
 	return (txn->id(txn));		// no error
 }
 
+DBTXN_METHOD(get_name, 0, (const char **namep), (txn, namep))
 DBTXN_METHOD(prepare, 0, (u_int8_t *gid), (txn, gid))
+DBTXN_METHOD(set_name, 0, (const char *name), (txn, name))
 DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags),
     (txn, timeout, flags))
 
diff --git a/storage/bdb/db/crdel.src b/storage/bdb/db/crdel.src
index 034e7b82f57..ba03fea9312 100644
--- a/storage/bdb/db/crdel.src
+++ b/storage/bdb/db/crdel.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: crdel.src,v 11.29 2004/06/17 17:35:15 bostic Exp $
+ * $Id: crdel.src,v 12.2 2005/09/28 17:44:18 margo Exp $
  */
 
 PREFIX	__crdel
@@ -41,3 +41,40 @@ PGDBT	page		DBT		s
 POINTER	lsn		DB_LSN *	lu
 END
 
+/*
+ * Inmem_create: Log the creation of an in-memory database.
+ *
+ * name: Name of the database
+ * fid: File id of the database
+ */
+BEGIN	inmem_create	138
+ARG	fileid	int32_t		ld
+DBT	name	DBT		s
+DBT	fid	DBT		s
+ARG	pgsize	u_int32_t	lu
+END
+
+/*
+ * Inmem_rename: Log the renaming of an in-memory only database.
+ *
+ * oldname: database's starting name
+ * newname: database's ending name
+ * fid: fileid
+ */
+BEGIN	inmem_rename	139
+DBT	oldname		DBT		s
+DBT	newname		DBT		s
+DBT	fid		DBT		s
+END
+
+/*
+ * Inmem_remove: Log the removal of an in-memory only database.
+ *
+ * name: database's ending name
+ * fid: fileid
+ */
+BEGIN	inmem_remove	140
+DBT	name		DBT		s
+DBT	fid		DBT		s
+END
+
diff --git a/storage/bdb/db/crdel_rec.c b/storage/bdb/db/crdel_rec.c
index 7ff4fbd06e9..a94c6cbbc1f 100644
--- a/storage/bdb/db/crdel_rec.c
+++ b/storage/bdb/db/crdel_rec.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: crdel_rec.c,v 11.68 2004/04/29 00:07:55 ubell Exp $
+ * $Id: crdel_rec.c,v 12.6 2005/10/20 18:57:04 bostic Exp $
  */
 
 #include "db_config.h"
@@ -18,9 +18,11 @@
 #include "db_int.h"
 #include "dbinc/db_page.h"
 #include "dbinc/db_shash.h"
+#include "dbinc/fop.h"
 #include "dbinc/hash.h"
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
+#include "dbinc/txn.h"
 
 /*
  * __crdel_metasub_recover --
@@ -47,22 +49,39 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	COMPQUIET(info, NULL);
 	REC_PRINT(__crdel_metasub_print);
-	REC_INTRO(__crdel_metasub_read, 0);
+	REC_INTRO(__crdel_metasub_read, 0, 0);
 
 	if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
-		*lsnp = argp->prev_lsn;
-		ret = 0;
-		goto out;
+		/* If this is an in-memory file, this might be OK. */
+		if (F_ISSET(file_dbp, DB_AM_INMEM) && (ret = __memp_fget(mpf,
+		    &argp->pgno, DB_MPOOL_CREATE, &pagep)) == 0)
+			LSN_NOT_LOGGED(LSN(pagep));
+		else {
+			*lsnp = argp->prev_lsn;
+			ret = 0;
+			goto out;
+		}
 	}
 
 	modified = 0;
 	cmp_p = log_compare(&LSN(pagep), &argp->lsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->lsn);
 
 	if (cmp_p == 0 && DB_REDO(op)) {
 		memcpy(pagep, argp->page.data, argp->page.size);
 		LSN(pagep) = *lsnp;
 		modified = 1;
+
+		/*
+		 * If this was an in-memory database and we are re-creating
+		 * and this is the meta-data page, then we need to set up a
+		 * bunch of fields in the dbo as well.
+		 */
+		if (F_ISSET(file_dbp, DB_AM_INMEM) &&
+		    argp->pgno == PGNO_BASE_MD &&
+		    (ret = __db_meta_setup(file_dbp->dbenv,
+		    file_dbp, file_dbp->dname, (DBMETA *)pagep, 0, 1)) != 0)
+			goto out;
 	} else if (DB_UNDO(op)) {
 		/*
 		 * We want to undo this page creation.  The page creation
@@ -70,10 +89,11 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
 		 * was logged separately. Then we wrote the meta-data onto
 		 * the page.  So long as we restore the LSN, then the recovery
 		 * for __bam_new will do everything else.
-		 * Don't bother checking the lsn on the page.  If we
-		 * are rolling back the next thing is that this page
-		 * will get freed.  Opening the subdb will have reinitialized
-		 * the page, but not the lsn.
+		 *
+		 * Don't bother checking the lsn on the page.  If we are
+		 * rolling back the next thing is that this page will get
+		 * freed.  Opening the subdb will have reinitialized the
+		 * page, but not the lsn.
 		 */
 		LSN(pagep) = argp->lsn;
 		modified = 1;
@@ -89,3 +109,186 @@ out:	if (pagep != NULL)
 		(void)__memp_fput(mpf, pagep, 0);
 	REC_CLOSE;
 }
+
+/*
+ * __crdel_inmem_create_recover --
+ *	Recovery function for inmem_create.
+ *
+ * PUBLIC: int __crdel_inmem_create_recover
+ * PUBLIC:   __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_inmem_create_recover(dbenv, dbtp, lsnp, op, info)
+	DB_ENV *dbenv;
+	DBT *dbtp;
+	DB_LSN *lsnp;
+	db_recops op;
+	void *info;
+{
+	DB *dbp;
+	__crdel_inmem_create_args *argp;
+	int do_close, ret, t_ret;
+
+	COMPQUIET(info, NULL);
+	dbp = NULL;
+	do_close = 0;
+	REC_PRINT(__crdel_inmem_create_print);
+	REC_NOOP_INTRO(__crdel_inmem_create_read);
+
+	/* First, see if the DB handle already exists. */
+	if (argp->fileid == DB_LOGFILEID_INVALID) {
+		if (DB_REDO(op))
+			ret = ENOENT;
+		else
+			ret = 0;
+	} else
+		ret = __dbreg_id_to_db_int(dbenv,
+		    argp->txnid, &dbp, argp->fileid, 0, 0);
+
+	if (DB_REDO(op)) {
+		/*
+		 * If the dbreg failed, that means that we're creating a
+		 * tmp file.
+		 */
+		if (ret != 0) {
+			if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+				goto out;
+
+			F_SET(dbp, DB_AM_RECOVER | DB_AM_INMEM);
+			memcpy(dbp->fileid, argp->fid.data, DB_FILE_ID_LEN);
+			if (((ret = __os_strdup(dbenv,
+			    argp->name.data, &dbp->dname)) != 0))
+				goto out;
+
+			/*
+			 * This DBP is never going to be entered into the
+			 * dbentry table, so if we leave it open here,
+			 * then we're going to lose it.
+			 */
+			do_close = 1;
+		}
+
+		/* Now, set the fileid. */
+		memcpy(dbp->fileid, argp->fid.data, argp->fid.size);
+		if ((ret = __memp_set_fileid(dbp->mpf, dbp->fileid)) != 0)
+			goto out;
+		dbp->preserve_fid = 1;
+		MAKE_INMEM(dbp);
+		if ((ret = __db_dbenv_setup(dbp,
+		    NULL, NULL, argp->name.data, TXN_INVALID, 0)) != 0)
+			goto out;
+		ret = __db_dbenv_mpool(dbp, argp->name.data, 0);
+
+		if (ret == ENOENT) {
+			dbp->pgsize = argp->pgsize;
+			if ((ret = __db_dbenv_mpool(dbp,
+			    argp->name.data, DB_CREATE)) != 0)
+				goto out;
+		} else if (ret != 0)
+			goto out;
+	}
+
+	if (DB_UNDO(op)) {
+		if (ret == 0)
+			ret = __memp_nameop(dbenv, argp->fid.data, NULL,
+			    (const char *)argp->name.data,  NULL, 1);
+
+		if (ret == ENOENT || ret == DB_DELETED)
+			ret = 0;
+		else
+			goto out;
+	}
+
+	*lsnp = argp->prev_lsn;
+
+out:	if (dbp != NULL) {
+		t_ret = 0;
+		if (DB_UNDO(op))
+			t_ret = __db_refresh(dbp, NULL, DB_NOSYNC, NULL, 0);
+		else if (do_close || ret != 0)
+			t_ret = __db_close(dbp, NULL, DB_NOSYNC);
+		if (t_ret != 0 && ret == 0)
+			ret = t_ret;
+	}
+	REC_NOOP_CLOSE;
+}
+
+/*
+ * __crdel_inmem_rename_recover --
+ *	Recovery function for inmem_rename.
+ *
+ * PUBLIC: int __crdel_inmem_rename_recover
+ * PUBLIC:   __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_inmem_rename_recover(dbenv, dbtp, lsnp, op, info)
+	DB_ENV *dbenv;
+	DBT *dbtp;
+	DB_LSN *lsnp;
+	db_recops op;
+	void *info;
+{
+	__crdel_inmem_rename_args *argp;
+	u_int8_t *fileid;
+	int ret;
+
+	COMPQUIET(info, NULL);
+	REC_PRINT(__crdel_inmem_rename_print);
+	REC_NOOP_INTRO(__crdel_inmem_rename_read);
+	fileid = argp->fid.data;
+
+	/* Void out errors because the files may or may not still exist. */
+	if (DB_REDO(op))
+		(void)__memp_nameop(dbenv, fileid,
+		    (const char *)argp->newname.data,
+		    (const char *)argp->oldname.data,
+		    (const char *)argp->newname.data, 1);
+
+	if (DB_UNDO(op))
+		(void)__memp_nameop(dbenv, fileid,
+		    (const char *)argp->oldname.data,
+		    (const char *)argp->newname.data,
+		    (const char *)argp->oldname.data, 1);
+
+	*lsnp = argp->prev_lsn;
+	ret = 0;
+
+	REC_NOOP_CLOSE;
+}
+
+/*
+ * __crdel_inmem_remove_recover --
+ *	Recovery function for inmem_remove.
+ *
+ * PUBLIC: int __crdel_inmem_remove_recover
+ * PUBLIC:   __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_inmem_remove_recover(dbenv, dbtp, lsnp, op, info)
+	DB_ENV *dbenv;
+	DBT *dbtp;
+	DB_LSN *lsnp;
+	db_recops op;
+	void *info;
+{
+	__crdel_inmem_remove_args *argp;
+	int ret;
+
+	COMPQUIET(info, NULL);
+	REC_PRINT(__crdel_inmem_remove_print);
+	REC_NOOP_INTRO(__crdel_inmem_remove_read);
+
+	/*
+	 * Since removes are delayed; there is no undo for a remove; only redo.
+	 * The remove may fail, which is OK.
+	 */
+	if (DB_REDO(op)) {
+		(void)__memp_nameop(dbenv,
+		    argp->fid.data, NULL, argp->name.data, NULL, 1);
+	}
+
+	*lsnp = argp->prev_lsn;
+	ret = 0;
+
+	REC_NOOP_CLOSE;
+}
diff --git a/storage/bdb/db/db.c b/storage/bdb/db/db.c
index 28aecc09cff..432919133a2 100644
--- a/storage/bdb/db/db.c
+++ b/storage/bdb/db/db.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -36,7 +36,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db.c,v 11.300 2004/10/26 17:38:41 bostic Exp $
+ * $Id: db.c,v 12.22 2005/11/12 17:41:44 bostic Exp $
  */
 
 #include "db_config.h"
@@ -52,6 +52,7 @@
 #include "dbinc/db_shash.h"
 #include "dbinc/db_swap.h"
 #include "dbinc/btree.h"
+#include "dbinc/fop.h"
 #include "dbinc/hash.h"
 #include "dbinc/lock.h"
 #include "dbinc/log.h"
@@ -241,8 +242,9 @@ __db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags)
 		    __memp_fget(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0)
 			goto err;
 
-		/* Free the root on the master db. */
-		if (TYPE(p) == P_BTREEMETA) {
+		/* Free the root on the master db if it was created. */
+		if (TYPE(p) == P_BTREEMETA &&
+		    ((BTMETA *)p)->root != PGNO_INVALID) {
 			if ((ret = __memp_fget(mdbp->mpf,
 			     &((BTMETA *)p)->root, 0, &r)) != 0)
 				goto err;
@@ -389,18 +391,17 @@ done:	/*
  *	Set up the underlying environment during a db_open.
  *
  * PUBLIC: int __db_dbenv_setup __P((DB *,
- * PUBLIC:     DB_TXN *, const char *, u_int32_t, u_int32_t));
+ * PUBLIC:     DB_TXN *, const char *, const char *, u_int32_t, u_int32_t));
  */
 int
-__db_dbenv_setup(dbp, txn, fname, id, flags)
+__db_dbenv_setup(dbp, txn, fname, dname, id, flags)
 	DB *dbp;
 	DB_TXN *txn;
-	const char *fname;
+	const char *fname, *dname;
 	u_int32_t id, flags;
 {
 	DB *ldbp;
 	DB_ENV *dbenv;
-	DB_MPOOL *dbmp;
 	u_int32_t maxid;
 	int ret;
 
@@ -415,25 +416,20 @@ __db_dbenv_setup(dbp, txn, fname, id, flags)
 		    dbenv, 0, dbp->pgsize * DB_MINPAGECACHE, 0)) != 0)
 			return (ret);
 
-		if ((ret = __dbenv_open(dbenv, NULL, DB_CREATE |
+		if ((ret = __env_open(dbenv, NULL, DB_CREATE |
 		    DB_INIT_MPOOL | DB_PRIVATE | LF_ISSET(DB_THREAD), 0)) != 0)
 			return (ret);
 	}
 
 	/* Join the underlying cache. */
-	if ((ret = __db_dbenv_mpool(dbp, fname, flags)) != 0)
+	if ((!F_ISSET(dbp, DB_AM_INMEM) || dname == NULL) &&
+	    (ret = __db_dbenv_mpool(dbp, fname, flags)) != 0)
 		return (ret);
 
-	/*
-	 * We may need a per-thread mutex.  Allocate it from the mpool
-	 * region, there's supposed to be extra space there for that purpose.
-	 */
-	if (LF_ISSET(DB_THREAD)) {
-		dbmp = dbenv->mp_handle;
-		if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbp->mutexp,
-		    MUTEX_ALLOC | MUTEX_THREAD)) != 0)
-			return (ret);
-	}
+	/* We may need a per-thread mutex. */
+	if (LF_ISSET(DB_THREAD) && (ret = __mutex_alloc(
+	    dbenv, MTX_DB_HANDLE, DB_MUTEX_THREAD, &dbp->mutex)) != 0)
+		return (ret);
 
 	/*
 	 * Set up a bookkeeping entry for this database in the log region,
@@ -441,13 +437,14 @@ __db_dbenv_setup(dbp, txn, fname, id, flags)
 	 * or a replication client, where we won't log registries, we'll
 	 * still need an FNAME struct, so LOGGING_ON is the correct macro.
 	 */
-	if (LOGGING_ON(dbenv) &&
-	    (ret = __dbreg_setup(dbp, fname, id)) != 0)
+	if (LOGGING_ON(dbenv) && dbp->log_filename == NULL &&
+	    (ret = __dbreg_setup(dbp,
+	    F_ISSET(dbp, DB_AM_INMEM) ? dname : fname, id)) != 0)
 		return (ret);
 
 	/*
 	 * If we're actively logging and our caller isn't a recovery function
-	 * that already did so, assign this dbp a log fileid.
+	 * that already did so, then assign this dbp a log fileid.
 	 */
 	if (DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) &&
 #if !defined(DEBUG_ROP)
@@ -465,13 +462,18 @@ __db_dbenv_setup(dbp, txn, fname, id, flags)
 	 * routines, where we don't want to do a lot of ugly and
 	 * expensive memcmps.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	for (maxid = 0, ldbp = LIST_FIRST(&dbenv->dblist);
 	    ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		if (fname != NULL &&
-		    memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN) == 0 &&
-		    ldbp->meta_pgno == dbp->meta_pgno)
-			break;
+		if (!F_ISSET(dbp, DB_AM_INMEM)) {
+			if (memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN)
+			    == 0 && ldbp->meta_pgno == dbp->meta_pgno)
+				break;
+		} else if (dname != NULL) {
+			if (F_ISSET(ldbp, DB_AM_INMEM) &&
+			    strcmp(ldbp->dname, dname) == 0)
+				break;
+		}
 		if (ldbp->adj_fileid > maxid)
 			maxid = ldbp->adj_fileid;
 	}
@@ -493,7 +495,7 @@ __db_dbenv_setup(dbp, txn, fname, id, flags)
 		dbp->adj_fileid = ldbp->adj_fileid;
 		LIST_INSERT_AFTER(ldbp, dbp, dblistlinks);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	return (0);
 }
@@ -514,12 +516,19 @@ __db_dbenv_mpool(dbp, fname, flags)
 	DBT pgcookie;
 	DB_MPOOLFILE *mpf;
 	DB_PGINFO pginfo;
+	int fidset, ftype, ret;
+	int32_t lsn_off;
+	u_int8_t nullfid[DB_FILE_ID_LEN];
 	u_int32_t clear_len;
-	int ftype, ret;
 
 	COMPQUIET(mpf, NULL);
 
 	dbenv = dbp->dbenv;
+	lsn_off = 0;
+
+	/* It's possible that this database is already open. */
+	if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+		return (0);
 
 	/*
 	 * If we need to pre- or post-process a file's pages on I/O, set the
@@ -534,17 +543,28 @@ __db_dbenv_mpool(dbp, fname, flags)
 	case DB_RECNO:
 		ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
 		    ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
-		clear_len = CRYPTO_ON(dbenv) ? dbp->pgsize : DB_PAGE_DB_LEN;
+		clear_len = CRYPTO_ON(dbenv) ?
+		    (dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET) :
+		    DB_PAGE_DB_LEN;
 		break;
 	case DB_HASH:
 		ftype = DB_FTYPE_SET;
-		clear_len = CRYPTO_ON(dbenv) ? dbp->pgsize : DB_PAGE_DB_LEN;
+		clear_len = CRYPTO_ON(dbenv) ?
+		    (dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET) :
+		    DB_PAGE_DB_LEN;
 		break;
 	case DB_QUEUE:
 		ftype = F_ISSET(dbp,
 		    DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM) ?
 		    DB_FTYPE_SET : DB_FTYPE_NOTSET;
-		clear_len = CRYPTO_ON(dbenv) ? dbp->pgsize : DB_PAGE_QUEUE_LEN;
+
+		/*
+		 * If we came in here without a pagesize set, then we need
+		 * to mark the in-memory handle as having clear_len not
+		 * set, because we don't really know the clear length or
+		 * the page size yet (since the file doesn't yet exist).
+		 */
+		clear_len = dbp->pgsize != 0 ? dbp->pgsize : DB_CLEARLEN_NOTSET;
 		break;
 	case DB_UNKNOWN:
 		/*
@@ -564,6 +584,18 @@ __db_dbenv_mpool(dbp, fname, flags)
 			clear_len = DB_PAGE_DB_LEN;
 			break;
 		}
+
+		/*
+		 * This might be an in-memory file and we won't know its
+		 * file type until after we open it and read the meta-data
+		 * page.
+		 */
+		if (F_ISSET(dbp, DB_AM_INMEM)) {
+			clear_len = DB_CLEARLEN_NOTSET;
+			ftype = DB_FTYPE_NOTSET;
+			lsn_off = DB_LSN_OFF_NOTSET;
+			break;
+		}
 		/* FALLTHROUGH */
 	default:
 		return (__db_unknown_type(dbenv, "DB->open", dbp->type));
@@ -571,10 +603,14 @@ __db_dbenv_mpool(dbp, fname, flags)
 
 	mpf = dbp->mpf;
 
+	memset(nullfid, 0, DB_FILE_ID_LEN);
+	fidset = memcmp(nullfid, dbp->fileid, DB_FILE_ID_LEN);
+	if (fidset)
+		(void)__memp_set_fileid(mpf, dbp->fileid);
+
 	(void)__memp_set_clear_len(mpf, clear_len);
-	(void)__memp_set_fileid(mpf, dbp->fileid);
 	(void)__memp_set_ftype(mpf, ftype);
-	(void)__memp_set_lsn_offset(mpf, 0);
+	(void)__memp_set_lsn_offset(mpf, lsn_off);
 
 	pginfo.db_pagesize = dbp->pgsize;
 	pginfo.flags =
@@ -585,12 +621,34 @@ __db_dbenv_mpool(dbp, fname, flags)
 	(void)__memp_set_pgcookie(mpf, &pgcookie);
 
 	if ((ret = __memp_fopen(mpf, NULL, fname,
-	    LF_ISSET(DB_RDONLY | DB_NOMMAP |
-	    DB_ODDFILESIZE | DB_TRUNCATE) |
+	    LF_ISSET(DB_CREATE | DB_DURABLE_UNKNOWN |
+		DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE) |
 	    (F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_DIRECT : 0) |
 	    (F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_TXN_NOT_DURABLE : 0),
-	    0, dbp->pgsize)) != 0)
+	    0, dbp->pgsize)) != 0) {
+		/*
+		 * The open didn't work; we need to reset the mpf,
+		 * retaining the in-memory semantics (if any).
+		 */
+		(void)__memp_fclose(dbp->mpf, 0);
+		(void)__memp_fcreate(dbenv, &dbp->mpf);
+		if (F_ISSET(dbp, DB_AM_INMEM))
+			MAKE_INMEM(dbp);
 		return (ret);
+	}
+
+	/*
+	 * Set the open flag.  We use it to mean that the dbp has gone
+	 * through mpf setup, including dbreg_register.  Also, below,
+	 * the underlying access method open functions may want to do
+	 * things like acquire cursors, so the open flag has to be set
+	 * before calling them.
+	 */
+	F_SET(dbp, DB_AM_OPEN_CALLED);
+	if (!fidset && fname != NULL) {
+		(void)__memp_get_fileid(dbp->mpf, dbp->fileid);
+		dbp->preserve_fid = 1;
+	}
 
 	return (0);
 }
@@ -624,7 +682,7 @@ __db_close(dbp, txn, flags)
 		(void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0);
 
 	/* Refresh the structure and close any underlying resources. */
-	ret = __db_refresh(dbp, txn, flags, &deferred_close);
+	ret = __db_refresh(dbp, txn, flags, &deferred_close, 0);
 
 	/*
 	 * If we've deferred the close because the logging of the close failed,
@@ -644,11 +702,11 @@ __db_close(dbp, txn, flags)
 	 * dbenv, someone's already badly screwed up, so there's no reason
 	 * to bother engineering around this possibility.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	db_ref = --dbenv->db_ref;
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 	if (F_ISSET(dbenv, DB_ENV_DBLOCAL) && db_ref == 0 &&
-	    (t_ret = __dbenv_close(dbenv, 0)) != 0 && ret == 0)
+	    (t_ret = __env_close(dbenv, 0)) != 0 && ret == 0)
 		ret = t_ret;
 
 	/* Free the database handle. */
@@ -666,25 +724,32 @@ __db_close(dbp, txn, flags)
  * the actual handle) and during abort processing, we may have a
  * fully opened handle.
  *
- * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t, int *));
+ * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t, int *, int));
  */
 int
-__db_refresh(dbp, txn, flags, deferred_closep)
+__db_refresh(dbp, txn, flags, deferred_closep, reuse)
 	DB *dbp;
 	DB_TXN *txn;
 	u_int32_t flags;
-	int *deferred_closep;
+	int *deferred_closep, reuse;
 {
 	DB *sdbp;
 	DBC *dbc;
 	DB_ENV *dbenv;
 	DB_LOCKREQ lreq;
-	DB_MPOOL *dbmp;
+	REGENV *renv;
+	REGINFO *infop;
+	u_int32_t save_flags;
 	int resync, ret, t_ret;
 
 	ret = 0;
 
 	dbenv = dbp->dbenv;
+	infop = dbenv->reginfo;
+	if (infop != NULL)
+		renv = infop->primary;
+	else
+		renv = NULL;
 
 	/* If never opened, or not currently open, it's easy. */
 	if (!F_ISSET(dbp, DB_AM_OPEN_CALLED))
@@ -768,6 +833,7 @@ __db_refresh(dbp, txn, flags, deferred_closep)
 	    (t_ret = __memp_fsync(dbp->mpf)) != 0 && ret == 0)
 		ret = t_ret;
 
+never_opened:
 	/*
 	 * At this point, we haven't done anything to render the DB
 	 * handle unusable, at least by a transaction abort.  Take the
@@ -779,12 +845,15 @@ __db_refresh(dbp, txn, flags, deferred_closep)
 	 * In this case, we put off actually closing the dbp until we've
 	 * performed the abort.
 	 */
-	if (LOGGING_ON(dbp->dbenv)) {
+	if (!reuse && LOGGING_ON(dbp->dbenv)) {
 		/*
 		 * Discard the log file id, if any.  We want to log the close
-		 * if and only if this is not a recovery dbp.
+		 * if and only if this is not a recovery dbp or a client dbp,
+		 * or a dead dbp handle.
 		 */
-		if (F_ISSET(dbp, DB_AM_RECOVER))
+		DB_ASSERT(renv != NULL);
+		if (F_ISSET(dbp, DB_AM_RECOVER) || IS_REP_CLIENT(dbenv) ||
+		    dbp->timestamp != renv->rep_timestamp)
 			t_ret = __dbreg_revoke_id(dbp, 0, DB_LOGFILEID_INVALID);
 		else {
 			if ((t_ret = __dbreg_close_id(dbp,
@@ -808,6 +877,17 @@ __db_refresh(dbp, txn, flags, deferred_closep)
 					*deferred_closep = 1;
 				return (t_ret);
 			}
+			/*
+			 * If dbreg_close_id failed and we were not in a
+			 * transaction, then we need to finish this close
+			 * because the caller can't do anything with the
+			 * handle after we return an error.  We rely on
+			 * dbreg_close_id to mark the entry in some manner
+			 * so that we do not do a clean shutdown of this
+			 * environment.  If shutdown isn't clean, then the
+			 * application *must* run recovery and that will
+			 * generate the RCLOSE record.
+			 */
 		}
 
 		if (ret == 0)
@@ -824,7 +904,6 @@ __db_refresh(dbp, txn, flags, deferred_closep)
 	    ret == 0)
 		ret = t_ret;
 
-never_opened:
 	/*
 	 * Remove this DB handle from the DB_ENV's dblist, if it's been added.
 	 *
@@ -832,8 +911,8 @@ never_opened:
 	 * want to race with a thread searching for our underlying cache link
 	 * while opening a DB handle.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
-	if (dbp->dblistlinks.le_prev != NULL) {
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
+	if (!reuse && dbp->dblistlinks.le_prev != NULL) {
 		LIST_REMOVE(dbp, dblistlinks);
 		dbp->dblistlinks.le_prev = NULL;
 	}
@@ -845,9 +924,13 @@ never_opened:
 		    ret == 0)
 			ret = t_ret;
 		dbp->mpf = NULL;
+		if (reuse &&
+		    (t_ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0 &&
+		    ret == 0)
+		    	ret = t_ret;
 	}
 
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	/*
 	 * Call the access specific close function.
@@ -883,7 +966,7 @@ never_opened:
 	 * access-method specific data.
 	 */
 
-	if (dbp->lid != DB_LOCK_INVALIDID) {
+	if (!reuse && dbp->lid != DB_LOCK_INVALIDID) {
 		/* We may have pending trade operations on this dbp. */
 		if (txn != NULL)
 			__txn_remlock(dbenv, txn, &dbp->handle_lock, dbp->lid);
@@ -901,20 +984,57 @@ never_opened:
 		LOCK_INIT(dbp->handle_lock);
 	}
 
-	/* Discard the locker ID allocated as the fileid. */
-	if (F_ISSET(dbp, DB_AM_INMEM) && LOCKING_ON(dbenv) &&
+	/*
+	 * If this is a temporary file (un-named in-memory file), then
+	 * discard the locker ID allocated as the fileid.
+	 */
+	if (LOCKING_ON(dbenv) &&
+	    F_ISSET(dbp, DB_AM_INMEM) && !dbp->preserve_fid &&
+	    *(u_int32_t *)dbp->fileid != DB_LOCK_INVALIDID &&
 	    (t_ret = __lock_id_free(dbenv, *(u_int32_t *)dbp->fileid)) != 0 &&
 	    ret == 0)
 		ret = t_ret;
 
+	if (reuse) {
+		/*
+		 * If we are reusing this dbp, then we're done now. Re-init
+		 * the handle, preserving important flags, and then return.
+		 * This code is borrowed from __db_init, which does more
+		 * than we can do here.
+		 */
+		save_flags = F_ISSET(dbp, DB_AM_INMEM | DB_AM_TXN);
+
+		/*
+		 * XXX If this is an XA handle, we'll want to specify 
+		 * DB_XA_CREATE.
+		 */
+		if ((ret = __bam_db_create(dbp)) != 0)
+			return (ret);
+		if ((ret = __ham_db_create(dbp)) != 0)
+			return (ret);
+		if ((ret = __qam_db_create(dbp)) != 0)
+			return (ret);
+
+		/* Restore flags */
+		dbp->flags = dbp->orig_flags | save_flags;
+
+		if (FLD_ISSET(save_flags, DB_AM_INMEM)) {
+			/*
+			 * If this is inmem, then it may have a fileid
+			 * even if it was never opened, and we need to
+			 * clear out that fileid.
+			 */
+			memset(dbp->fileid, 0, sizeof(dbp->fileid));
+			MAKE_INMEM(dbp);
+		}
+		return (ret);
+	}
+
 	dbp->type = DB_UNKNOWN;
 
 	/* Discard the thread mutex. */
-	if (dbp->mutexp != NULL) {
-		dbmp = dbenv->mp_handle;
-		__db_mutex_free(dbenv, dbmp->reginfo, dbp->mutexp);
-		dbp->mutexp = NULL;
-	}
+	if ((t_ret = __mutex_free(dbenv, &dbp->mutex)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/* Discard any memory allocated for the file and database names. */
 	if (dbp->fname != NULL) {
@@ -1004,7 +1124,7 @@ __db_log_page(dbp, txn, lsn, pgno, page)
  * PUBLIC:     const char *, DB_TXN *, char **));
  */
 #undef	BACKUP_PREFIX
-#define	BACKUP_PREFIX	"__db."
+#define	BACKUP_PREFIX	"__db"
 
 #undef	MAX_LSN_TO_TEXT
 #define	MAX_LSN_TO_TEXT	17
@@ -1026,16 +1146,18 @@ __db_backup_name(dbenv, name, txn, backup)
 	 * we allocate enough space for it, even in the case where we don't
 	 * use the entire filename for the backup name.
 	 */
-	len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT;
+	len = strlen(name) + strlen(BACKUP_PREFIX) + 1 + MAX_LSN_TO_TEXT;
 	if ((ret = __os_malloc(dbenv, len, &retp)) != 0)
 		return (ret);
 
 	/*
-	 * Create the name.  Backup file names are in one of two forms:
+	 * Create the name.  Backup file names are in one of three forms:
 	 *
 	 *	In a transactional env: __db.LSN(8).LSN(8)
 	 * and
-	 *	in a non-transactional env: __db.FILENAME
+	 *	In VXWORKS (where we want 8.3 support)
+	 * and
+	 *	in any other non-transactional env: __db.FILENAME
 	 *
 	 * If the transaction doesn't have a current LSN, we write a dummy
 	 * log record to force it, so we ensure all tmp names are unique.
@@ -1051,14 +1173,37 @@ __db_backup_name(dbenv, name, txn, backup)
 	 *	4. multi-component path + transaction
 	 */
 	p = __db_rpath(name);
-	if (txn == NULL)
+	if (txn == NULL) {
+#ifdef HAVE_VXWORKS
+	    { int i, n;
+		/* On VxWorks we must support 8.3 names. */
 		if (p == NULL)				/* Case 1. */
-			snprintf(retp, len, "%s%s", BACKUP_PREFIX, name);
-		else					/* Case 3. */
-			snprintf(retp, len, "%.*s%s%s",
+			n = snprintf(retp,
+			    len, "%s%.4s.tmp", BACKUP_PREFIX, name);
+		else				/* Case 3. */
+			n = snprintf(retp, len, "%.*s%s%.4s.tmp",
 			    (int)(p - name) + 1, name, BACKUP_PREFIX, p + 1);
-	else {
-		if (IS_ZERO_LSN(txn->last_lsn)) {
+
+		/*
+		 * Overwrite "." in the characters copied from the name.
+		 * If we backup 8 characters from the end, we're guaranteed
+		 * to a) include the four bytes we copied from the name
+		 * and b) not run off the beginning of the string.
+		 */
+		for (i = 0, p = (retp + n) - 8; i < 4; p++, i++)
+			if (*p == '.')
+				*p = '_';
+	    }
+#else
+		if (p == NULL)				/* Case 1. */
+			snprintf(retp, len, "%s.%s", BACKUP_PREFIX, name);
+		else					/* Case 3. */
+			snprintf(retp, len, "%.*s%s.%s",
+			    (int)(p - name) + 1, name, BACKUP_PREFIX, p + 1);
+#endif
+	} else {
+		lsn = ((TXN_DETAIL *)txn->td)->last_lsn;
+		if (IS_ZERO_LSN(lsn)) {
 			/*
 			 * Write dummy log record.   The two choices for dummy
 			 * log records are __db_noop_log and __db_debug_log;
@@ -1071,12 +1216,11 @@ __db_backup_name(dbenv, name, txn, backup)
 				__os_free(dbenv, retp);
 				return (ret);
 			}
-		} else
-			lsn = txn->last_lsn;
+		}
 
 		if (p == NULL)				/* Case 2. */
 			snprintf(retp, len,
-			    "%s%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset);
+			    "%s.%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset);
 		else					/* Case 4. */
 			snprintf(retp, len, "%.*s%x.%x",
 			    (int)(p - name) + 1, name, lsn.file, lsn.offset);
@@ -1235,7 +1379,8 @@ __db_testdocopy(dbenv, name)
 	/*
 	 * Maximum size of file, including adding a ".afterop".
 	 */
-	len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9;
+	len = strlen(real_name) +
+	    strlen(BACKUP_PREFIX) + 1 + MAX_LSN_TO_TEXT + 9;
 
 	if ((ret = __os_malloc(dbenv, len, &copy)) != 0)
 		goto err;
@@ -1264,7 +1409,7 @@ __db_testdocopy(dbenv, name)
 	 * files named, say, 'a' and 'abc' we won't match 'abc' when
 	 * looking for 'a'.
 	 */
-	snprintf(backup, len, "%s%s.0x", BACKUP_PREFIX, name);
+	snprintf(backup, len, "%s.%s.0x", BACKUP_PREFIX, name);
 
 	/*
 	 * We need the directory path to do the __os_dirlist.
@@ -1343,10 +1488,10 @@ __db_makecopy(dbenv, src, dest)
 		return;
 
 	if (__os_open(dbenv,
-	    src, DB_OSO_RDONLY, __db_omode("rw----"), &rfhp) != 0)
+	    src, DB_OSO_RDONLY, __db_omode(OWNER_RW), &rfhp) != 0)
 		goto err;
 	if (__os_open(dbenv, dest,
-	    DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode("rw----"), &wfhp) != 0)
+	    DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode(OWNER_RW), &wfhp) != 0)
 		goto err;
 
 	for (;;)
diff --git a/storage/bdb/db/db.src b/storage/bdb/db/db.src
index 2cac31a4f52..21fe754a3f7 100644
--- a/storage/bdb/db/db.src
+++ b/storage/bdb/db/db.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db.src,v 11.28 2004/06/17 17:35:15 bostic Exp $
+ * $Id: db.src,v 12.2 2005/08/08 03:37:06 ubell Exp $
  */
 
 PREFIX	__db
@@ -236,3 +236,24 @@ ARG	pgno		db_pgno_t	lu
 PGDBT	header		DBT		s
 PGDBT	data		DBT		s
 END
+
+/*
+ * pg_sort: sort the free list
+ *
+ * meta:	meta page number
+ * meta_lsn:	lsn on meta page.
+ * last_free:	page number of new last free page.
+ * last_lsn;	lsn of last free page.
+ * last_pgno:	current last page number.
+ * list:	list of pages and lsns to sort.
+ */
+BEGIN pg_sort		61
+DB	fileid		int32_t		ld
+ARG	meta		db_pgno_t	lu
+POINTER	meta_lsn	DB_LSN *	lu
+ARG	last_free	db_pgno_t	lu
+POINTER	last_lsn	DB_LSN *	lu
+ARG	last_pgno	db_pgno_t	lu
+DBT	list		DBT		s
+END
+
diff --git a/storage/bdb/db/db_am.c b/storage/bdb/db/db_am.c
index 0e4a864f95a..966f5f07123 100644
--- a/storage/bdb/db/db_am.c
+++ b/storage/bdb/db/db_am.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_am.c,v 11.120 2004/10/07 17:33:32 sue Exp $
+ * $Id: db_am.c,v 12.12 2005/11/01 00:44:09 bostic Exp $
  */
 
 #include "db_config.h"
@@ -48,7 +48,9 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 	DBC *dbc;
 	DBC_INTERNAL *cp;
 	DB_ENV *dbenv;
+	db_threadid_t tid;
 	int allocated, ret;
+	pid_t pid;
 
 	dbenv = dbp->dbenv;
 	allocated = 0;
@@ -61,7 +63,7 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 	 * right type.  With off page dups we may have different kinds
 	 * of cursors on the queue for a single database.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	for (dbc = TAILQ_FIRST(&dbp->free_queue);
 	    dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
 		if (dbtype == dbc->dbtype) {
@@ -69,7 +71,7 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 			F_CLR(dbc, ~DBC_OWN_LID);
 			break;
 		}
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	if (dbc == NULL) {
 		if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0)
@@ -92,13 +94,14 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 			 * environment handles.
 			 */
 			if (!DB_IS_THREADED(dbp)) {
-				if (dbp->dbenv->env_lid == DB_LOCK_INVALIDID &&
-				    (ret =
-				    __lock_id(dbenv,&dbp->dbenv->env_lid)) != 0)
+				if (dbp->dbenv->env_lref == NULL &&
+				    (ret = __lock_id(dbenv, NULL,
+				    (DB_LOCKER **)&dbp->dbenv->env_lref)) != 0)
 					goto err;
-				dbc->lid = dbp->dbenv->env_lid;
+				dbc->lref = dbp->dbenv->env_lref;
 			} else {
-				if ((ret = __lock_id(dbenv, &dbc->lid)) != 0)
+				if ((ret = __lock_id(dbenv, NULL,
+				    (DB_LOCKER **)&dbc->lref)) != 0)
 					goto err;
 				F_SET(dbc, DBC_OWN_LID);
 			}
@@ -175,7 +178,9 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 	dbc->dbtype = dbtype;
 	RESET_RET_MEM(dbc);
 
-	if ((dbc->txn = txn) == NULL) {
+	if ((dbc->txn = txn) != NULL)
+		dbc->locker = txn->txnid;
+	else if (LOCKING_ON(dbenv)) {
 		/*
 		 * There are certain cases in which we want to create a
 		 * new cursor with a particular locker ID that is known
@@ -193,16 +198,25 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 		 * primary are subdatabases or we're using env-wide locking,
 		 * this is disastrous.
 		 *
-		 * In these cases, our caller will pass a nonzero locker ID
-		 * into this function.  Use this locker ID instead of dbc->lid
-		 * as the locker ID for our new cursor.
+		 * In these cases, our caller will pass a nonzero locker
+		 * ID into this function.  Use this locker ID instead of
+		 * the default as the locker ID for our new cursor.
 		 */
 		if (lockerid != DB_LOCK_INVALIDID)
 			dbc->locker = lockerid;
-		else
-			dbc->locker = dbc->lid;
-	} else
-		dbc->locker = txn->txnid;
+		else {
+			/*
+			 * If we are threaded then we need to set the
+			 * proper thread id into the locker.
+			 */
+			if (DB_IS_THREADED(dbp)) {
+				dbenv->thread_id(dbenv, &pid, &tid);
+				__lock_set_thread_id(
+				    (DB_LOCKER *)dbc->lref, pid, tid);
+			}
+			dbc->locker = ((DB_LOCKER *)dbc->lref)->id;
+		}
+	}
 
 	/*
 	 * These fields change when we are used as a secondary index, so
@@ -253,10 +267,10 @@ __db_cursor_int(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
 	if (txn != NULL)
 		++txn->cursors;
 
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
 	F_SET(dbc, DBC_ACTIVE);
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	*dbcp = dbc;
 	return (0);
@@ -445,38 +459,60 @@ __db_del(dbp, txn, key, flags)
 		f_next |= DB_RMW;
 	}
 
-	/* Walk through the set of key/data pairs, deleting as we go. */
-	if ((ret = __db_c_get(dbc, key, &data, f_init)) != 0)
-		goto err;
-
 	/*
-	 * Hash permits an optimization in DB->del:  since on-page
-	 * duplicates are stored in a single HKEYDATA structure, it's
-	 * possible to delete an entire set of them at once, and as
-	 * the HKEYDATA has to be rebuilt and re-put each time it
-	 * changes, this is much faster than deleting the duplicates
-	 * one by one.  Thus, if we're not pointing at an off-page
-	 * duplicate set, and we're not using secondary indices (in
-	 * which case we'd have to examine the items one by one anyway),
-	 * let hash do this "quick delete".
+	 * Optimize the simple cases.  For all AMs if we don't have secondaries
+	 * and are not a secondary and there are no dups then we can avoid a
+	 * bunch of overhead.  For queue we don't need to fetch the record since
+	 * we delete by direct calculation from the record number.
+	 *
+	 * Hash permits an optimization in DB->del: since on-page duplicates are
+	 * stored in a single HKEYDATA structure, it's possible to delete an
+	 * entire set of them at once, and as the HKEYDATA has to be rebuilt
+	 * and re-put each time it changes, this is much faster than deleting
+	 * the duplicates one by one.  Thus, if not pointing at an off-page
+	 * duplicate set, and we're not using secondary indices (in which case
+	 * we'd have to examine the items one by one anyway), let hash do this
+	 * "quick delete".
 	 *
 	 * !!!
 	 * Note that this is the only application-executed delete call in
 	 * Berkeley DB that does not go through the __db_c_del function.
 	 * If anything other than the delete itself (like a secondary index
 	 * update) has to happen there in a particular situation, the
-	 * conditions here should be modified not to call __ham_quick_delete.
-	 * The ordinary AM-independent alternative will work just fine with
-	 * a hash;  it'll just be slower.
+	 * conditions here should be modified not to use these optimizations.
+	 * The ordinary AM-independent alternative will work just fine;
+	 * it'll just be slower.
 	 */
-	if (dbp->type == DB_HASH)
-		if (LIST_FIRST(&dbp->s_secondaries) == NULL &&
-		    !F_ISSET(dbp, DB_AM_SECONDARY) &&
-		    dbc->internal->opd == NULL) {
+	if (!F_ISSET(dbp, DB_AM_SECONDARY) &&
+	    LIST_FIRST(&dbp->s_secondaries) == NULL) {
+
+#ifdef HAVE_QUEUE
+		if (dbp->type == DB_QUEUE) {
+			ret = __qam_delete(dbc, key);
+			goto done;
+		}
+#endif
+
+		/* Fetch the first record. */
+		if ((ret = __db_c_get(dbc, key, &data, f_init)) != 0)
+			goto err;
+
+#ifdef HAVE_HASH
+		if (dbp->type == DB_HASH && dbc->internal->opd == NULL) {
 			ret = __ham_quick_delete(dbc);
 			goto done;
 		}
+#endif
 
+		if ((dbp->type == DB_BTREE || dbp->type == DB_RECNO) &&
+		    !F_ISSET(dbp, DB_AM_DUP)) {
+			ret = dbc->c_am_del(dbc);
+			goto done;
+		}
+	} else if ((ret = __db_c_get(dbc, key, &data, f_init)) != 0)
+		goto err;
+
+	/* Walk through the set of key/data pairs, deleting as we go. */
 	for (;;) {
 		if ((ret = __db_c_del(dbc, 0)) != 0)
 			break;
@@ -554,21 +590,11 @@ __db_associate(dbp, txn, sdbp, callback, flags)
 	pdbc = sdbc = NULL;
 	ret = 0;
 
-	sdbp->s_callback = callback;
-	sdbp->s_primary = dbp;
-
-	sdbp->stored_get = sdbp->get;
-	sdbp->get = __db_secondary_get;
-
-	sdbp->stored_close = sdbp->close;
-	sdbp->close = __db_secondary_close_pp;
-
-	F_SET(sdbp, DB_AM_SECONDARY);
-
 	/*
-	 * Check to see if the secondary is empty--and thus if we should
-	 * build it--before we link it in and risk making it show up in
-	 * other threads.
+	 * Check to see if the secondary is empty -- and thus if we should
+	 * build it -- before we link it in and risk making it show up in other
+	 * threads.  Do this first so that the databases remain unassociated on
+	 * error.
 	 */
 	build = 0;
 	if (LF_ISSET(DB_CREATE)) {
@@ -590,10 +616,6 @@ __db_associate(dbp, txn, sdbp, callback, flags)
 			ret = 0;
 		}
 
-		/*
-		 * Secondary cursors have special refcounting close
-		 * methods.  Be careful.
-		 */
 		if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0)
 			ret = t_ret;
 
@@ -604,18 +626,35 @@ __db_associate(dbp, txn, sdbp, callback, flags)
 			goto err;
 	}
 
+	/*
+	 * Set up the database handle as a secondary.
+	 */
+	sdbp->s_callback = callback;
+	sdbp->s_primary = dbp;
+
+	sdbp->stored_get = sdbp->get;
+	sdbp->get = __db_secondary_get;
+
+	sdbp->stored_close = sdbp->close;
+	sdbp->close = __db_secondary_close_pp;
+
+	F_SET(sdbp, DB_AM_SECONDARY);
+
+	if (LF_ISSET(DB_IMMUTABLE_KEY))
+		FLD_SET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY);
+
 	/*
 	 * Add the secondary to the list on the primary.  Do it here
 	 * so that we see any updates that occur while we're walking
 	 * the primary.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 
 	/* See __db_s_next for an explanation of secondary refcounting. */
 	DB_ASSERT(sdbp->s_refcnt == 0);
 	sdbp->s_refcnt = 1;
 	LIST_INSERT_HEAD(&dbp->s_secondaries, sdbp, s_links);
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	if (build) {
 		/*
@@ -650,11 +689,13 @@ __db_associate(dbp, txn, sdbp, callback, flags)
 					continue;
 				goto err;
 			}
+			SWAP_IF_NEEDED(dbp, sdbp, &key);
 			if ((ret = __db_c_put(sdbc,
 			    &skey, &key, DB_UPDATE_SECONDARY)) != 0) {
 				FREE_IF_NEEDED(sdbp, &skey);
 				goto err;
 			}
+			SWAP_IF_NEEDED(dbp, sdbp, &key);
 
 			FREE_IF_NEEDED(sdbp, &skey);
 		}
@@ -709,7 +750,7 @@ __db_secondary_close(sdbp, flags)
 	doclose = 0;
 	primary = sdbp->s_primary;
 
-	MUTEX_THREAD_LOCK(primary->dbenv, primary->mutexp);
+	MUTEX_LOCK(primary->dbenv, primary->mutex);
 	/*
 	 * Check the refcount--if it was at 1 when we were called, no
 	 * thread is currently updating this secondary through the primary,
@@ -725,7 +766,7 @@ __db_secondary_close(sdbp, flags)
 		/* We don't want to call close while the mutex is held. */
 		doclose = 1;
 	}
-	MUTEX_THREAD_UNLOCK(primary->dbenv, primary->mutexp);
+	MUTEX_UNLOCK(primary->dbenv, primary->mutex);
 
 	/*
 	 * sdbp->close is this function;  call the real one explicitly if
@@ -793,14 +834,14 @@ __db_append_primary(dbc, key, data)
 	 * just that section into a common function, but the basic
 	 * overview is the same here.
 	 */
-	for (sdbp = __db_s_first(dbp);
-	    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+	if ((ret = __db_s_first(dbp, &sdbp)) != 0)
+		goto err;
+	for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
 		memset(&skey, 0, sizeof(DBT));
 		if ((ret = sdbp->s_callback(sdbp, key, data, &skey)) != 0) {
 			if (ret == DB_DONOTINDEX)
 				continue;
-			else
-				goto err;
+			goto err;
 		}
 
 		if ((ret = __db_cursor_int(sdbp, dbc->txn, sdbp->type,
@@ -851,7 +892,6 @@ err1:		FREE_IF_NEEDED(sdbp, &skey);
 
 		if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0)
 			ret = t_ret;
-
 		if (ret != 0)
 			goto err;
 	}
diff --git a/storage/bdb/db/db_cam.c b/storage/bdb/db/db_cam.c
index 075765072d7..f7b93ad36b1 100644
--- a/storage/bdb/db/db_cam.c
+++ b/storage/bdb/db/db_cam.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_cam.c,v 11.156 2004/09/28 18:07:32 ubell Exp $
+ * $Id: db_cam.c,v 12.21 2005/10/07 20:21:22 ubell Exp $
  */
 
 #include "db_config.h"
@@ -52,6 +52,7 @@ static int __db_wrlock_err __P((DB_ENV *));
 	if (F_ISSET(dbc, DBC_WRITECURSOR))				\
 		(void)__lock_downgrade(					\
 		    (dbp)->dbenv, &(dbc)->mylock, DB_LOCK_IWRITE, 0);
+
 /*
  * __db_c_close --
  *	DBC->c_close.
@@ -85,16 +86,18 @@ __db_c_close(dbc)
 	 * access specific cursor close routine, btree depends on having that
 	 * order of operations.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 
 	if (opd != NULL) {
+		DB_ASSERT(F_ISSET(opd, DBC_ACTIVE));
 		F_CLR(opd, DBC_ACTIVE);
 		TAILQ_REMOVE(&dbp->active_queue, opd, links);
 	}
+	DB_ASSERT(F_ISSET(dbc, DBC_ACTIVE));
 	F_CLR(dbc, DBC_ACTIVE);
 	TAILQ_REMOVE(&dbp->active_queue, dbc, links);
 
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	/* Call the access specific cursor close routine. */
 	if ((t_ret =
@@ -125,7 +128,7 @@ __db_c_close(dbc)
 		dbc->txn->cursors--;
 
 	/* Move the cursor(s) to the free queue. */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	if (opd != NULL) {
 		if (dbc->txn != NULL)
 			dbc->txn->cursors--;
@@ -133,7 +136,7 @@ __db_c_close(dbc)
 		opd = NULL;
 	}
 	TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	return (ret);
 }
@@ -156,9 +159,9 @@ __db_c_destroy(dbc)
 	dbenv = dbp->dbenv;
 
 	/* Remove the cursor from the free queue. */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	TAILQ_REMOVE(&dbp->free_queue, dbc, links);
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	/* Free up allocated memory. */
 	if (dbc->my_rskey.data != NULL)
@@ -176,7 +179,8 @@ __db_c_destroy(dbc)
 	 */
 	if (LOCKING_ON(dbenv) &&
 	    F_ISSET(dbc, DBC_OWN_LID) &&
-	    (t_ret = __lock_id_free(dbenv, dbc->lid)) != 0 && ret == 0)
+	    (t_ret = __lock_id_free(dbenv,
+	    ((DB_LOCKER *)dbc->lref)->id)) != 0 && ret == 0)
 		ret = t_ret;
 
 	__os_free(dbenv, dbc);
@@ -298,7 +302,7 @@ __db_c_del(dbc, flags)
 	 * to explicitly downgrade this lock.  The closed cursor
 	 * may only have had a read lock.
 	 */
-	if (F_ISSET(dbc->dbp, DB_AM_DIRTY) &&
+	if (F_ISSET(dbc->dbp, DB_AM_READ_UNCOMMITTED) &&
 	    dbc->internal->lock_mode == DB_LOCK_WRITE) {
 		if ((t_ret =
 		    __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0)
@@ -413,8 +417,8 @@ __db_c_idup(dbc_orig, dbcp, flags)
 	}
 
 	/* Copy the locking flags to the new cursor. */
-	F_SET(dbc_n,
-	    F_ISSET(dbc_orig, DBC_WRITECURSOR | DBC_DIRTY_READ | DBC_DEGREE_2));
+	F_SET(dbc_n, F_ISSET(dbc_orig,
+	    DBC_READ_COMMITTED | DBC_READ_UNCOMMITTED | DBC_WRITECURSOR));
 
 	/*
 	 * If we're in CDB and this isn't an offpage dup cursor, then
@@ -504,9 +508,13 @@ __db_c_get(dbc_arg, key, data, flags)
 	DBC_INTERNAL *cp, *cp_n;
 	DB_MPOOLFILE *mpf;
 	db_pgno_t pgno;
-	u_int32_t multi, tmp_dirty, tmp_flags, tmp_rmw;
+	u_int32_t multi, orig_ulen, tmp_flags, tmp_read_uncommitted, tmp_rmw;
 	u_int8_t type;
-	int ret, t_ret;
+	int key_small, ret, t_ret;
+
+	COMPQUIET(orig_ulen, 0);
+
+	key_small = 0;
 
 	/*
 	 * Cursor Cleanup Note:
@@ -526,8 +534,10 @@ __db_c_get(dbc_arg, key, data, flags)
 	tmp_rmw = LF_ISSET(DB_RMW);
 	LF_CLR(DB_RMW);
 
-	tmp_dirty = LF_ISSET(DB_DIRTY_READ);
-	LF_CLR(DB_DIRTY_READ);
+	tmp_read_uncommitted =
+	    LF_ISSET(DB_READ_UNCOMMITTED) &&
+	    !F_ISSET(dbc_arg, DBC_READ_UNCOMMITTED);
+	LF_CLR(DB_READ_UNCOMMITTED);
 
 	multi = LF_ISSET(DB_MULTIPLE|DB_MULTIPLE_KEY);
 	LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
@@ -539,13 +549,13 @@ __db_c_get(dbc_arg, key, data, flags)
 	if (flags == DB_GET_RECNO) {
 		if (tmp_rmw)
 			F_SET(dbc_arg, DBC_RMW);
-		if (tmp_dirty)
-			F_SET(dbc_arg, DBC_DIRTY_READ);
+		if (tmp_read_uncommitted)
+			F_SET(dbc_arg, DBC_READ_UNCOMMITTED);
 		ret = __bam_c_rget(dbc_arg, data);
 		if (tmp_rmw)
 			F_CLR(dbc_arg, DBC_RMW);
-		if (tmp_dirty)
-			F_CLR(dbc_arg, DBC_DIRTY_READ);
+		if (tmp_read_uncommitted)
+			F_CLR(dbc_arg, DBC_READ_UNCOMMITTED);
 		return (ret);
 	}
 
@@ -613,8 +623,8 @@ __db_c_get(dbc_arg, key, data, flags)
 		break;
 	}
 
-	if (tmp_dirty)
-		F_SET(dbc_arg, DBC_DIRTY_READ);
+	if (tmp_read_uncommitted)
+		F_SET(dbc_arg, DBC_READ_UNCOMMITTED);
 
 	/*
 	 * If this cursor is going to be closed immediately, we don't
@@ -624,8 +634,8 @@ __db_c_get(dbc_arg, key, data, flags)
 		dbc_n = dbc_arg;
 	else {
 		ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags);
-		if (tmp_dirty)
-			F_CLR(dbc_arg, DBC_DIRTY_READ);
+		if (tmp_read_uncommitted)
+			F_CLR(dbc_arg, DBC_READ_UNCOMMITTED);
 
 		if (ret != 0)
 			goto err;
@@ -654,8 +664,8 @@ __db_c_get(dbc_arg, key, data, flags)
 	ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
 	if (tmp_rmw)
 		F_CLR(dbc_n, DBC_RMW);
-	if (tmp_dirty)
-		F_CLR(dbc_arg, DBC_DIRTY_READ);
+	if (tmp_read_uncommitted)
+		F_CLR(dbc_arg, DBC_READ_UNCOMMITTED);
 	F_CLR(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
 	if (ret != 0)
 		goto err;
@@ -721,8 +731,22 @@ done:	/*
 			goto err;
 
 		if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx,
-		    key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0)
-			goto err;
+		    key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0) {
+			/*
+			 * If the key DBT is too small, we still want to return
+			 * the size of the data.  Otherwise applications are
+			 * forced to check each one with a separate call.  We
+			 * don't want to copy the data, so we set the ulen to
+			 * zero before calling __db_ret.
+			 */
+			if (ret == DB_BUFFER_SMALL &&
+			    F_ISSET(data, DB_DBT_USERMEM)) {
+				key_small = 1;
+				orig_ulen = data->ulen;
+				data->ulen = 0;
+			} else
+				goto err;
+		}
 	}
 	if (multi != 0) {
 		/*
@@ -793,11 +817,11 @@ err:	/* Don't pass DB_DBT_ISSET back to application level, error or no. */
 		 * about the referencing page or cursor we need
 		 * to peek at the OPD cursor and get the lock here.
 		 */
-		if (F_ISSET(dbc_arg->dbp, DB_AM_DIRTY) &&
+		if (F_ISSET(dbc_arg->dbp, DB_AM_READ_UNCOMMITTED) &&
 		     F_ISSET((BTREE_CURSOR *)
 		     dbc_arg->internal->opd->internal, C_DELETED))
 			if ((t_ret =
-			    dbc_arg->c_am_writelock(dbc_arg)) != 0 && ret != 0)
+			    dbc_arg->c_am_writelock(dbc_arg)) != 0 && ret == 0)
 				ret = t_ret;
 		if ((t_ret = __db_c_cleanup(
 		    dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0)
@@ -808,6 +832,12 @@ err:	/* Don't pass DB_DBT_ISSET back to application level, error or no. */
 	if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
 		ret = t_ret;
 
+	if (key_small) {
+		data->ulen = orig_ulen;
+		if (ret == 0)
+			ret = DB_BUFFER_SMALL;
+	}
+
 	if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
 		CDB_LOCKING_DONE(dbp, dbc_arg);
 	return (ret);
@@ -1011,8 +1041,10 @@ __db_c_put(dbc_arg, key, data, flags)
 		}
 
 		/*
-		 * Now build the new datum from olddata and the partial
-		 * data we were given.
+		 * Now build the new datum from olddata and the partial data we
+		 * were given.  It's okay to do this if no record was returned
+		 * above: a partial put on an empty record is allowed, if a
+		 * little strange.  The data is zero-padded.
 		 */
 		if ((ret =
 		    __db_buildpartial(dbp, &olddata, data, &newdata)) != 0)
@@ -1071,8 +1103,18 @@ __db_c_put(dbc_arg, key, data, flags)
 	 * Note that __db_s_first and __db_s_next will take care of
 	 * thread-locking and refcounting issues.
 	 */
-	for (sdbp = __db_s_first(dbp);
-	    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+	if ((ret = __db_s_first(dbp, &sdbp)) != 0)
+		goto err;
+	for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+		/*
+		 * Don't process this secondary if the key is immutable and we
+		 * know that the old record exists.  This optimization can't be
+		 * used if we have not checked for the old record yet.
+		 */
+		if (have_oldrec && !nodel &&
+		    FLD_ISSET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY))
+			continue;
+
 		/*
 		 * Call the callback for this secondary, to get the
 		 * appropriate secondary key.
@@ -1088,8 +1130,7 @@ __db_c_put(dbc_arg, key, data, flags)
 				 * any necessary deletes in step 5.
 				 */
 				continue;
-			else
-				goto err;
+			goto err;
 		}
 
 		/*
@@ -1118,6 +1159,14 @@ __db_c_put(dbc_arg, key, data, flags)
 			F_SET(sdbc, DBC_WRITER);
 		}
 
+		/*
+		 * Swap the primary key to the byte order of this secondary, if
+		 * necessary.  By doing this now, we can compare directly
+		 * against the data already in the secondary without having to
+		 * swap it after reading.
+		 */
+		SWAP_IF_NEEDED(dbp, sdbp, &pkey);
+
 		/*
 		 * There are three cases here--
 		 * 1) The secondary supports sorted duplicates.
@@ -1161,9 +1210,9 @@ __db_c_put(dbc_arg, key, data, flags)
 			    "Put results in a non-unique secondary key in an ",
 			    "index not configured to support duplicates");
 					ret = EINVAL;
-					goto skipput;
 				}
-			} else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+			}
+			if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
 				goto skipput;
 		} else if (!F_ISSET(sdbp, DB_AM_DUPSORT)) {
 			/* Case 2. */
@@ -1191,6 +1240,9 @@ __db_c_put(dbc_arg, key, data, flags)
 
 skipput:	FREE_IF_NEEDED(sdbp, &skey)
 
+		/* Make sure the primary key is back in native byte-order. */
+		SWAP_IF_NEEDED(dbp, sdbp, &pkey);
+
 		if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0)
 			ret = t_ret;
 
@@ -1227,8 +1279,17 @@ skipput:	FREE_IF_NEEDED(sdbp, &skey)
 	if (nodel)
 		goto skip_s_update;
 
-	for (sdbp = __db_s_first(dbp);
-	    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+	if ((ret = __db_s_first(dbp, &sdbp)) != 0)
+		goto err;
+	for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+		/*
+		 * Don't process this secondary if the key is immutable.  We
+		 * know that the old record exists, so this optimization can
+		 * always be used.
+		 */
+		if (FLD_ISSET(sdbp->s_assoc_flags, DB_ASSOC_IMMUTABLE_KEY))
+			continue;
+
 		/*
 		 * Call the callback for this secondary to get the
 		 * old secondary key.
@@ -1243,8 +1304,7 @@ skipput:	FREE_IF_NEEDED(sdbp, &skey)
 				 * secondary.
 				 */
 				continue;
-			else
-				goto err;
+			goto err;
 		}
 		memset(&skey, 0, sizeof(DBT));
 		if ((ret = sdbp->s_callback(sdbp,
@@ -1280,6 +1340,7 @@ skipput:	FREE_IF_NEEDED(sdbp, &skey)
 			memset(&tempskey, 0, sizeof(DBT));
 			tempskey.data = oldskey.data;
 			tempskey.size = oldskey.size;
+			SWAP_IF_NEEDED(dbp, sdbp, &pkey);
 			memset(&temppkey, 0, sizeof(DBT));
 			temppkey.data = pkey.data;
 			temppkey.size = pkey.size;
@@ -1288,6 +1349,7 @@ skipput:	FREE_IF_NEEDED(sdbp, &skey)
 				ret = __db_c_del(sdbc, DB_UPDATE_SECONDARY);
 			else if (ret == DB_NOTFOUND)
 				ret = __db_secondary_corrupt(dbp);
+			SWAP_IF_NEEDED(dbp, sdbp, &pkey);
 		}
 
 		FREE_IF_NEEDED(sdbp, &skey);
@@ -1328,9 +1390,8 @@ skip_s_update:
 			goto err;
 		}
 
-		if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
-			return (ret);
-		if ((ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITION)) != 0)
+		if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0 ||
+		    (ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITION)) != 0)
 			goto err;
 		opd = dbc_n->internal->opd;
 		if ((ret = opd->c_am_put(
@@ -1530,7 +1591,7 @@ __db_c_cleanup(dbc, dbc_n, failed)
 	 * to explicitly downgrade this lock.  The closed cursor
 	 * may only have had a read lock.
 	 */
-	if (F_ISSET(dbp, DB_AM_DIRTY) &&
+	if (F_ISSET(dbp, DB_AM_READ_UNCOMMITTED) &&
 	    dbc->internal->lock_mode == DB_LOCK_WRITE) {
 		if ((t_ret =
 		    __TLPUT(dbc, dbc->internal->lock)) != 0 && ret == 0)
@@ -1573,13 +1634,14 @@ __db_c_pget(dbc, skey, pkey, data, flags)
 	u_int32_t flags;
 {
 	DB *pdbp, *sdbp;
-	DBC *pdbc;
-	DBT *save_rdata, nullpkey;
-	u_int32_t save_pkey_flags;
+	DBC *dbc_n, *pdbc;
+	DBT nullpkey;
+	u_int32_t save_pkey_flags, tmp_flags, tmp_read_uncommitted, tmp_rmw;
 	int pkeymalloc, ret, t_ret;
 
 	sdbp = dbc->dbp;
 	pdbp = sdbp->s_primary;
+	dbc_n = NULL;
 	pkeymalloc = t_ret = 0;
 
 	/*
@@ -1599,13 +1661,32 @@ __db_c_pget(dbc, skey, pkey, data, flags)
 		pkey = &nullpkey;
 	}
 
+	/* Clear OR'd in additional bits so we can check for flag equality. */
+	tmp_rmw = LF_ISSET(DB_RMW);
+	LF_CLR(DB_RMW);
+
+	tmp_read_uncommitted =
+	    LF_ISSET(DB_READ_UNCOMMITTED) &&
+	    !F_ISSET(dbc, DBC_READ_UNCOMMITTED);
+	LF_CLR(DB_READ_UNCOMMITTED);
+
 	/*
 	 * DB_GET_RECNO is a special case, because we're interested not in
 	 * the primary key/data pair, but rather in the primary's record
 	 * number.
 	 */
-	if ((flags & DB_OPFLAGS_MASK) == DB_GET_RECNO)
-		return (__db_c_pget_recno(dbc, pkey, data, flags));
+	if (flags == DB_GET_RECNO) {
+		if (tmp_rmw)
+			F_SET(dbc, DBC_RMW);
+		if (tmp_read_uncommitted)
+			F_SET(dbc, DBC_READ_UNCOMMITTED);
+		ret = __db_c_pget_recno(dbc, pkey, data, flags);
+		if (tmp_rmw)
+			F_CLR(dbc, DBC_RMW);
+		if (tmp_read_uncommitted)
+			F_CLR(dbc, DBC_READ_UNCOMMITTED);
+		return (ret);
+	}
 
 	/*
 	 * If the DBTs we've been passed don't have any of the
@@ -1623,28 +1704,23 @@ __db_c_pget(dbc, skey, pkey, data, flags)
 	 * the rkey/rdata from the *secondary* cursor.
 	 *
 	 * We accomplish all this by passing in the DBTs we started out
-	 * with to the c_get, but having swapped the contents of rskey and
-	 * rkey, respectively, into rkey and rdata;  __db_ret will treat
-	 * them like the normal key/data pair in a c_get call, and will
-	 * realloc them as need be (this is "step 1").  Then, for "step 2",
-	 * we swap back rskey/rkey/rdata to normal, and do a get on the primary
-	 * with the secondary dbc appointed as the owner of the returned-data
-	 * memory.
+	 * with to the c_get, but swapping the contents of rskey and rkey,
+	 * respectively, into rkey and rdata;  __db_ret will treat them like
+	 * the normal key/data pair in a c_get call, and will realloc them as
+	 * need be (this is "step 1").  Then, for "step 2", we swap back
+	 * rskey/rkey/rdata to normal, and do a get on the primary with the
+	 * secondary dbc appointed as the owner of the returned-data memory.
 	 *
 	 * Note that in step 2, we copy the flags field in case we need to
 	 * pass down a DB_DBT_PARTIAL or other flag that is compatible with
 	 * letting DB do the memory management.
 	 */
-	/* Step 1. */
-	save_rdata = dbc->rdata;
-	dbc->rdata = dbc->rkey;
-	dbc->rkey = dbc->rskey;
 
 	/*
-	 * It is correct, though slightly sick, to attempt a partial get
-	 * of a primary key.  However, if we do so here, we'll never find the
-	 * primary record;  clear the DB_DBT_PARTIAL field of pkey just
-	 * for the duration of the next call.
+	 * It is correct, though slightly sick, to attempt a partial get of a
+	 * primary key.  However, if we do so here, we'll never find the
+	 * primary record;  clear the DB_DBT_PARTIAL field of pkey just for the
+	 * duration of the next call.
 	 */
 	save_pkey_flags = pkey->flags;
 	F_CLR(pkey, DB_DBT_PARTIAL);
@@ -1653,67 +1729,114 @@ __db_c_pget(dbc, skey, pkey, data, flags)
 	 * Now we can go ahead with the meat of this call.  First, get the
 	 * primary key from the secondary index.  (What exactly we get depends
 	 * on the flags, but the underlying cursor get will take care of the
-	 * dirty work.)
+	 * dirty work.)  Duplicate the cursor, in case the later get on the
+	 * primary fails.
 	 */
-	if ((ret = __db_c_get(dbc, skey, pkey, flags)) != 0) {
-		/* Restore rskey/rkey/rdata and return. */
-		pkey->flags = save_pkey_flags;
-		dbc->rskey = dbc->rkey;
-		dbc->rkey = dbc->rdata;
-		dbc->rdata = save_rdata;
-		goto err;
+	switch (flags) {
+	case DB_CURRENT:
+	case DB_GET_BOTHC:
+	case DB_NEXT:
+	case DB_NEXT_DUP:
+	case DB_NEXT_NODUP:
+	case DB_PREV:
+	case DB_PREV_NODUP:
+		tmp_flags = DB_POSITION;
+		break;
+	default:
+		tmp_flags = 0;
+		break;
 	}
 
+	if (tmp_read_uncommitted)
+		F_SET(dbc, DBC_READ_UNCOMMITTED);
+
+	if ((ret = __db_c_dup(dbc, &dbc_n, tmp_flags)) != 0) {
+		if (tmp_read_uncommitted)
+			F_CLR(dbc, DBC_READ_UNCOMMITTED);
+
+		return (ret);
+	}
+
+	F_SET(dbc_n, DBC_TRANSIENT);
+
+	if (tmp_rmw)
+		F_SET(dbc_n, DBC_RMW);
+
+	/*
+	 * If we've been handed a primary key, it will be in native byte order,
+	 * so we need to swap it before reading from the secondary.
+	 */
+	if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC ||
+	    flags == DB_GET_BOTH_RANGE)
+		SWAP_IF_NEEDED(pdbp, sdbp, pkey);
+
+	/* Step 1. */
+	dbc_n->rdata = dbc->rkey;
+	dbc_n->rkey = dbc->rskey;
+	ret = __db_c_get(dbc_n, skey, pkey, flags);
 	/* Restore pkey's flags in case we stomped the PARTIAL flag. */
 	pkey->flags = save_pkey_flags;
 
-	/*
-	 * Restore the cursor's rskey, rkey, and rdata DBTs.  If DB
-	 * is handling the memory management, we now have newly
-	 * reallocated buffers and ulens in rkey and rdata which we want
-	 * to put in rskey and rkey.  save_rdata contains the old value
-	 * of dbc->rdata.
-	 */
-	dbc->rskey = dbc->rkey;
-	dbc->rkey = dbc->rdata;
-	dbc->rdata = save_rdata;
+	if (tmp_read_uncommitted)
+		F_CLR(dbc_n, DBC_READ_UNCOMMITTED);
+	if (tmp_rmw)
+		F_CLR(dbc_n, DBC_RMW);
 
 	/*
-	 * Now we're ready for "step 2".  If either or both of pkey and
-	 * data do not have memory management flags set--that is, if DB is
-	 * managing their memory--we need to swap around the rkey/rdata
-	 * structures so that we don't wind up trying to use memory managed
-	 * by the primary database cursor, which we'll close before we return.
+	 * We need to swap the primary key to native byte order if we read it
+	 * successfully, or if we swapped it on entry above.  We can't return
+	 * with the application's data modified.
+	 */
+	if (ret == 0 || flags == DB_GET_BOTH || flags == DB_GET_BOTHC ||
+	    flags == DB_GET_BOTH_RANGE)
+		SWAP_IF_NEEDED(pdbp, sdbp, pkey);
+
+	if (ret != 0)
+		goto err;
+
+	/*
+	 * Now we're ready for "step 2".  If either or both of pkey and data do
+	 * not have memory management flags set--that is, if DB is managing
+	 * their memory--we need to swap around the rkey/rdata structures so
+	 * that we don't wind up trying to use memory managed by the primary
+	 * database cursor, which we'll close before we return.
 	 *
 	 * !!!
-	 * If you're carefully following the bouncing ball, you'll note
-	 * that in the DB-managed case, the buffer hanging off of pkey is
-	 * the same as dbc->rkey->data.  This is just fine;  we may well
-	 * realloc and stomp on it when we return, if we're going a
-	 * DB_GET_BOTH and need to return a different partial or key
-	 * (depending on the comparison function), but this is safe.
+	 * If you're carefully following the bouncing ball, you'll note that in
+	 * the DB-managed case, the buffer hanging off of pkey is the same as
+	 * dbc->rkey->data.  This is just fine;  we may well realloc and stomp
+	 * on it when we return, if we're doing a DB_GET_BOTH and need to
+	 * return a different partial or key (depending on the comparison
+	 * function), but this is safe.
 	 *
 	 * !!!
 	 * We need to use __db_cursor_int here rather than simply calling
-	 * pdbp->cursor, because otherwise, if we're in CDB, we'll
-	 * allocate a new locker ID and leave ourselves open to deadlocks.
-	 * (Even though we're only acquiring read locks, we'll still block
-	 * if there are any waiters.)
+	 * pdbp->cursor, because otherwise, if we're in CDB, we'll allocate a
+	 * new locker ID and leave ourselves open to deadlocks.  (Even though
+	 * we're only acquiring read locks, we'll still block if there are any
+	 * waiters.)
 	 */
 	if ((ret = __db_cursor_int(pdbp,
 	    dbc->txn, pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
 		goto err;
 
+	if (tmp_read_uncommitted)
+		F_SET(pdbc, DBC_READ_UNCOMMITTED);
+	if (tmp_rmw)
+		F_SET(pdbc, DBC_RMW);
+	if (F_ISSET(dbc, DBC_READ_COMMITTED))
+		F_SET(pdbc, DBC_READ_COMMITTED);
+
 	/*
-	 * We're about to use pkey a second time.  If DB_DBT_MALLOC
-	 * is set on it, we'll leak the memory we allocated the first time.
-	 * Thus, set DB_DBT_REALLOC instead so that we reuse that memory
-	 * instead of leaking it.
+	 * We're about to use pkey a second time.  If DB_DBT_MALLOC is set on
+	 * it, we'll leak the memory we allocated the first time.  Thus, set
+	 * DB_DBT_REALLOC instead so that we reuse that memory instead of
+	 * leaking it.
 	 *
 	 * !!!
-	 * This assumes that the user must always specify a compatible
-	 * realloc function if a malloc function is specified.  I think
-	 * this is a reasonable requirement.
+	 * This assumes that the user must always specify a compatible realloc
+	 * function if a malloc function is specified.  I think this is a
+	 * reasonable requirement.
 	 */
 	if (F_ISSET(pkey, DB_DBT_MALLOC)) {
 		F_CLR(pkey, DB_DBT_MALLOC);
@@ -1722,38 +1845,41 @@ __db_c_pget(dbc, skey, pkey, data, flags)
 	}
 
 	/*
-	 * Do the actual get.  Set DBC_TRANSIENT since we don't care
-	 * about preserving the position on error, and it's faster.
-	 * SET_RET_MEM so that the secondary DBC owns any returned-data
-	 * memory.
+	 * Do the actual get.  Set DBC_TRANSIENT since we don't care about
+	 * preserving the position on error, and it's faster.  SET_RET_MEM so
+	 * that the secondary DBC owns any returned-data memory.
 	 */
 	F_SET(pdbc, DBC_TRANSIENT);
 	SET_RET_MEM(pdbc, dbc);
 	ret = __db_c_get(pdbc, pkey, data, DB_SET);
 
 	/*
-	 * If the item wasn't found in the primary, this is a bug;
-	 * our secondary has somehow gotten corrupted, and contains
-	 * elements that don't correspond to anything in the primary.
-	 * Complain.
+	 * If the item wasn't found in the primary, this is a bug; our
+	 * secondary has somehow gotten corrupted, and contains elements that
+	 * don't correspond to anything in the primary.  Complain.
 	 */
 	if (ret == DB_NOTFOUND)
 		ret = __db_secondary_corrupt(pdbp);
 
 	/* Now close the primary cursor. */
-	t_ret = __db_c_close(pdbc);
+	if ((t_ret = __db_c_close(pdbc)) != 0 && ret == 0)
+		ret = t_ret;
 
-err:	if (pkeymalloc) {
+err:	/* Cleanup and cursor resolution. */
+	if ((t_ret = __db_c_cleanup(dbc, dbc_n, ret)) != 0 && ret == 0)
+		ret = t_ret;
+	if (pkeymalloc) {
 		/*
-		 * If pkey had a MALLOC flag, we need to restore it;
-		 * otherwise, if the user frees the buffer but reuses
-		 * the DBT without NULL'ing its data field or changing
-		 * the flags, we may drop core.
+		 * If pkey had a MALLOC flag, we need to restore it; otherwise,
+		 * if the user frees the buffer but reuses the DBT without
+		 * NULL'ing its data field or changing the flags, we may drop
+		 * core.
 		 */
 		F_CLR(pkey, DB_DBT_REALLOC);
 		F_SET(pkey, DB_DBT_MALLOC);
 	}
-	return (t_ret == 0 ? ret : t_ret);
+
+	return (ret);
 }
 
 /*
@@ -1880,6 +2006,7 @@ __db_c_del_secondary(dbc)
 
 	memset(&skey, 0, sizeof(DBT));
 	memset(&pkey, 0, sizeof(DBT));
+	pdbp = dbc->dbp->s_primary;
 
 	/*
 	 * Get the current item that we're pointing at.
@@ -1890,6 +2017,8 @@ __db_c_del_secondary(dbc)
 	if ((ret = __db_c_get(dbc, &skey, &pkey, DB_CURRENT)) != 0)
 		return (ret);
 
+	SWAP_IF_NEEDED(pdbp, dbc->dbp, &pkey);
+
 	/*
 	 * Create a cursor on the primary with our locker ID,
 	 * so that when it calls back, we don't conflict.
@@ -1900,7 +2029,6 @@ __db_c_del_secondary(dbc)
 	 * interface.  This shouldn't be any less efficient
 	 * anyway.
 	 */
-	pdbp = dbc->dbp->s_primary;
 	if ((ret = __db_cursor_int(pdbp, dbc->txn,
 	    pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
 		return (ret);
@@ -1968,8 +2096,9 @@ __db_c_del_primary(dbc)
 	if ((ret = __db_c_get(dbc, &pkey, &data, DB_CURRENT)) != 0)
 		return (ret);
 
-	for (sdbp = __db_s_first(dbp);
-	    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+	if ((ret = __db_s_first(dbp, &sdbp)) != 0)
+		goto err;
+	for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
 		/*
 		 * Get the secondary key for this secondary and the current
 		 * item.
@@ -1985,13 +2114,13 @@ __db_c_del_primary(dbc)
 
 			/* We had a substantive error.  Bail. */
 			FREE_IF_NEEDED(sdbp, &skey);
-			goto done;
+			goto err;
 		}
 
 		/* Open a secondary cursor. */
 		if ((ret = __db_cursor_int(sdbp, dbc->txn, sdbp->type,
 		    PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0)
-			goto done;
+			goto err;
 		/* See comment above and in __db_c_put. */
 		if (CDB_LOCKING(sdbp->dbenv)) {
 			DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
@@ -2014,6 +2143,7 @@ __db_c_del_primary(dbc)
 		memset(&tempskey, 0, sizeof(DBT));
 		tempskey.data = skey.data;
 		tempskey.size = skey.size;
+		SWAP_IF_NEEDED(dbp, sdbp, &pkey);
 		memset(&temppkey, 0, sizeof(DBT));
 		temppkey.data = pkey.data;
 		temppkey.size = pkey.size;
@@ -2022,16 +2152,17 @@ __db_c_del_primary(dbc)
 			ret = __db_c_del(sdbc, DB_UPDATE_SECONDARY);
 		else if (ret == DB_NOTFOUND)
 			ret = __db_secondary_corrupt(dbp);
+		SWAP_IF_NEEDED(dbp, sdbp, &pkey);
 
 		FREE_IF_NEEDED(sdbp, &skey);
 
 		if ((t_ret = __db_c_close(sdbc)) != 0 && ret == 0)
 			ret = t_ret;
 		if (ret != 0)
-			goto done;
+			goto err;
 	}
 
-done:	if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+err:	if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
 		ret = t_ret;
 	return (ret);
 }
@@ -2040,23 +2171,25 @@ done:	if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
  * __db_s_first --
  *	Get the first secondary, if any are present, from the primary.
  *
- * PUBLIC: DB *__db_s_first __P((DB *));
+ * PUBLIC: int __db_s_first __P((DB *, DB **));
  */
-DB *
-__db_s_first(pdbp)
-	DB *pdbp;
+int
+__db_s_first(pdbp, sdbpp)
+	DB *pdbp, **sdbpp;
 {
 	DB *sdbp;
 
-	MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_LOCK(pdbp->dbenv, pdbp->mutex);
 	sdbp = LIST_FIRST(&pdbp->s_secondaries);
 
 	/* See __db_s_next. */
 	if (sdbp != NULL)
 		sdbp->s_refcnt++;
-	MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex);
 
-	return (sdbp);
+	*sdbpp = sdbp;
+
+	return (0);
 }
 
 /*
@@ -2099,7 +2232,7 @@ __db_s_next(sdbpp)
 	pdbp = sdbp->s_primary;
 	closeme = NULL;
 
-	MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_LOCK(pdbp->dbenv, pdbp->mutex);
 	DB_ASSERT(sdbp->s_refcnt != 0);
 	if (--sdbp->s_refcnt == 0) {
 		LIST_REMOVE(sdbp, s_links);
@@ -2108,7 +2241,7 @@ __db_s_next(sdbpp)
 	sdbp = LIST_NEXT(sdbp, s_links);
 	if (sdbp != NULL)
 		sdbp->s_refcnt++;
-	MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex);
 
 	*sdbpp = sdbp;
 
@@ -2136,13 +2269,13 @@ __db_s_done(sdbp)
 	pdbp = sdbp->s_primary;
 	doclose = 0;
 
-	MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_LOCK(pdbp->dbenv, pdbp->mutex);
 	DB_ASSERT(sdbp->s_refcnt != 0);
 	if (--sdbp->s_refcnt == 0) {
 		LIST_REMOVE(sdbp, s_links);
 		doclose = 1;
 	}
-	MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+	MUTEX_UNLOCK(pdbp->dbenv, pdbp->mutex);
 
 	return (doclose ? __db_close(sdbp, NULL, 0) : 0);
 }
diff --git a/storage/bdb/db/db_conv.c b/storage/bdb/db/db_conv.c
index b4c5c9a29ec..53f4e638d5c 100644
--- a/storage/bdb/db/db_conv.c
+++ b/storage/bdb/db/db_conv.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -36,7 +36,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_conv.c,v 11.45 2004/01/28 03:35:57 bostic Exp $
+ * $Id: db_conv.c,v 12.1 2005/06/16 20:21:09 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_dispatch.c b/storage/bdb/db/db_dispatch.c
index 2317f500a71..3c56b556219 100644
--- a/storage/bdb/db/db_dispatch.c
+++ b/storage/bdb/db/db_dispatch.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -35,7 +35,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_dispatch.c,v 11.169 2004/10/27 16:44:26 ubell Exp $
+ * $Id: db_dispatch.c,v 12.12 2005/11/10 21:11:42 bostic Exp $
  */
 
 #include "db_config.h"
@@ -43,22 +43,26 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#include 
 #include 
 #endif
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
+#ifndef HAVE_FTRUNCATE
 #include "dbinc/db_shash.h"
+#endif
 #include "dbinc/hash.h"
-#include "dbinc/log.h"
+#ifndef HAVE_FTRUNCATE
+#include "dbinc/lock.h"
 #include "dbinc/mp.h"
+#endif
+#include "dbinc/log.h"
 #include "dbinc/fop.h"
 #include "dbinc/txn.h"
 
 #ifndef HAVE_FTRUNCATE
-static int __db_limbo_fix __P((DB *,
-    DB_TXN *, DB_TXNLIST *, db_pgno_t *, DBMETA *, db_limbo_state));
+static int __db_limbo_fix __P((DB *, DB_TXN *,
+		DB_TXNLIST *, db_pgno_t *, DBMETA *, db_limbo_state));
 static int __db_limbo_bucket __P((DB_ENV *,
 	     DB_TXN *, DB_TXNLIST *, db_limbo_state));
 static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *));
@@ -66,11 +70,11 @@ static int __db_limbo_prepare __P(( DB *, DB_TXN *, DB_TXNLIST *));
 static int __db_lock_move __P((DB_ENV *,
 		u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *));
 static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
-		int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+		int32_t, u_int8_t *, char *, db_pgno_t));
 #endif
-static int __db_txnlist_find_internal __P((DB_ENV *,
-    void *, db_txnlist_type, u_int32_t, u_int8_t[DB_FILE_ID_LEN],
-    DB_TXNLIST **, int, u_int32_t *));
+static int __db_txnlist_find_internal __P((DB_ENV *, DB_TXNHEAD *,
+		db_txnlist_type, u_int32_t, u_int8_t *, DB_TXNLIST **,
+		int, u_int32_t *));
 
 /*
  * __db_dispatch --
@@ -83,7 +87,7 @@ static int __db_txnlist_find_internal __P((DB_ENV *,
  *
  * PUBLIC: int __db_dispatch __P((DB_ENV *,
  * PUBLIC:     int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)),
- * PUBLIC:     size_t, DBT *, DB_LSN *, db_recops, void *));
+ * PUBLIC:     size_t, DBT *, DB_LSN *, db_recops, DB_TXNHEAD *));
  */
 int
 __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
@@ -93,7 +97,7 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
 	DBT *db;		/* The log record upon which to dispatch. */
 	DB_LSN *lsnp;		/* The lsn of the record being dispatched. */
 	db_recops redo;		/* Redo this op (or undo it). */
-	void *info;
+	DB_TXNHEAD *info;	/* Transaction list. */
 {
 	DB_LSN prev_lsn;
 	u_int32_t rectype, status, txnid;
@@ -235,7 +239,9 @@ __db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
 			break;
 
 		default:
-			if (txnid != 0) {
+			if (txnid == 0)
+				status = 0;
+			else {
 				ret = __db_txnlist_find(dbenv,
 				    info, txnid, &status);
 
@@ -364,14 +370,14 @@ __db_add_recovery(dbenv, dtab, dtabsize, func, ndx)
  *	Initialize transaction linked list.
  *
  * PUBLIC: int __db_txnlist_init __P((DB_ENV *,
- * PUBLIC:     u_int32_t, u_int32_t, DB_LSN *, void *));
+ * PUBLIC:     u_int32_t, u_int32_t, DB_LSN *, DB_TXNHEAD **));
  */
 int
 __db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
 	DB_ENV *dbenv;
 	u_int32_t low_txn, hi_txn;
 	DB_LSN *trunc_lsn;
-	void *retp;
+	DB_TXNHEAD **retp;
 {
 	DB_TXNHEAD *headp;
 	u_int32_t size, tmp;
@@ -427,7 +433,7 @@ __db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
 	}
 	ZERO_LSN(headp->ckplsn);
 
-	*(void **)retp = headp;
+	*retp = headp;
 	return (0);
 }
 
@@ -436,23 +442,21 @@ __db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
  *	Add an element to our transaction linked list.
  *
  * PUBLIC: int __db_txnlist_add __P((DB_ENV *,
- * PUBLIC:     void *, u_int32_t, u_int32_t, DB_LSN *));
+ * PUBLIC:     DB_TXNHEAD *, u_int32_t, u_int32_t, DB_LSN *));
  */
 int
-__db_txnlist_add(dbenv, listp, txnid, status, lsn)
+__db_txnlist_add(dbenv, hp, txnid, status, lsn)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	u_int32_t txnid, status;
 	DB_LSN *lsn;
 {
-	DB_TXNHEAD *hp;
 	DB_TXNLIST *elp;
 	int ret;
 
 	if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
 		return (ret);
 
-	hp = (DB_TXNHEAD *)listp;
 	LIST_INSERT_HEAD(&hp->head[DB_TXNLIST_MASK(hp, txnid)], elp, links);
 
 	elp->type = TXNLIST_TXNID;
@@ -474,19 +478,19 @@ __db_txnlist_add(dbenv, listp, txnid, status, lsn)
  * __db_txnlist_remove --
  *	Remove an element from our transaction linked list.
  *
- * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
+ * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, DB_TXNHEAD *, u_int32_t));
  */
 int
-__db_txnlist_remove(dbenv, listp, txnid)
+__db_txnlist_remove(dbenv, hp, txnid)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	u_int32_t txnid;
 {
 	DB_TXNLIST *entry;
 	u_int32_t status;
 
 	return (__db_txnlist_find_internal(dbenv,
-	    listp, TXNLIST_TXNID, txnid, NULL, &entry, 1, &status));
+	    hp, TXNLIST_TXNID, txnid, NULL, &entry, 1, &status));
 }
 
 /*
@@ -497,20 +501,17 @@ __db_txnlist_remove(dbenv, listp, txnid)
  * recovery, we are going to virtually truncate the log and we need
  * to retain the last checkpoint before the truncation point.
  *
- * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
+ * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
  */
 void
-__db_txnlist_ckp(dbenv, listp, ckp_lsn)
+__db_txnlist_ckp(dbenv, hp, ckp_lsn)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	DB_LSN *ckp_lsn;
 {
-	DB_TXNHEAD *hp;
 
 	COMPQUIET(dbenv, NULL);
 
-	hp = (DB_TXNHEAD *)listp;
-
 	if (IS_ZERO_LSN(hp->ckplsn) && !IS_ZERO_LSN(hp->maxlsn) &&
 	    log_compare(&hp->maxlsn, ckp_lsn) >= 0)
 		hp->ckplsn = *ckp_lsn;
@@ -520,26 +521,24 @@ __db_txnlist_ckp(dbenv, listp, ckp_lsn)
  * __db_txnlist_end --
  *	Discard transaction linked list.
  *
- * PUBLIC: void __db_txnlist_end __P((DB_ENV *, void *));
+ * PUBLIC: void __db_txnlist_end __P((DB_ENV *, DB_TXNHEAD *));
  */
 void
-__db_txnlist_end(dbenv, listp)
+__db_txnlist_end(dbenv, hp)
 	DB_ENV *dbenv;
-	void *listp;
-{
 	DB_TXNHEAD *hp;
-	DB_TXNLIST *p;
+{
 	u_int32_t i;
+	DB_TXNLIST *p;
 
-	if ((hp = (DB_TXNHEAD *)listp) == NULL)
+	if (hp == NULL)
 		return;
 
 	for (i = 0; i < hp->nslots; i++)
 		while (hp != NULL && (p = LIST_FIRST(&hp->head[i])) != NULL) {
-			LIST_REMOVE(p, links);
 			switch (p->type) {
 			case TXNLIST_LSN:
-				__os_free(dbenv, p->u.l.lsn_array);
+				__os_free(dbenv, p->u.l.lsn_stack);
 				break;
 			case TXNLIST_DELETE:
 			case TXNLIST_PGNO:
@@ -551,12 +550,13 @@ __db_txnlist_end(dbenv, listp)
 				 */
 				break;
 			}
+			LIST_REMOVE(p, links);
 			__os_free(dbenv, p);
 		}
 
 	if (hp->gen_array != NULL)
 		__os_free(dbenv, hp->gen_array);
-	__os_free(dbenv, listp);
+	__os_free(dbenv, hp);
 }
 
 /*
@@ -568,12 +568,12 @@ __db_txnlist_end(dbenv, listp)
  *	was generated while not in a transaction.
  *
  * PUBLIC: int __db_txnlist_find __P((DB_ENV *,
- * PUBLIC:     void *, u_int32_t, u_int32_t *));
+ * PUBLIC:     DB_TXNHEAD *, u_int32_t, u_int32_t *));
  */
 int
-__db_txnlist_find(dbenv, listp, txnid, statusp)
+__db_txnlist_find(dbenv, hp, txnid, statusp)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	u_int32_t txnid, *statusp;
 {
 	DB_TXNLIST *entry;
@@ -581,7 +581,7 @@ __db_txnlist_find(dbenv, listp, txnid, statusp)
 	if (txnid == 0)
 		return (DB_NOTFOUND);
 
-	return (__db_txnlist_find_internal(dbenv, listp,
+	return (__db_txnlist_find_internal(dbenv, hp,
 	    TXNLIST_TXNID, txnid, NULL, &entry, 0, statusp));
 }
 
@@ -590,32 +590,30 @@ __db_txnlist_find(dbenv, listp, txnid, statusp)
  *	Change the status of an existing transaction entry.
  *	Returns DB_NOTFOUND if no such entry exists.
  *
- * PUBLIC: int __db_txnlist_update __P((DB_ENV *,
- * PUBLIC:     void *, u_int32_t, u_int32_t, DB_LSN *, u_int32_t *, int));
+ * PUBLIC: int __db_txnlist_update __P((DB_ENV *, DB_TXNHEAD *,
+ * PUBLIC:     u_int32_t, u_int32_t, DB_LSN *, u_int32_t *, int));
  */
 int
-__db_txnlist_update(dbenv, listp, txnid, status, lsn, ret_status, add_ok)
+__db_txnlist_update(dbenv, hp, txnid, status, lsn, ret_status, add_ok)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	u_int32_t txnid, status;
 	DB_LSN *lsn;
 	u_int32_t *ret_status;
 	int add_ok;
 {
-	DB_TXNHEAD *hp;
 	DB_TXNLIST *elp;
 	int ret;
 
 	if (txnid == 0)
 		return (DB_NOTFOUND);
 
-	hp = (DB_TXNHEAD *)listp;
 	ret = __db_txnlist_find_internal(dbenv,
-	    listp, TXNLIST_TXNID, txnid, NULL, &elp, 0, ret_status);
+	    hp, TXNLIST_TXNID, txnid, NULL, &elp, 0, ret_status);
 
 	if (ret == DB_NOTFOUND && add_ok) {
 		*ret_status = status;
-		return (__db_txnlist_add(dbenv, listp, txnid, status, lsn));
+		return (__db_txnlist_add(dbenv, hp, txnid, status, lsn));
 	}
 	if (ret != 0)
 		return (ret);
@@ -640,9 +638,9 @@ __db_txnlist_update(dbenv, listp, txnid, status, lsn, ret_status, add_ok)
  */
 static int
 __db_txnlist_find_internal(dbenv,
-    listp, type, txnid, uid, txnlistp, delete, statusp)
+    hp, type, txnid, uid, txnlistp, delete, statusp)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	db_txnlist_type type;
 	u_int32_t  txnid;
 	u_int8_t uid[DB_FILE_ID_LEN];
@@ -651,14 +649,13 @@ __db_txnlist_find_internal(dbenv,
 	u_int32_t *statusp;
 {
 	struct __db_headlink *head;
-	DB_TXNHEAD *hp;
 	DB_TXNLIST *p;
 	u_int32_t generation, hash, i;
 	int ret;
 
 	ret = 0;
 
-	if ((hp = (DB_TXNHEAD *)listp) == NULL)
+	if (hp == NULL)
 		return (DB_NOTFOUND);
 
 	switch (type) {
@@ -712,12 +709,14 @@ __db_txnlist_find_internal(dbenv,
 		if (delete == 1) {
 			LIST_REMOVE(p, links);
 			__os_free(dbenv, p);
+			*txnlistp = NULL;
 		} else if (p != LIST_FIRST(head)) {
 			/* Move it to head of list. */
 			LIST_REMOVE(p, links);
 			LIST_INSERT_HEAD(head, p, links);
-		}
-		*txnlistp = p;
+			*txnlistp = p;
+		} else
+			*txnlistp = p;
 		return (ret);
 	}
 
@@ -729,16 +728,15 @@ __db_txnlist_find_internal(dbenv,
  *	Change the current generation number.
  *
  * PUBLIC: int __db_txnlist_gen __P((DB_ENV *,
- * PUBLIC:       void *, int, u_int32_t, u_int32_t));
+ * PUBLIC:       DB_TXNHEAD *, int, u_int32_t, u_int32_t));
  */
 int
-__db_txnlist_gen(dbenv, listp, incr, min, max)
+__db_txnlist_gen(dbenv, hp, incr, min, max)
 	DB_ENV *dbenv;
-	void *listp;
+	DB_TXNHEAD *hp;
 	int incr;
 	u_int32_t min, max;
 {
-	DB_TXNHEAD *hp;
 	int ret;
 
 	/*
@@ -753,7 +751,6 @@ __db_txnlist_gen(dbenv, listp, incr, min, max)
 	 * is given the generation number of the first range it falls into
 	 * in the stack.
 	 */
-	hp = (DB_TXNHEAD *)listp;
 	if (incr < 0) {
 		--hp->generation;
 		memmove(hp->gen_array, &hp->gen_array[1],
@@ -775,71 +772,78 @@ __db_txnlist_gen(dbenv, listp, incr, min, max)
 	return (0);
 }
 
-#define	TXN_BUBBLE(AP, MAX) {						\
-	DB_LSN __tmp;							\
-	u_int32_t __j;							\
-									\
-	for (__j = 0; __j < MAX - 1; __j++)				\
-		if (log_compare(&AP[__j], &AP[__j + 1]) < 0) {		\
-			__tmp = AP[__j];				\
-			AP[__j] = AP[__j + 1];				\
-			AP[__j + 1] = __tmp;				\
-		}							\
-}
-
 /*
  * __db_txnlist_lsnadd --
- *	Add to or re-sort the transaction list lsn entry.  Note that since this
- *	is used during an abort, the __txn_undo code calls into the "recovery"
- *	subsystem explicitly, and there is only a single TXNLIST_LSN entry on
- *	the list.
+ *	Save the prev_lsn from a txn_child record.
  *
- * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+ * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
  */
 int
-__db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
+__db_txnlist_lsnadd(dbenv, hp, lsnp)
 	DB_ENV *dbenv;
-	void *listp;
-	DB_LSN *lsnp;
-	u_int32_t flags;
-{
 	DB_TXNHEAD *hp;
+	DB_LSN *lsnp;
+{
 	DB_TXNLIST *elp;
-	u_int32_t i;
 	int ret;
 
-	hp = (DB_TXNHEAD *)listp;
+	if (IS_ZERO_LSN(*lsnp))
+		return (0);
 
 	for (elp = LIST_FIRST(&hp->head[0]);
 	    elp != NULL; elp = LIST_NEXT(elp, links))
 		if (elp->type == TXNLIST_LSN)
 			break;
 
-	if (elp == NULL)
+	if (elp == NULL) {
+		if ((ret = __db_txnlist_lsninit(dbenv, hp, lsnp)) != 0)
+			return (ret);
 		return (DB_SURPRISE_KID);
+	}
 
-	if (LF_ISSET(TXNLIST_NEW)) {
-		if (elp->u.l.ntxns >= elp->u.l.maxn) {
-			if ((ret = __os_realloc(dbenv,
-			    2 * elp->u.l.maxn * sizeof(DB_LSN),
-			    &elp->u.l.lsn_array)) != 0)
-				return (ret);
-			elp->u.l.maxn *= 2;
+	if (elp->u.l.stack_indx == elp->u.l.stack_size) {
+		elp->u.l.stack_size <<= 1;
+		if ((ret = __os_realloc(dbenv, sizeof(DB_LSN) *
+		     elp->u.l.stack_size, &elp->u.l.lsn_stack)) != 0) {
+			__db_txnlist_end(dbenv, hp);
+			return (ret);
 		}
-		elp->u.l.lsn_array[elp->u.l.ntxns++] = *lsnp;
-	} else
-		/* Simply replace the 0th element. */
-		elp->u.l.lsn_array[0] = *lsnp;
+	}
+	elp->u.l.lsn_stack[elp->u.l.stack_indx++] = *lsnp;
 
-	/*
-	 * If we just added a new entry and there may be NULL entries, so we
-	 * have to do a complete bubble sort, not just trickle a changed entry
-	 * around.
-	 */
-	for (i = 0; i < (!LF_ISSET(TXNLIST_NEW) ? 1 : elp->u.l.ntxns); i++)
-		TXN_BUBBLE(elp->u.l.lsn_array, elp->u.l.ntxns);
+	return (0);
+}
 
-	*lsnp = elp->u.l.lsn_array[0];
+/*
+ * __db_txnlist_lsnget --
+ *
+ * PUBLIC: int __db_txnlist_lsnget __P((DB_ENV *,
+ * PUBLIC:     DB_TXNHEAD *, DB_LSN *, u_int32_t));
+ *	Get the lsn saved from a txn_child record.
+ */
+int
+__db_txnlist_lsnget(dbenv, hp, lsnp, flags)
+	DB_ENV *dbenv;
+	DB_TXNHEAD *hp;
+	DB_LSN *lsnp;
+	u_int32_t flags;
+{
+	DB_TXNLIST *elp;
+
+	COMPQUIET(dbenv, NULL);
+	COMPQUIET(flags, 0);
+
+	for (elp = LIST_FIRST(&hp->head[0]);
+	    elp != NULL; elp = LIST_NEXT(elp, links))
+		if (elp->type == TXNLIST_LSN)
+			break;
+
+	if (elp == NULL || elp->u.l.stack_indx == 0) {
+		ZERO_LSN(*lsnp);
+		return (0);
+	}
+
+	*lsnp = elp->u.l.lsn_stack[--elp->u.l.stack_indx];
 
 	return (0);
 }
@@ -865,13 +869,13 @@ __db_txnlist_lsninit(dbenv, hp, lsnp)
 		goto err;
 	LIST_INSERT_HEAD(&hp->head[0], elp, links);
 
-	if ((ret = __os_malloc(dbenv,
-	    12 * sizeof(DB_LSN), &elp->u.l.lsn_array)) != 0)
-		goto err;
 	elp->type = TXNLIST_LSN;
-	elp->u.l.maxn = 12;
-	elp->u.l.ntxns = 1;
-	elp->u.l.lsn_array[0] = *lsnp;
+	if ((ret = __os_malloc(dbenv,
+	    sizeof(DB_LSN) * DB_LSN_STACK_SIZE, &elp->u.l.lsn_stack)) != 0)
+		goto err;
+	elp->u.l.stack_indx = 1;
+	elp->u.l.stack_size = DB_LSN_STACK_SIZE;
+	elp->u.l.lsn_stack[0] = *lsnp;
 
 	return (0);
 
@@ -886,13 +890,13 @@ err:	__db_txnlist_end(dbenv, hp);
  *
  * PUBLIC: #ifndef HAVE_FTRUNCATE
  * PUBLIC: int __db_add_limbo __P((DB_ENV *,
- * PUBLIC:      void *, int32_t, db_pgno_t, int32_t));
+ * PUBLIC:      DB_TXNHEAD *, int32_t, db_pgno_t, int32_t));
  * PUBLIC: #endif
  */
 int
-__db_add_limbo(dbenv, info, fileid, pgno, count)
+__db_add_limbo(dbenv, hp, fileid, pgno, count)
 	DB_ENV *dbenv;
-	void *info;
+	DB_TXNHEAD *hp;
 	int32_t fileid;
 	db_pgno_t pgno;
 	int32_t count;
@@ -907,7 +911,7 @@ __db_add_limbo(dbenv, info, fileid, pgno, count)
 
 	do {
 		if ((ret =
-		    __db_txnlist_pgnoadd(dbenv, info, fileid, fnp->ufid,
+		    __db_txnlist_pgnoadd(dbenv, hp, fileid, fnp->ufid,
 		    R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0)
 			return (ret);
 		pgno++;
@@ -1136,9 +1140,9 @@ retry:		dbp_created = 0;
 			dbp_created = 1;
 
 			/* It is ok if the file is nolonger there. */
-			ret = __db_open(dbp,
-			    t, elp->u.p.fname, NULL, DB_UNKNOWN,
-			    DB_ODDFILESIZE, __db_omode("rw----"), PGNO_BASE_MD);
+			ret = __db_open(dbp, t, elp->u.p.fname, NULL,
+			    DB_UNKNOWN, DB_ODDFILESIZE, __db_omode(OWNER_RW),
+			    PGNO_BASE_MD);
 			if (ret == ENOENT)
 				goto next;
 		}
@@ -1153,7 +1157,7 @@ retry:		dbp_created = 0;
 		mpf = dbp->mpf;
 		last_pgno = PGNO_INVALID;
 
-		if (meta == NULL && 
+		if (meta == NULL &&
 		    (ctxn == NULL || state == LIMBO_COMPENSATE)) {
 			pgno = PGNO_BASE_MD;
 			if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0)
@@ -1383,6 +1387,15 @@ __db_limbo_fix(dbp, ctxn, elp, lastp, meta, state)
 				 * do the open, we have to mark it explicitly.
 				 */
 				F_SET(dbc, DBC_COMPENSATE);
+
+				/*
+				 * If aborting a txn for a different process
+				 * via XA or failchk, DB_AM_RECOVER will be
+				 * set but we need to log the compensating
+				 * transactions.
+				 */
+				F_CLR(dbc, DBC_RECOVER);
+
 				ret = __db_free(dbc, pagep);
 				pagep = NULL;
 
@@ -1537,19 +1550,16 @@ err:	return (ret);
  * __db_txnlist_print --
  *	Print out the transaction list.
  *
- * PUBLIC: void __db_txnlist_print __P((void *));
+ * PUBLIC: void __db_txnlist_print __P((DB_TXNHEAD *));
  */
 void
-__db_txnlist_print(listp)
-	void *listp;
-{
+__db_txnlist_print(hp)
 	DB_TXNHEAD *hp;
+{
 	DB_TXNLIST *p;
 	u_int32_t i;
 	char *txntype;
 
-	hp = (DB_TXNHEAD *)listp;
-
 	printf("Maxid: %lu Generation: %lu\n",
 	    (u_long)hp->maxid, (u_long)hp->generation);
 	for (i = 0; i < hp->nslots; i++)
diff --git a/storage/bdb/db/db_dup.c b/storage/bdb/db/db_dup.c
index 725e81cceff..2f0732c6b5c 100644
--- a/storage/bdb/db/db_dup.c
+++ b/storage/bdb/db/db_dup.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_dup.c,v 11.39 2004/02/18 21:34:37 bostic Exp $
+ * $Id: db_dup.c,v 12.2 2005/06/16 20:21:10 bostic Exp $
  */
 
 #include "db_config.h"
@@ -66,7 +66,7 @@ __db_ditem(dbc, pagep, indx, nbytes)
 	 * memmove(3), the regions may overlap.
 	 */
 	from = (u_int8_t *)pagep + HOFFSET(pagep);
-	DB_ASSERT((int)inp[indx] - HOFFSET(pagep) >= 0);
+	DB_ASSERT(inp[indx] >= HOFFSET(pagep));
 	memmove(from + nbytes, from, inp[indx] - HOFFSET(pagep));
 	HOFFSET(pagep) += nbytes;
 
diff --git a/storage/bdb/db/db_iface.c b/storage/bdb/db/db_iface.c
index 7be20ede132..37811a17a26 100644
--- a/storage/bdb/db/db_iface.c
+++ b/storage/bdb/db/db_iface.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_iface.c,v 11.121 2004/10/07 17:33:32 sue Exp $
+ * $Id: db_iface.c,v 12.29 2005/11/08 14:49:44 bostic Exp $
  */
 
 #include "db_config.h"
@@ -19,14 +19,20 @@
 #include "dbinc/db_page.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/btree.h"
+#ifndef HAVE_HASH
 #include "dbinc/hash.h"			/* For __db_no_hash_am(). */
+#endif
+#ifndef HAVE_QUEUE
 #include "dbinc/qam.h"			/* For __db_no_queue_am(). */
+#endif
 #include "dbinc/lock.h"
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
+#include "dbinc/txn.h"
 
 static int __db_associate_arg __P((DB *, DB *,
 	       int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+static int __db_c_del_arg __P((DBC *, u_int32_t));
 static int __db_c_get_arg __P((DBC *, DBT *, DBT *, u_int32_t));
 static int __db_c_pget_arg __P((DBC *, DBT *, u_int32_t));
 static int __db_c_put_arg __P((DBC *, DBT *, DBT *, u_int32_t));
@@ -39,33 +45,22 @@ static int __db_open_arg __P((DB *,
 	       DB_TXN *, const char *, const char *, DBTYPE, u_int32_t));
 static int __db_pget_arg __P((DB *, DBT *, u_int32_t));
 static int __db_put_arg __P((DB *, DBT *, DBT *, u_int32_t));
-static int __db_rdonly __P((const DB_ENV *, const char *));
 static int __dbt_ferr __P((const DB *, const char *, const DBT *, int));
 
-/*
- * A database should be required to be readonly if it's been explicitly
- * specified as such or if we're a client in a replicated environment and
- * we don't have the special "client-writer" designation.
- */
-#define	IS_READONLY(dbp)						\
-    (F_ISSET(dbp, DB_AM_RDONLY) ||					\
-    (IS_REP_CLIENT((dbp)->dbenv) &&					\
-    !F_ISSET((dbp), DB_AM_CL_WRITER)))
-
 /*
  * These functions implement the Berkeley DB API.  They are organized in a
  * layered fashion.  The interface functions (XXX_pp) perform all generic
  * error checks (for example, PANIC'd region, replication state change
  * in progress, inconsistent transaction usage), call function-specific
  * check routines (_arg) to check for proper flag usage, etc., do pre-amble
- * processing (incrementing handle counts, handling auto-commit), call the
- * function and then do post-amble processing (DB_AUTO_COMMIT, dec handle
- * counts).
+ * processing (incrementing handle counts, handling local transactions),
+ * call the function and then do post-amble processing (local transactions,
+ * decrement handle counts).
  *
- * So, the basic structure is:
- *	Check for generic errors
- *	Call function-specific check routine
- *	Increment handle count
+ * The basic structure is:
+ *	Check for simple/generic errors (PANIC'd region)
+ *	Check if replication is changing state (increment handle count).
+ *	Call function-specific argument checking routine
  *	Create internal transaction if necessary
  *	Call underlying worker function
  *	Commit/abort internal transaction if necessary
@@ -88,14 +83,24 @@ __db_associate_pp(dbp, txn, sdbp, callback, flags)
 {
 	DBC *sdbc;
 	DB_ENV *dbenv;
-	int handle_check, ret, txn_local;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret, txn_local;
 
 	dbenv = dbp->dbenv;
+	txn_local = 0;
 
 	PANIC_CHECK(dbenv);
+	STRIP_AUTO_COMMIT(flags);
 
-	if ((ret = __db_associate_arg(dbp, sdbp, callback, flags)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	    (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	/*
 	 * Secondary cursors may have the primary's lock file ID, so we need
@@ -106,44 +111,43 @@ __db_associate_pp(dbp, txn, sdbp, callback, flags)
 	    TAILQ_FIRST(&sdbp->join_queue) != NULL) {
 		__db_err(dbenv,
     "Databases may not become secondary indices while cursors are open");
-		return (EINVAL);
+		ret = EINVAL;
+		goto err;
 	}
 
+	if ((ret = __db_associate_arg(dbp, sdbp, callback, flags)) != 0)
+		goto err;
+
 	/*
 	 * Create a local transaction as necessary, check for consistent
 	 * transaction usage, and, if we have no transaction but do have
 	 * locking on, acquire a locker id for the handle lock acquisition.
 	 */
-	txn_local = 0;
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
+	if (IS_DB_AUTO_COMMIT(dbp, txn)) {
+		if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0)
+			goto err;
 		txn_local = 1;
-		LF_CLR(DB_AUTO_COMMIT);
-	} else if (txn != NULL && !TXN_ON(dbenv))
-		return (__db_not_txn_env(dbenv));
+	}
 
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
 		goto err;
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		goto err;
-
 	while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
 		if ((ret = __db_c_destroy(sdbc)) != 0)
-			break;
+			goto err;
 
-	if (ret == 0)
-		ret = __db_associate(dbp, txn, sdbp, callback, flags);
+	ret = __db_associate(dbp, txn, sdbp, callback, flags);
+
+err:	if (txn_local &&
+	    (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0)
+		ret = t_ret;
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
-err:	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -201,8 +205,8 @@ __db_associate_arg(dbp, sdbp, callback, flags)
 		return (EINVAL);
 	}
 
-	if ((ret = __db_fchk(dbenv,
-	    "DB->associate", flags, DB_CREATE | DB_AUTO_COMMIT)) != 0)
+	if ((ret = __db_fchk(dbenv, "DB->associate", flags, DB_CREATE |
+	    DB_IMMUTABLE_KEY)) != 0)
 		return (ret);
 
 	return (0);
@@ -220,6 +224,7 @@ __db_close_pp(dbp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
@@ -228,20 +233,20 @@ __db_close_pp(dbp, flags)
 	PANIC_CHECK(dbenv);
 
 	/*
-	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * Close a DB handle -- as a handle destructor, we can't fail.
 	 *
-	 * Validate arguments and complain if they're wrong, but as a DB
-	 * handle destructor, we can't fail.
+	 * !!!
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
 	 */
-	if (flags != 0 && flags != DB_NOSYNC &&
-	    (t_ret = __db_ferr(dbenv, "DB->close", 0)) != 0 && ret == 0)
-		ret = t_ret;
+	if (flags != 0 && flags != DB_NOSYNC)
+		ret = __db_ferr(dbenv, "DB->close", 0);
+
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) {
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) {
 		handle_check = 0;
 		if (ret == 0)
 			ret = t_ret;
@@ -251,9 +256,10 @@ __db_close_pp(dbp, flags)
 		ret = t_ret;
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -271,6 +277,7 @@ __db_cursor_pp(dbp, txn, dbcp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	int handle_check, ret;
 
 	dbenv = dbp->dbenv;
@@ -278,30 +285,37 @@ __db_cursor_pp(dbp, txn, dbcp, flags)
 	PANIC_CHECK(dbenv);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor");
 
-	if ((ret = __db_cursor_arg(dbp, flags)) != 0)
-		return (ret);
-
-	/*
-	 * Check for consistent transaction usage.  For now, assume that
-	 * this cursor might be used for read operations only (in which
-	 * case it may not require a txn).  We'll check more stringently
-	 * in c_del and c_put.  (Note that this all means that the
-	 * read-op txn tests have to be a subset of the write-op ones.)
-	 */
-	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		return (ret);
+	if (txn == NULL) {
+		handle_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
+		if (handle_check && (ret = __op_rep_enter(dbenv)) != 0) {
+			handle_check = 0;
+			goto err;
+		}
+	} else
+		handle_check = 0;
+	if ((ret = __db_cursor_arg(dbp, flags)) != 0)
+		goto err;
+
+	/*
+	 * Check for consistent transaction usage.  For now, assume this
+	 * cursor might be used for read operations only (in which case
+	 * it may not require a txn).  We'll check more stringently in
+	 * c_del and c_put.  (Note this means the read-op txn tests have
+	 * to be a subset of the write-op ones.)
+	 */
+	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+		goto err;
 
 	ret = __db_cursor(dbp, txn, dbcp, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	/* Release replication block on error. */
+	if (ret != 0 && handle_check)
+		(void)__op_rep_exit(dbenv);
 
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -347,13 +361,13 @@ __db_cursor(dbp, txn, dbcp, flags)
 			F_SET(dbc, DBC_WRITER);
 	}
 
-	if (LF_ISSET(DB_DIRTY_READ) ||
-	    (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
-		F_SET(dbc, DBC_DIRTY_READ);
+	if (LF_ISSET(DB_READ_UNCOMMITTED) ||
+	    (txn != NULL && F_ISSET(txn, TXN_READ_UNCOMMITTED)))
+		F_SET(dbc, DBC_READ_UNCOMMITTED);
 
-	if (LF_ISSET(DB_DEGREE_2) ||
-	    (txn != NULL && F_ISSET(txn, TXN_DEGREE_2)))
-		F_SET(dbc, DBC_DEGREE_2);
+	if (LF_ISSET(DB_READ_COMMITTED) ||
+	    (txn != NULL && F_ISSET(txn, TXN_READ_COMMITTED)))
+		F_SET(dbc, DBC_READ_COMMITTED);
 
 	*dbcp = dbc;
 	return (0);
@@ -376,13 +390,13 @@ __db_cursor_arg(dbp, flags)
 	dbenv = dbp->dbenv;
 
 	/*
-	 * DB_DIRTY_READ  and DB_DGREE_2 are the only valid bit-flags
-	 * and requires locking.
+	 * DB_READ_COMMITTED and DB_READ_UNCOMMITTED are the only valid
+	 * bit-flags; they require locking.
 	 */
-	if (LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2)) {
+	if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED)) {
 		if (!LOCKING_ON(dbenv))
 			return (__db_fnl(dbenv, "DB->cursor"));
-		LF_CLR(DB_DIRTY_READ | DB_DEGREE_2);
+		LF_CLR(DB_READ_COMMITTED| DB_READ_UNCOMMITTED);
 	}
 
 	/* Check for invalid function flags. */
@@ -390,13 +404,13 @@ __db_cursor_arg(dbp, flags)
 	case 0:
 		break;
 	case DB_WRITECURSOR:
-		if (IS_READONLY(dbp))
+		if (DB_IS_READONLY(dbp))
 			return (__db_rdonly(dbenv, "DB->cursor"));
 		if (!CDB_LOCKING(dbenv))
 			return (__db_ferr(dbenv, "DB->cursor", 0));
 		break;
 	case DB_WRITELOCK:
-		if (IS_READONLY(dbp))
+		if (DB_IS_READONLY(dbp))
 			return (__db_rdonly(dbenv, "DB->cursor"));
 		break;
 	default:
@@ -420,41 +434,55 @@ __db_del_pp(dbp, txn, key, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret, txn_local;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret, txn_local;
 
 	dbenv = dbp->dbenv;
+	txn_local = 0;
 
 	PANIC_CHECK(dbenv);
+	STRIP_AUTO_COMMIT(flags);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
 
+#if CONFIG_TEST
+	if (IS_REP_MASTER(dbenv))
+		DB_TEST_WAIT(dbenv, dbenv->test_check);
+#endif
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	     (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+			handle_check = 0;
+			goto err;
+	}
+
 	if ((ret = __db_del_arg(dbp, flags)) != 0)
-		return (ret);
+		goto err;
 
 	/* Create local transaction as necessary. */
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
+	if (IS_DB_AUTO_COMMIT(dbp, txn)) {
+		if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0)
+			goto err;
 		txn_local = 1;
-		LF_CLR(DB_AUTO_COMMIT);
-	} else
-		txn_local = 0;
+	}
 
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
 		goto err;
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		goto err;
-
 	ret = __db_del(dbp, txn, key, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	if (txn_local &&
+	    (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0)
+		ret = t_ret;
 
-err:	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -471,11 +499,10 @@ __db_del_arg(dbp, flags)
 	dbenv = dbp->dbenv;
 
 	/* Check for changes to a read-only tree. */
-	if (IS_READONLY(dbp))
+	if (DB_IS_READONLY(dbp))
 		return (__db_rdonly(dbenv, "DB->del"));
 
 	/* Check for invalid function flags. */
-	LF_CLR(DB_AUTO_COMMIT);
 	switch (flags) {
 	case 0:
 		break;
@@ -498,18 +525,21 @@ __db_fd_pp(dbp, fdp)
 	int *fdp;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	DB_FH *fhp;
-	int handle_check, ret;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->fd");
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
+	handle_check = IS_ENV_REPLICATED(dbenv);
 	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0)
-		return (ret);
+		goto err;
 
 	/*
 	 * !!!
@@ -521,21 +551,21 @@ __db_fd_pp(dbp, fdp)
 	 * XXX
 	 * Truly spectacular layering violation.
 	 */
-	if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) != 0)
-		goto err;
+	if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) == 0) {
+		if (fhp == NULL) {
+			*fdp = -1;
+			__db_err(dbenv,
+			    "Database does not have a valid file handle");
+			ret = ENOENT;
+		} else
+			*fdp = fhp->fd;
+	}
 
-	if (fhp == NULL) {
-		*fdp = -1;
-		__db_err(dbenv,
-		    "Database does not have a valid file handle");
-		ret = ENOENT;
-	} else
-		*fdp = fhp->fd;
-
-err:	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -553,29 +583,40 @@ __db_get_pp(dbp, txn, key, data, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	u_int32_t mode;
-	int handle_check, ret, txn_local;
+	int handle_check, ret, t_ret, txn_local;
 
 	dbenv = dbp->dbenv;
-
-	PANIC_CHECK(dbenv);
-	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get");
-
-	if ((ret = __db_get_arg(dbp, key, data, flags)) != 0)
-		return (ret);
-
 	mode = 0;
 	txn_local = 0;
-	if (LF_ISSET(DB_DIRTY_READ))
-		mode = DB_DIRTY_READ;
+
+	PANIC_CHECK(dbenv);
+	STRIP_AUTO_COMMIT(flags);
+	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get");
+
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	     (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+			handle_check = 0;
+			goto err;
+	}
+
+	if ((ret = __db_get_arg(dbp, key, data, flags)) != 0)
+		goto err;
+
+	if (LF_ISSET(DB_READ_UNCOMMITTED))
+		mode = DB_READ_UNCOMMITTED;
 	else if ((flags & DB_OPFLAGS_MASK) == DB_CONSUME ||
 	    (flags & DB_OPFLAGS_MASK) == DB_CONSUME_WAIT) {
 		mode = DB_WRITELOCK;
-		if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-			if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-				return (ret);
+		if (IS_DB_AUTO_COMMIT(dbp, txn)) {
+			if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0)
+				goto err;
 			txn_local = 1;
-			LF_CLR(DB_AUTO_COMMIT);
 		}
 	}
 
@@ -584,18 +625,17 @@ __db_get_pp(dbp, txn, key, data, flags)
 	    mode == DB_WRITELOCK || LF_ISSET(DB_RMW) ? 0 : 1)) != 0)
 		goto err;
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		goto err;
-
 	ret = __db_get(dbp, txn, key, data, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	if (txn_local &&
+	    (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0)
+		ret = t_ret;
 
-err:	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -616,12 +656,12 @@ __db_get(dbp, txn, key, data, flags)
 	int ret, t_ret;
 
 	mode = 0;
-	if (LF_ISSET(DB_DIRTY_READ)) {
-		mode = DB_DIRTY_READ;
-		LF_CLR(DB_DIRTY_READ);
-	} else if (LF_ISSET(DB_DEGREE_2)) {
-		mode = DB_DEGREE_2;
-		LF_CLR(DB_DEGREE_2);
+	if (LF_ISSET(DB_READ_UNCOMMITTED)) {
+		mode = DB_READ_UNCOMMITTED;
+		LF_CLR(DB_READ_UNCOMMITTED);
+	} else if (LF_ISSET(DB_READ_COMMITTED)) {
+		mode = DB_READ_COMMITTED;
+		LF_CLR(DB_READ_COMMITTED);
 	} else if ((flags & DB_OPFLAGS_MASK) == DB_CONSUME ||
 	    (flags & DB_OPFLAGS_MASK) == DB_CONSUME_WAIT)
 		mode = DB_WRITELOCK;
@@ -684,14 +724,15 @@ __db_get_arg(dbp, key, data, flags)
 	 * flag in a path where CDB may have been configured.
 	 */
 	check_thread = dirty = 0;
-	if (LF_ISSET(DB_DIRTY_READ | DB_RMW | DB_DEGREE_2)) {
+	if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW)) {
 		if (!LOCKING_ON(dbenv))
 			return (__db_fnl(dbenv, "DB->get"));
-		dirty = LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2);
-		if ((ret = __db_fcchk(dbenv,
-		    "DB->get", flags, DB_DIRTY_READ, DB_DEGREE_2)) != 0)
+		if ((ret = __db_fcchk(dbenv, "DB->get",
+		    flags, DB_READ_UNCOMMITTED, DB_READ_COMMITTED)) != 0)
 			return (ret);
-		LF_CLR(DB_DIRTY_READ | DB_RMW | DB_DEGREE_2);
+		if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED))
+			dirty = 1;
+		LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW);
 	}
 
 	multi = 0;
@@ -702,12 +743,6 @@ __db_get_arg(dbp, key, data, flags)
 		LF_CLR(DB_MULTIPLE);
 	}
 
-	if (LF_ISSET(DB_AUTO_COMMIT)) {
-		LF_CLR(DB_AUTO_COMMIT);
-		if (flags != DB_CONSUME && flags != DB_CONSUME_WAIT)
-			goto err;
-	}
-
 	/* Check for invalid function flags. */
 	switch (flags) {
 	case 0:
@@ -723,9 +758,9 @@ __db_get_arg(dbp, key, data, flags)
 		check_thread = 1;
 		if (dirty) {
 			__db_err(dbenv,
-    "%s is not supported with DB_CONSUME or DB_CONSUME_WAIT",
-			     LF_ISSET(DB_DIRTY_READ) ?
-			     "DB_DIRTY_READ" : "DB_DEGREE_2");
+		    "%s is not supported with DB_CONSUME or DB_CONSUME_WAIT",
+			     LF_ISSET(DB_READ_UNCOMMITTED) ?
+			     "DB_READ_UNCOMMITTED" : "DB_READ_COMMITTED");
 			return (EINVAL);
 		}
 		if (multi)
@@ -785,27 +820,31 @@ __db_join_pp(primary, curslist, dbcp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = primary->dbenv;
 
 	PANIC_CHECK(dbenv);
 
-	if ((ret = __db_join_arg(primary, curslist, flags)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, primary);
+	handle_check = IS_ENV_REPLICATED(dbenv);
 	if (handle_check && (ret =
-	    __db_rep_enter(primary, 1, 0, curslist[0]->txn != NULL)) != 0)
-		return (ret);
+	    __db_rep_enter(primary, 1, 0, curslist[0]->txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
-	ret = __db_join(primary, curslist, dbcp, flags);
+	if ((ret = __db_join_arg(primary, curslist, flags)) == 0)
+		ret = __db_join(primary, curslist, dbcp, flags);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -867,6 +906,7 @@ __db_key_range_pp(dbp, txn, key, kr, flags)
 {
 	DBC *dbc;
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
@@ -876,19 +916,25 @@ __db_key_range_pp(dbp, txn, key, kr, flags)
 
 	/*
 	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
 	 */
 	if (flags != 0)
 		return (__db_ferr(dbenv, "DB->key_range", 0));
 
-	/* Check for consistent transaction usage. */
-	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	     (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
+	/* Check for consistent transaction usage. */
+	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+		goto err;
 
 	/*
 	 * !!!
@@ -918,10 +964,11 @@ __db_key_range_pp(dbp, txn, key, kr, flags)
 		break;
 	}
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -942,16 +989,17 @@ __db_open_pp(dbp, txn, fname, dname, type, flags, mode)
 	int mode;
 {
 	DB_ENV *dbenv;
-	int handle_check, nosync, remove_me, ret, txn_local;
+	DB_THREAD_INFO *ip;
+	int handle_check, nosync, remove_me, ret, t_ret, txn_local;
 
 	dbenv = dbp->dbenv;
 	nosync = 1;
-	remove_me = 0;
+	remove_me = txn_local = 0;
+	handle_check = 0;
 
 	PANIC_CHECK(dbenv);
 
-	if ((ret = __db_open_arg(dbp, txn, fname, dname, type, flags)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	/*
 	 * Save the file and database names and flags.  We do this here
@@ -959,40 +1007,48 @@ __db_open_pp(dbp, txn, fname, dname, type, flags, mode)
 	 * DB->open method call, we strip DB_AUTO_COMMIT at this layer.
 	 */
 	if ((fname != NULL &&
-	    (ret = __os_strdup(dbenv, fname, &dbp->fname)) != 0) ||
-	    (dname != NULL &&
+	    (ret = __os_strdup(dbenv, fname, &dbp->fname)) != 0))
+		goto err;
+	if ((dname != NULL &&
 	    (ret = __os_strdup(dbenv, dname, &dbp->dname)) != 0))
-		return (ret);
+		goto err;
 	dbp->open_flags = flags;
 
 	/* Save the current DB handle flags for refresh. */
 	dbp->orig_flags = dbp->flags;
 
-	/*
-	 * Create local transaction as necessary, check for consistent
-	 * transaction usage.
-	 */
-	txn_local = 0;
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
-		txn_local = 1;
-		LF_CLR(DB_AUTO_COMMIT);
-	} else
-		if (txn != NULL && !TXN_ON(dbenv))
-			return (__db_not_txn_env(dbenv));
-
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
+	handle_check = IS_ENV_REPLICATED(dbenv);
 	if (handle_check &&
 	    (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
 		handle_check = 0;
 		goto err;
 	}
 
-	if ((ret = __db_open(dbp,
-	    txn, fname, dname, type, flags, mode, PGNO_BASE_MD)) != 0)
-		goto err;
+	/*
+	 * Create local transaction as necessary, check for consistent
+	 * transaction usage.
+	 */
+	if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) {
+		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
+			goto err;
+		txn_local = 1;
+	} else
+		if (txn != NULL && !TXN_ON(dbenv)) {
+			ret = __db_not_txn_env(dbenv);
+			goto err;
+		}
+	LF_CLR(DB_AUTO_COMMIT);
+
+	/*
+	 * We check arguments after possibly creating a local transaction,
+	 * which is unusual -- the reason is some flags are illegal if any
+	 * kind of transaction is in effect.
+	 */
+	if ((ret = __db_open_arg(dbp, txn, fname, dname, type, flags)) == 0)
+		if ((ret = __db_open(dbp, txn, fname, dname, type,
+		    flags, mode, PGNO_BASE_MD)) != 0)
+			goto txnerr;
 
 	/*
 	 * You can open the database that describes the subdatabases in the
@@ -1009,7 +1065,7 @@ __db_open_pp(dbp, txn, fname, dname, type, flags, mode)
 		__db_err(dbenv,
     "files containing multiple databases may only be opened read-only");
 		ret = EINVAL;
-		goto err;
+		goto txnerr;
 	}
 
 	/*
@@ -1026,7 +1082,7 @@ __db_open_pp(dbp, txn, fname, dname, type, flags, mode)
 	 * If not transactional, remove the databases/subdatabases.  If we're
 	 * transactional, the child transaction abort cleans up.
 	 */
-err:	if (ret != 0 && txn == NULL) {
+txnerr:	if (ret != 0 && txn == NULL) {
 		remove_me = F_ISSET(dbp, DB_AM_CREATED);
 		if (F_ISSET(dbp, DB_AM_CREATED_MSTR) ||
 		    (dname == NULL && remove_me))
@@ -1037,12 +1093,16 @@ err:	if (ret != 0 && txn == NULL) {
 			(void)__db_remove_int(dbp, txn, fname, dname, DB_FORCE);
 	}
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (txn_local && (t_ret =
+	     __db_txn_auto_resolve(dbenv, txn, nosync, ret)) && ret == 0)
+		ret = t_ret;
 
-	return (txn_local ?
-	    __db_txn_auto_resolve(dbenv, txn, nosync, ret) : ret);
+err:	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -1066,9 +1126,9 @@ __db_open_arg(dbp, txn, fname, dname, type, flags)
 	/* Validate arguments. */
 #undef	OKFLAGS
 #define	OKFLAGS								\
-    (DB_AUTO_COMMIT | DB_CREATE | DB_DIRTY_READ | DB_EXCL |		\
-     DB_FCNTL_LOCKING | DB_NO_AUTO_COMMIT | DB_NOMMAP | DB_RDONLY |	\
-     DB_RDWRMASTER | DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN)
+	(DB_AUTO_COMMIT | DB_CREATE | DB_EXCL | DB_FCNTL_LOCKING |	\
+	DB_NOMMAP | DB_NO_AUTO_COMMIT | DB_RDONLY | DB_RDWRMASTER |	\
+	DB_READ_UNCOMMITTED | DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN)
 	if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
 		return (ret);
 	if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
@@ -1086,8 +1146,7 @@ __db_open_arg(dbp, txn, fname, dname, type, flags)
 	case DB_UNKNOWN:
 		if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
 			__db_err(dbenv,
-	    "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
-			    fname);
+	    "DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE");
 			return (EINVAL);
 		}
 		ok_flags = 0;
@@ -1153,18 +1212,18 @@ __db_open_arg(dbp, txn, fname, dname, type, flags)
 
 	/* Subdatabase checks. */
 	if (dname != NULL) {
-		/* Subdatabases must be created in named files. */
-		if (fname == NULL) {
-			__db_err(dbenv,
-		    "multiple databases cannot be created in temporary files");
-			return (EINVAL);
-		}
-
-		/* QAM can't be done as a subdatabase. */
-		if (type == DB_QUEUE) {
+		/* QAM can only be done on in-memory subdatabases. */
+		if (type == DB_QUEUE && fname != NULL) {
 			__db_err(dbenv, "Queue databases must be one-per-file");
 			return (EINVAL);
 		}
+
+		/*
+		 * Named in-memory databases can't support certain flags,
+		 * so check here.
+		 */
+		if (fname == NULL)
+			F_CLR(dbp, DB_AM_CHKSUM | DB_AM_ENCRYPT);
 	}
 
 	return (0);
@@ -1185,30 +1244,35 @@ __db_pget_pp(dbp, txn, skey, pkey, data, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->pget");
 
-	if ((ret = __db_pget_arg(dbp, pkey, flags)) != 0)
-		return (ret);
-
-	if ((ret = __db_get_arg(dbp, skey, data, flags)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	    (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
+	if ((ret = __db_pget_arg(dbp, pkey, flags)) != 0 ||
+	    (ret = __db_get_arg(dbp, skey, data, flags)) != 0)
+		goto err;
 
 	ret = __db_pget(dbp, txn, skey, pkey, data, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1227,9 +1291,19 @@ __db_pget(dbp, txn, skey, pkey, data, flags)
 	u_int32_t flags;
 {
 	DBC *dbc;
+	u_int32_t mode;
 	int ret, t_ret;
 
-	if ((ret = __db_cursor(dbp, txn, &dbc, 0)) != 0)
+	if (LF_ISSET(DB_READ_UNCOMMITTED)) {
+		mode = DB_READ_UNCOMMITTED;
+		LF_CLR(DB_READ_UNCOMMITTED);
+	} else if (LF_ISSET(DB_READ_COMMITTED)) {
+		mode = DB_READ_COMMITTED;
+		LF_CLR(DB_READ_COMMITTED);
+	} else
+		mode = 0;
+
+	if ((ret = __db_cursor(dbp, txn, &dbc, mode)) != 0)
 		return (ret);
 
 	SET_RET_MEM(dbc, dbp);
@@ -1296,7 +1370,7 @@ __db_pget_arg(dbp, pkey, flags)
 	}
 
 	/* DB_CONSUME makes no sense on a secondary index. */
-	LF_CLR(DB_RMW);
+	LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW);
 	switch (flags) {
 	case DB_CONSUME:
 	case DB_CONSUME_WAIT:
@@ -1338,41 +1412,51 @@ __db_put_pp(dbp, txn, key, data, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret, txn_local;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, txn_local, t_ret;
 
 	dbenv = dbp->dbenv;
+	txn_local = 0;
 
 	PANIC_CHECK(dbenv);
+	STRIP_AUTO_COMMIT(flags);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put");
 
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	    (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
 	if ((ret = __db_put_arg(dbp, key, data, flags)) != 0)
-		return (ret);
+		goto err;
 
 	/* Create local transaction as necessary. */
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
+	if (IS_DB_AUTO_COMMIT(dbp, txn)) {
+		if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0)
+			goto err;
 		txn_local = 1;
-		LF_CLR(DB_AUTO_COMMIT);
-	} else
-		txn_local = 0;
+	}
 
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
 		goto err;
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
-		goto err;
-
 	ret = __db_put(dbp, txn, key, data, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	if (txn_local &&
+	    (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0)
+		ret = t_ret;
 
-err:	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -1392,8 +1476,8 @@ __db_put_arg(dbp, key, data, flags)
 	returnkey = 0;
 
 	/* Check for changes to a read-only tree. */
-	if (IS_READONLY(dbp))
-		return (__db_rdonly(dbenv, "put"));
+	if (DB_IS_READONLY(dbp))
+		return (__db_rdonly(dbenv, "DB->put"));
 
 	/* Check for puts on a secondary. */
 	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
@@ -1402,7 +1486,6 @@ __db_put_arg(dbp, key, data, flags)
 	}
 
 	/* Check for invalid function flags. */
-	LF_CLR(DB_AUTO_COMMIT);
 	switch (flags) {
 	case 0:
 	case DB_NOOVERWRITE:
@@ -1441,6 +1524,82 @@ err:		return (__db_ferr(dbenv, "DB->put", 0));
 	return (0);
 }
 
+/*
+ * __db_compact_pp --
+ *	DB->compact pre/post processing.
+ *
+ * PUBLIC: int __db_compact_pp __P((DB *, DB_TXN *,
+ * PUBLIC:       DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *));
+ */
+int
+__db_compact_pp(dbp, txn, start, stop, c_data, flags, end)
+	DB *dbp;
+	DB_TXN *txn;
+	DBT *start, *stop;
+	DB_COMPACT *c_data;
+	u_int32_t flags;
+	DBT *end;
+{
+	DB_COMPACT *dp, l_data;
+	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
+
+	dbenv = dbp->dbenv;
+
+	PANIC_CHECK(dbenv);
+	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->compact");
+
+	/*
+	 * !!!
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 */
+	if ((flags & ~DB_COMPACT_FLAGS) != 0)
+		return (__db_ferr(dbenv, "DB->compact", 0));
+
+	/* Check for changes to a read-only database. */
+	if (DB_IS_READONLY(dbp))
+		return (__db_rdonly(dbenv, "DB->compact"));
+
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
+	if (c_data == NULL) {
+		dp = &l_data;
+		memset(dp, 0, sizeof(*dp));
+	} else
+		dp = c_data;
+
+	switch (dbp->type) {
+	case DB_HASH:
+		if (!LF_ISSET(DB_FREELIST_ONLY))
+			goto err;
+		/* FALLTHROUGH */
+	case DB_BTREE:
+	case DB_RECNO:
+		ret = __bam_compact(dbp, txn, start, stop, dp, flags, end);
+		break;
+
+	default:
+err:		ret = __dbh_am_chk(dbp, DB_OK_BTREE);
+		break;
+	}
+
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
+}
+
 /*
  * __db_sync_pp --
  *	DB->sync pre/post processing.
@@ -1453,31 +1612,38 @@ __db_sync_pp(dbp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
-	PANIC_CHECK(dbp->dbenv);
+	PANIC_CHECK(dbenv);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
 
 	/*
 	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
 	 */
 	if (flags != 0)
 		return (__db_ferr(dbenv, "DB->sync", 0));
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	ret = __db_sync(dbp);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1492,13 +1658,15 @@ __db_c_close_pp(dbc)
 	DBC *dbc;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	DB *dbp;
-	int handle_check, ret;
+	int handle_check, ret, t_ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
+	ENV_ENTER(dbenv, ip);
 
 	/*
 	 * If the cursor is already closed we have a serious problem, and we
@@ -1509,21 +1677,20 @@ __db_c_close_pp(dbc)
 		if (dbp != NULL)
 			__db_err(dbenv, "Closing already-closed cursor");
 		DB_ASSERT(0);
-		return (EINVAL);
+		ret = EINVAL;
+		goto err;
 	}
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 0, 0, dbc->txn != NULL)) != 0)
-		return (ret);
-
+	handle_check = dbc->txn == NULL && IS_ENV_REPLICATED(dbenv);
 	ret = __db_c_close(dbc);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check &&
+	    (t_ret = __op_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1540,8 +1707,9 @@ __db_c_count_pp(dbc, recnop, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	DB *dbp;
-	int handle_check, ret;
+	int ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -1550,30 +1718,21 @@ __db_c_count_pp(dbc, recnop, flags)
 
 	/*
 	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 *
+	 * The cursor must be initialized, return EINVAL for an invalid cursor.
 	 */
 	if (flags != 0)
 		return (__db_ferr(dbenv, "DBcursor->count", 0));
 
-	/*
-	 * The cursor must be initialized, return EINVAL for an invalid cursor,
-	 * otherwise 0.
-	 */
 	if (!IS_INITIALIZED(dbc))
 		return (__db_curinval(dbenv));
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	ret = __db_c_count(dbc, recnop);
-
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1590,7 +1749,8 @@ __db_c_del_pp(dbc, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -1600,34 +1760,24 @@ __db_c_del_pp(dbc, flags)
 	if ((ret = __db_c_del_arg(dbc, flags)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
-		return (ret);
-
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+		goto err;
 
 	DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->del", NULL, NULL, flags);
-
 	ret = __db_c_del(dbc, flags);
-
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+err:
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
 /*
  * __db_c_del_arg --
  *	Check DBC->c_del arguments.
- *
- * PUBLIC: int __db_c_del_arg __P((DBC *, u_int32_t));
  */
-int
+static int
 __db_c_del_arg(dbc, flags)
 	DBC *dbc;
 	u_int32_t flags;
@@ -1639,7 +1789,7 @@ __db_c_del_arg(dbc, flags)
 	dbenv = dbp->dbenv;
 
 	/* Check for changes to a read-only tree. */
-	if (IS_READONLY(dbp))
+	if (DB_IS_READONLY(dbp))
 		return (__db_rdonly(dbenv, "DBcursor->del"));
 
 	/* Check for invalid function flags. */
@@ -1676,7 +1826,8 @@ __db_c_dup_pp(dbc, dbcp, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -1685,23 +1836,16 @@ __db_c_dup_pp(dbc, dbcp, flags)
 
 	/*
 	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
 	 */
 	if (flags != 0 && flags != DB_POSITION)
 		return (__db_ferr(dbenv, "DBcursor->dup", 0));
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	ret = __db_c_dup(dbc, dbcp, flags);
-
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1719,7 +1863,8 @@ __db_c_get_pp(dbc, key, data, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	int ret;
+	DB_THREAD_INFO *ip;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -1729,21 +1874,13 @@ __db_c_get_pp(dbc, key, data, flags)
 	if ((ret = __db_c_get_arg(dbc, key, data, flags)) != 0)
 		return (ret);
 
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	DEBUG_LREAD(dbc, dbc->txn, "DBcursor->get",
 	    flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
-
 	ret = __db_c_get(dbc, key, data, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1780,12 +1917,12 @@ __db_c_get_arg(dbc, key, data, flags)
 	 * flag in a path where CDB may have been configured.
 	 */
 	dirty = 0;
-	if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+	if (LF_ISSET(DB_READ_UNCOMMITTED | DB_RMW)) {
 		if (!LOCKING_ON(dbenv))
 			return (__db_fnl(dbenv, "DBcursor->get"));
-		if (LF_ISSET(DB_DIRTY_READ))
+		if (LF_ISSET(DB_READ_UNCOMMITTED))
 			dirty = 1;
-		LF_CLR(DB_DIRTY_READ | DB_RMW);
+		LF_CLR(DB_READ_UNCOMMITTED | DB_RMW);
 	}
 
 	multi = 0;
@@ -1802,7 +1939,7 @@ __db_c_get_arg(dbc, key, data, flags)
 	case DB_CONSUME_WAIT:
 		if (dirty) {
 			__db_err(dbenv,
-    "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+    "DB_READ_UNCOMMITTED is not supported with DB_CONSUME or DB_CONSUME_WAIT");
 			return (EINVAL);
 		}
 		if (dbp->type != DB_QUEUE)
@@ -1902,6 +2039,7 @@ __db_secondary_close_pp(dbp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
@@ -1910,20 +2048,20 @@ __db_secondary_close_pp(dbp, flags)
 	PANIC_CHECK(dbenv);
 
 	/*
-	 * !!!
-	 * The actual argument checking is simple, do it inline.
+	 * As a DB handle destructor, we can't fail.
 	 *
-	 * Validate arguments and complain if they're wrong, but as a DB
-	 * handle destructor, we can't fail.
+	 * !!!
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
 	 */
-	if (flags != 0 && flags != DB_NOSYNC &&
-	    (t_ret = __db_ferr(dbenv, "DB->close", 0)) != 0 && ret == 0)
-		ret = t_ret;
+	if (flags != 0 && flags != DB_NOSYNC)
+		ret = __db_ferr(dbenv, "DB->close", 0);
+
+	ENV_ENTER(dbenv, ip);
 
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) {
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (t_ret = __db_rep_enter(dbp, 0, 0, 0)) != 0) {
 		handle_check = 0;
 		if (ret == 0)
 			ret = t_ret;
@@ -1933,9 +2071,10 @@ __db_secondary_close_pp(dbp, flags)
 		ret = t_ret;
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1953,31 +2092,23 @@ __db_c_pget_pp(dbc, skey, pkey, data, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
 
-	if ((ret = __db_c_pget_arg(dbc, pkey, flags)) != 0)
+	if ((ret = __db_c_pget_arg(dbc, pkey, flags)) != 0 ||
+	    (ret = __db_c_get_arg(dbc, skey, data, flags)) != 0)
 		return (ret);
 
-	if ((ret = __db_c_get_arg(dbc, skey, data, flags)) != 0)
-		return (ret);
-
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
 
 	ret = __db_c_pget(dbc, skey, pkey, data, flags);
 
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -2059,7 +2190,8 @@ __db_c_put_pp(dbc, key, data, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -2069,27 +2201,19 @@ __db_c_put_pp(dbc, key, data, flags)
 	if ((ret = __db_c_put_arg(dbc, key, data, flags)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
-		return (ret);
-
-	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+		goto err;
 
 	DEBUG_LWRITE(dbc, dbc->txn, "DBcursor->put",
 	    flags == DB_KEYFIRST || flags == DB_KEYLAST ||
 	    flags == DB_NODUPDATA || flags == DB_UPDATE_SECONDARY ?
 	    key : NULL, data, flags);
-
 	ret =__db_c_put(dbc, key, data, flags);
-
-	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+err:
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -2112,8 +2236,8 @@ __db_c_put_arg(dbc, key, data, flags)
 	key_flags = 0;
 
 	/* Check for changes to a read-only tree. */
-	if (IS_READONLY(dbp))
-		return (__db_rdonly(dbenv, "c_put"));
+	if (DB_IS_READONLY(dbp))
+		return (__db_rdonly(dbenv, "DBcursor->put"));
 
 	/* Check for puts on a secondary. */
 	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
@@ -2237,19 +2361,6 @@ __dbt_ferr(dbp, name, dbt, check_thread)
 	return (0);
 }
 
-/*
- * __db_rdonly --
- *	Common readonly message.
- */
-static int
-__db_rdonly(dbenv, name)
-	const DB_ENV *dbenv;
-	const char *name;
-{
-	__db_err(dbenv, "%s: attempt to modify a read-only tree", name);
-	return (EACCES);
-}
-
 /*
  * __db_curinval
  *	Report that a cursor is in an invalid state.
@@ -2274,6 +2385,12 @@ __db_txn_auto_init(dbenv, txnidp)
 	DB_ENV *dbenv;
 	DB_TXN **txnidp;
 {
+	/*
+	 * Method calls where applications explicitly specify DB_AUTO_COMMIT
+	 * require additional validation: the DB_AUTO_COMMIT flag cannot be
+	 * specified if a transaction cookie is also specified, nor can the
+	 * flag be specified in a non-transactional environment.
+	 */
 	if (*txnidp != NULL) {
 		__db_err(dbenv,
     "DB_AUTO_COMMIT may not be specified along with a transaction handle");
@@ -2287,15 +2404,15 @@ __db_txn_auto_init(dbenv, txnidp)
 	}
 
 	/*
-	 * We're creating a transaction for the user, and we want it to block
-	 * if replication recovery is running.  Call the user-level API.
+	 * Our caller checked to see if replication is making a state change.
+	 * Don't call the user-level API (which would repeat that check).
 	 */
-	return (dbenv->txn_begin(dbenv, NULL, txnidp, 0));
+	return (__txn_begin(dbenv, NULL, txnidp, 0));
 }
 
 /*
  * __db_txn_auto_resolve --
- *	Handle DB_AUTO_COMMIT resolution.
+ *	Resolve local transactions.
  *
  * PUBLIC: int __db_txn_auto_resolve __P((DB_ENV *, DB_TXN *, int, int));
  */
@@ -2312,9 +2429,9 @@ __db_txn_auto_resolve(dbenv, txn, nosync, ret)
 	 * replication handle count.  Call the user-level API.
 	 */
 	if (ret == 0)
-		return (txn->commit(txn, nosync ? DB_TXN_NOSYNC : 0));
+		return (__txn_commit(txn, nosync ? DB_TXN_NOSYNC : 0));
 
-	if ((t_ret = txn->abort(txn)) != 0)
+	if ((t_ret = __txn_abort(txn)) != 0)
 		return (__db_panic(dbenv, t_ret));
 
 	return (ret);
diff --git a/storage/bdb/db/db_join.c b/storage/bdb/db/db_join.c
index f486f296ebf..720891ac07e 100644
--- a/storage/bdb/db/db_join.c
+++ b/storage/bdb/db/db_join.c
@@ -1,10 +1,10 @@
 /*
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_join.c,v 11.75 2004/09/22 03:30:23 bostic Exp $
+ * $Id: db_join.c,v 12.6 2005/10/07 20:21:22 ubell Exp $
  */
 
 #include "db_config.h"
@@ -216,9 +216,9 @@ __db_join(primary, curslist, dbcp, flags)
 
 	*dbcp = dbc;
 
-	MUTEX_THREAD_LOCK(dbenv, primary->mutexp);
+	MUTEX_LOCK(dbenv, primary->mutex);
 	TAILQ_INSERT_TAIL(&primary->join_queue, dbc, links);
-	MUTEX_THREAD_UNLOCK(dbenv, primary->mutexp);
+	MUTEX_UNLOCK(dbenv, primary->mutex);
 
 	return (0);
 
@@ -250,24 +250,30 @@ __db_join_close_pp(dbc)
 	DBC *dbc;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	DB *dbp;
-	int handle_check, ret;
+	int handle_check, ret, t_ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
 
-	handle_check = IS_REPLICATED(dbenv, dbp);
+	ENV_ENTER(dbenv, ip);
+
+	handle_check = IS_ENV_REPLICATED(dbenv);
 	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 0, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	ret = __db_join_close(dbc);
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -309,8 +315,9 @@ __db_join_get_pp(dbc, key, data, flags)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	u_int32_t handle_check, save_flags;
-	int ret;
+	int ret, t_ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -320,11 +327,10 @@ __db_join_get_pp(dbc, key, data, flags)
 
 	PANIC_CHECK(dbenv);
 
-	if (LF_ISSET(DB_DIRTY_READ | DB_DEGREE_2 | DB_RMW)) {
+	if (LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW)) {
 		if (!LOCKING_ON(dbp->dbenv))
 			return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
-
-		LF_CLR(DB_DIRTY_READ | DB_DEGREE_2 | DB_RMW);
+		LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW);
 	}
 
 	switch (flags) {
@@ -352,19 +358,24 @@ __db_join_get_pp(dbc, key, data, flags)
 		return (EINVAL);
 	}
 
-	handle_check = IS_REPLICATED(dbp->dbenv, dbp);
+	ENV_ENTER(dbenv, ip);
+
+	handle_check = IS_ENV_REPLICATED(dbp->dbenv);
 	if (handle_check &&
-	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0)
-		return (ret);
+	    (ret = __db_rep_enter(dbp, 1, 0, dbc->txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	/* Restore the original flags value. */
 	flags = save_flags;
 
 	ret = __db_join_get(dbc, key, data, flags);
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -390,7 +401,7 @@ __db_join_get(dbc, key_arg, data_arg, flags)
 	 * If the set of flags here changes, check that __db_join_primget
 	 * is updated to handle them properly.
 	 */
-	opmods = LF_ISSET(DB_RMW | DB_DEGREE_2 | DB_DIRTY_READ);
+	opmods = LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW);
 
 	/*
 	 * Since we are fetching the key as a datum in the secondary indices,
@@ -726,9 +737,9 @@ __db_join_close(dbc)
 	 * must happen before any action that can fail and return, or else
 	 * __db_close may loop indefinitely.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	TAILQ_REMOVE(&dbp->join_queue, dbc, links);
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	PANIC_CHECK(dbenv);
 
@@ -872,28 +883,29 @@ __db_join_primget(dbp, txn, lockerid, key, data, flags)
 	u_int32_t flags;
 {
 	DBC *dbc;
-	int ret, rmw, t_ret;
+	u_int32_t rmw;
+	int ret, t_ret;
 
 	if ((ret = __db_cursor_int(dbp,
 	    txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0)
 		return (ret);
 
 	/*
-	 * The only allowable flags here are the two flags copied into
-	 * "opmods" in __db_join_get, DB_RMW and DB_DIRTY_READ.  The former
-	 * is an op on the c_get call, the latter on the cursor call.
-	 * It's a DB bug if we allow any other flags down in here.
+	 * The only allowable flags here are the two flags copied into "opmods"
+	 * in __db_join_get, DB_RMW and DB_READ_UNCOMMITTED.  The former is an
+	 * op on the c_get call, the latter on the cursor call.  It's a DB bug
+	 * if we allow any other flags down in here.
 	 */
 	rmw = LF_ISSET(DB_RMW);
-	if (LF_ISSET(DB_DIRTY_READ) ||
-	    (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
-		F_SET(dbc, DBC_DIRTY_READ);
+	if (LF_ISSET(DB_READ_UNCOMMITTED) ||
+	    (txn != NULL && F_ISSET(txn, TXN_READ_UNCOMMITTED)))
+		F_SET(dbc, DBC_READ_UNCOMMITTED);
 
-	if (LF_ISSET(DB_DEGREE_2) ||
-	    (txn != NULL && F_ISSET(txn, TXN_DEGREE_2)))
-		F_SET(dbc, DBC_DEGREE_2);
+	if (LF_ISSET(DB_READ_COMMITTED) ||
+	    (txn != NULL && F_ISSET(txn, TXN_READ_COMMITTED)))
+		F_SET(dbc, DBC_READ_COMMITTED);
 
-	LF_CLR(DB_RMW | DB_DIRTY_READ | DB_DEGREE_2);
+	LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_RMW);
 	DB_ASSERT(flags == 0);
 
 	F_SET(dbc, DBC_TRANSIENT);
diff --git a/storage/bdb/db/db_meta.c b/storage/bdb/db/db_meta.c
index c5e88bb560a..c1264d38fb1 100644
--- a/storage/bdb/db/db_meta.c
+++ b/storage/bdb/db/db_meta.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_meta.c,v 11.89 2004/10/05 14:28:33 bostic Exp $
+ * $Id: db_meta.c,v 12.22 2005/10/27 01:46:34 bostic Exp $
  */
 
 #include "db_config.h"
@@ -47,6 +47,7 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
+#include 
 #include 
 #endif
 
@@ -58,6 +59,12 @@
 #include "dbinc/db_am.h"
 
 static void __db_init_meta __P((DB *, void *, db_pgno_t, u_int32_t));
+#ifdef HAVE_FTRUNCATE
+static void __db_freelist_sort __P((struct pglist *, u_int32_t));
+static int  __db_pglistcmp __P((const void *, const void *));
+static int  __db_truncate_freelist __P((DBC *, DBMETA *,
+      PAGE *, db_pgno_t *, u_int32_t, u_int32_t));
+#endif
 
 /*
  * __db_init_meta --
@@ -104,7 +111,7 @@ __db_new(dbc, type, pagepp)
 	DB_LSN lsn;
 	DB_MPOOLFILE *mpf;
 	PAGE *h;
-	db_pgno_t last, pgno, newnext;
+	db_pgno_t last, *list, pgno, newnext;
 	u_int32_t meta_flags;
 	int extend, ret, t_ret;
 
@@ -123,6 +130,10 @@ __db_new(dbc, type, pagepp)
 		goto err;
 	last = meta->last_pgno;
 	if (meta->free == PGNO_INVALID) {
+		if (FLD_ISSET(type, P_DONTEXTEND)) {
+			*pagepp = NULL;
+			goto err;
+		}
 		last = pgno = meta->last_pgno + 1;
 		ZERO_LSN(lsn);
 		extend = 1;
@@ -141,6 +152,8 @@ __db_new(dbc, type, pagepp)
 		extend = 0;
 	}
 
+	FLD_CLR(type, P_DONTEXTEND);
+
 	/*
 	 * Log the allocation before fetching the new page.  If we
 	 * don't have room in the log then we don't want to tell
@@ -191,19 +204,40 @@ __db_new(dbc, type, pagepp)
 			break;
 	}
 
+	/* Fix up the sorted free list if necessary. */
+#ifdef HAVE_FTRUNCATE
+	if (extend == 0) {
+		u_int32_t nelems = 0;
+
+		if ((ret = __memp_get_freelist(dbp->mpf, &nelems, &list)) != 0)
+			goto err;
+		if (nelems != 0) {
+			DB_ASSERT(h->pgno == list[0]);
+			memmove(list, &list[1], (nelems - 1) * sizeof(*list));
+			if ((ret = __memp_extend_freelist(
+			    dbp->mpf, nelems - 1, &list)) != 0)
+				goto err;
+		}
+	}
+#else
+	COMPQUIET(list, NULL);
+#endif
+
 	/*
 	 * If dirty reads are enabled and we are in a transaction, we could
 	 * abort this allocation after the page(s) pointing to this
 	 * one have their locks downgraded.  This would permit dirty readers
 	 * to access this page which is ok, but they must be off the
-	 * page when we abort.  This will also prevent updates happening
-	 * to this page until we commit.
+	 * page when we abort.  We never lock overflow pages or off page
+	 * duplicate trees.
 	 */
-	if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && dbc->txn != NULL) {
+	if (type != P_OVERFLOW && !F_ISSET(dbc, DBC_OPD) &&
+	     F_ISSET(dbc->dbp, DB_AM_READ_UNCOMMITTED) && dbc->txn != NULL) {
 		if ((ret = __db_lget(dbc, 0,
 		    h->pgno, DB_LOCK_WWRITE, 0, &metalock)) != 0)
 			goto err;
 	}
+
 	*pagepp = h;
 	return (0);
 
@@ -231,16 +265,29 @@ __db_free(dbc, h)
 	DBT ddbt, ldbt;
 	DB_LOCK metalock;
 	DB_MPOOLFILE *mpf;
-	db_pgno_t pgno;
-	u_int32_t dirty_flag;
-	int ret, t_ret;
+	db_pgno_t last_pgno, *lp, next_pgno, pgno, prev_pgno;
+	u_int32_t dirty_flag, lflag, nelem;
+	int do_truncate, ret, t_ret;
+#ifdef HAVE_FTRUNCATE
+	db_pgno_t *list;
+	u_int32_t position, start;
+#endif
 
 	dbp = dbc->dbp;
 	mpf = dbp->mpf;
+	prev_pgno = PGNO_INVALID;
+	nelem = 0;
+	meta = NULL;
+	do_truncate = 0;
+	lp = NULL;
 
 	/*
-	 * Retrieve the metadata page and insert the page at the head of
-	 * the free list.  If either the lock get or page get routines
+	 * Retrieve the metadata page.  If we are not keeping a sorted
+	 * free list put the page at the head of the the free list.
+	 * If we are keeping a sorted free list, for truncation,
+	 * then figure out where this page belongs and either
+	 * link it in or truncate the file as much as possible.
+	 * If either the lock get or page get routines
 	 * fail, then we need to put the page with which we were called
 	 * back because our caller assumes we take care of it.
 	 */
@@ -249,12 +296,76 @@ __db_free(dbc, h)
 	if ((ret = __db_lget(dbc,
 	    LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
 		goto err;
-	if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) {
-		(void)__TLPUT(dbc, metalock);
+	if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0)
+		goto err1;
+
+	last_pgno = meta->last_pgno;
+	next_pgno = meta->free;
+
+	DB_ASSERT(h->pgno != next_pgno);
+
+#ifdef HAVE_FTRUNCATE
+	/*
+	 * If we are maintaining a sorted free list see if we either have a
+	 * new truncation point or the page goes somewhere in the middle of
+	 * the list.  If it goes in the middle of the list, we will drop the
+	 * meta page and get the previous page.
+	 */
+	if ((ret = __memp_get_freelist(mpf, &nelem, &list)) != 0)
 		goto err;
+	if (list == NULL)
+		goto no_sort;
+
+	if (h->pgno != last_pgno) {
+		/*
+		 * Put the page number in the sorted list.
+		 * Finds its position and the previous page,
+		 * extend the list, make room and insert.
+		 */
+		position = 0;
+		if (nelem != 0) {
+			__db_freelist_pos(h->pgno, list, nelem, &position);
+
+			DB_ASSERT(h->pgno != list[position]);
+
+			/* Get the previous page if this is not the smallest. */
+			if (position != 0 || h->pgno > list[0])
+				prev_pgno = list[position];
+		}
+
+		/* Put the page number into the list. */
+		if ((ret = __memp_extend_freelist(mpf, nelem + 1, &list)) != 0)
+			return (ret);
+		if (prev_pgno != PGNO_INVALID)
+			lp = &list[position + 1];
+		else
+			lp = list;
+		if (nelem != 0 && position != nelem)
+			memmove(lp + 1, lp,
+			    (size_t)((u_int8_t*)&list[nelem] - (u_int8_t*)lp));
+		*lp = h->pgno;
+	} else if (nelem != 0) {
+		/* Find the truncation point. */
+		for (lp = &list[nelem - 1]; lp >= list; lp--)
+			if (--last_pgno != *lp)
+				break;
+		if (lp < list || last_pgno < h->pgno - 1)
+			do_truncate = 1;
+		last_pgno = meta->last_pgno;
 	}
 
-	DB_ASSERT(h->pgno != meta->free);
+no_sort:
+	if (prev_pgno != PGNO_INVALID) {
+		if ((ret = __memp_fput(mpf, meta, 0)) != 0)
+			goto err1;
+		meta = NULL;
+		pgno = prev_pgno;
+		if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0)
+			goto err1;
+		next_pgno = NEXT_PGNO(meta);
+	}
+#endif
+
 	/* Log the change. */
 	if (DBC_LOGGING(dbc)) {
 		memset(&ldbt, 0, sizeof(ldbt));
@@ -269,47 +380,61 @@ __db_free(dbc, h)
 		case P_LDUP:
 			if (h->entries > 0) {
 				ldbt.size += h->entries * sizeof(db_indx_t);
-				ddbt.data = (u_int8_t *)h + h->hf_offset;
-				ddbt.size = dbp->pgsize - h->hf_offset;
-				ret = __db_pg_freedata_log(dbp, dbc->txn,
-				     &LSN(meta), 0, h->pgno, &LSN(meta),
-				     PGNO_BASE_MD, &ldbt,
-				     meta->free, meta->last_pgno, &ddbt);
-				break;
+				ddbt.data = (u_int8_t *)h + HOFFSET(h);
+				ddbt.size = dbp->pgsize - HOFFSET(h);
+				if ((ret = __db_pg_freedata_log(dbp, dbc->txn,
+				     &LSN(meta), 0, h->pgno, &LSN(meta), pgno,
+				     &ldbt, next_pgno, last_pgno, &ddbt)) != 0)
+					goto err1;
+				goto logged;
 			}
-			goto log;
+			break;
 		case P_HASHMETA:
 			ldbt.size = sizeof(HMETA);
-			goto log;
+			break;
 		case P_BTREEMETA:
 			ldbt.size = sizeof(BTMETA);
-			goto log;
+			break;
 		case P_OVERFLOW:
 			ldbt.size += OV_LEN(h);
-			goto log;
+			break;
 		default:
 			DB_ASSERT(h->type != P_QAMDATA);
+		}
 
-log:			ret = __db_pg_free_log(dbp,
-			    dbc->txn, &LSN(meta), 0, h->pgno, &LSN(meta),
-			    PGNO_BASE_MD, &ldbt, meta->free, meta->last_pgno);
-		}
-		if (ret != 0) {
-			(void)__memp_fput(mpf, (PAGE *)meta, 0);
-			(void)__TLPUT(dbc, metalock);
-			goto err;
-		}
+		/*
+		 * If we are truncating the file, we need to make sure
+		 * the logging happens before the truncation.  If we
+		 * are truncating multiple pages we don't need to flush the
+		 * log here as it will be flushed by __db_truncate_freelist.
+		 */
+		lflag = 0;
+#ifdef HAVE_FTRUNCATE
+		if (do_truncate == 0 && h->pgno == last_pgno)
+			lflag = DB_FLUSH;
+#endif
+		if ((ret = __db_pg_free_log(dbp,
+		      dbc->txn, &LSN(meta), lflag, h->pgno,
+		      &LSN(meta), pgno, &ldbt, next_pgno, last_pgno)) != 0)
+			goto err1;
 	} else
 		LSN_NOT_LOGGED(LSN(meta));
-	LSN(h) = LSN(meta);
+logged:	LSN(h) = LSN(meta);
 
 #ifdef HAVE_FTRUNCATE
-	if (h->pgno == meta->last_pgno) {
+	if (do_truncate) {
+		start = (u_int32_t) (lp - list) + 1;
+		meta->last_pgno--;
+		ret = __db_truncate_freelist(
+		      dbc, meta, h, list, start, nelem);
+		h = NULL;
+	} else if (h->pgno == last_pgno) {
 		if ((ret = __memp_fput(mpf, h, DB_MPOOL_DISCARD)) != 0)
 			goto err;
 		/* Give the page back to the OS. */
-		if ((ret = __memp_ftruncate(mpf, meta->last_pgno, 0)) != 0)
+		if ((ret = __memp_ftruncate(mpf, last_pgno, 0)) != 0)
 			goto err;
+		DB_ASSERT(meta->pgno == PGNO_BASE_MD);
 		meta->last_pgno--;
 		h = NULL;
 	} else
@@ -318,20 +443,23 @@ log:			ret = __db_pg_free_log(dbp,
 	{
 		/*
 		 * If we are not truncating the page then we
-		 * reinitialize it and put it hat the head of
+		 * reinitialize it and put it at the head of
 		 * the free list.
 		 */
 		P_INIT(h, dbp->pgsize,
-		    h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID);
+		    h->pgno, PGNO_INVALID, next_pgno, 0, P_INVALID);
 #ifdef DIAGNOSTIC
 		memset((u_int8_t *) h + P_OVERHEAD(dbp),
 		    CLEAR_BYTE, dbp->pgsize - P_OVERHEAD(dbp));
 #endif
-		meta->free = h->pgno;
+		if (prev_pgno == PGNO_INVALID)
+			meta->free = h->pgno;
+		else
+			NEXT_PGNO(meta) = h->pgno;
 	}
 
-	/* Discard the metadata page. */
-	if ((t_ret =
+	/* Discard the metadata or previous page. */
+err1:	if (meta != NULL && (t_ret =
 	    __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
 		ret = t_ret;
 	if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
@@ -350,6 +478,377 @@ err:	if (h != NULL &&
 	return (ret);
 }
 
+#ifdef HAVE_FTRUNCATE
+/*
+ * __db_freelist_pos -- find the position of a page in the freelist.
+ *	The list is sorted, we do a binary search.
+ *
+ * PUBLIC: #ifdef HAVE_FTRUNCATE
+ * PUBLIC: void __db_freelist_pos __P((db_pgno_t,
+ * PUBLIC:       db_pgno_t *, u_int32_t, u_int32_t *));
+ * PUBLIC: #endif
+ */
+void
+__db_freelist_pos(pgno, list, nelem, posp)
+	db_pgno_t pgno;
+	db_pgno_t *list;
+	u_int32_t nelem;
+	u_int32_t *posp;
+{
+	u_int32_t base, indx, lim;
+
+	indx = 0;
+	for (base = 0, lim = nelem; lim != 0; lim >>= 1) {
+		indx = base + (lim >> 1);
+		if (pgno == list[indx]) {
+			*posp = indx;
+			return;
+		}
+		if (pgno > list[indx]) {
+			base = indx + 1;
+			--lim;
+		}
+	}
+	if (base != 0)
+		base--;
+	*posp = base;
+	return;
+}
+
+static int
+__db_pglistcmp(a, b)
+	const void *a, *b;
+{
+	struct pglist *ap, *bp;
+
+	ap = (struct pglist *)a;
+	bp = (struct pglist *)b;
+
+	return ((ap->pgno > bp->pgno) ? 1 : (ap->pgno < bp->pgno) ? -1: 0);
+}
+
+/*
+ * __db_freelist_sort -- sort a list of free pages.
+ */
+static void
+__db_freelist_sort(list, nelems)
+	struct pglist *list;
+	u_int32_t nelems;
+{
+	qsort(list, (size_t)nelems, sizeof(struct pglist), __db_pglistcmp);
+}
+
+/*
+ * __db_pg_truncate -- sort the freelist and find the truncation point.
+ *
+ * PUBLIC: #ifdef HAVE_FTRUNCATE
+ * PUBLIC: int __db_pg_truncate __P((DB_MPOOLFILE *, struct pglist *list,
+ * PUBLIC:    DB_COMPACT *, u_int32_t *, db_pgno_t *, DB_LSN *, int));
+ * PUBLIC: #endif
+ */
+int
+__db_pg_truncate(mpf, list, c_data, nelemp, last_pgno, lsnp, in_recovery)
+	DB_MPOOLFILE *mpf;
+	struct pglist *list;
+	DB_COMPACT *c_data;
+	u_int32_t *nelemp;
+	db_pgno_t *last_pgno;
+	DB_LSN *lsnp;
+	int in_recovery;
+{
+	PAGE *h;
+	struct pglist *lp;
+	db_pgno_t pgno;
+	u_int32_t nelems;
+	int modified, ret;
+
+	ret = 0;
+
+	nelems = *nelemp;
+	/* Sort the list */
+	__db_freelist_sort(list, nelems);
+
+	/* Find the truncation point. */
+	pgno = *last_pgno;
+	lp = &list[nelems - 1];
+	while (nelems != 0) {
+		if (lp->pgno != pgno)
+			break;
+		pgno--;
+		nelems--;
+		lp--;
+	}
+
+	/*
+	 * Figure out what (if any) pages can be truncated immediately and
+	 * record the place from which we can truncate, so we can do the
+	 * memp_ftruncate below.  We also use this to avoid ever putting
+	 * these pages on the freelist, which we are about to relink.
+	 */
+	for (lp = list; lp < &list[nelems]; lp++) {
+		if ((ret = __memp_fget(mpf, &lp->pgno, 0, &h)) != 0) {
+			/* Page may have been truncated later. */
+			if (in_recovery && ret == DB_PAGE_NOTFOUND) {
+				ret = 0;
+				continue;
+			}
+			goto err;
+		}
+		modified = 0;
+		if (!in_recovery || log_compare(&LSN(h), &lp->lsn) == 0) {
+			if (lp == &list[nelems - 1])
+				NEXT_PGNO(h) = PGNO_INVALID;
+			else
+				NEXT_PGNO(h) = lp[1].pgno;
+			DB_ASSERT(NEXT_PGNO(h) < *last_pgno);
+
+			LSN(h) = *lsnp;
+			modified = 1;
+		}
+		if ((ret = __memp_fput(mpf, h,
+		    modified ? DB_MPOOL_DIRTY : 0)) != 0)
+			goto err;
+	}
+
+	if (pgno != *last_pgno) {
+		if ((ret = __memp_ftruncate(mpf,
+		    pgno + 1, in_recovery ? MP_TRUNC_RECOVER : 0)) != 0)
+			goto err;
+		if (c_data)
+			c_data->compact_pages_truncated += *last_pgno - pgno;
+		*last_pgno = pgno;
+	}
+	*nelemp = nelems;
+
+err:	return (ret);
+}
+
+/*
+ * __db_free_truncate --
+ *	Truncate free pages at the end of the file.
+ *
+ * PUBLIC: #ifdef HAVE_FTRUNCATE
+ * PUBLIC: int __db_free_truncate __P((DB *, DB_TXN *, u_int32_t,
+ * PUBLIC:    DB_COMPACT *, struct pglist **, u_int32_t *, db_pgno_t *));
+ * PUBLIC: #endif
+ */
+int
+__db_free_truncate(dbp, txn, flags, c_data, listp, nelemp, last_pgnop)
+	DB *dbp;
+	DB_TXN *txn;
+	u_int32_t flags;
+	DB_COMPACT *c_data;
+	struct pglist **listp;
+	u_int32_t *nelemp;
+	db_pgno_t *last_pgnop;
+{
+	DBC *dbc;
+	DB_ENV *dbenv;
+	DBMETA *meta;
+	DBT ddbt;
+	DB_LOCK metalock;
+	DB_LSN null_lsn;
+	DB_MPOOLFILE *mpf;
+	PAGE *h;
+	db_pgno_t pgno;
+	u_int32_t nelems;
+	struct pglist *list, *lp;
+	int ret, t_ret;
+	size_t size;
+
+	COMPQUIET(flags, 0);
+	list = NULL;
+	meta = NULL;
+	dbenv = dbp->dbenv;
+	mpf = dbp->mpf;
+	h = NULL;
+	nelems = 0;
+	if (listp != NULL) {
+		*listp = NULL;
+		DB_ASSERT(nelemp != NULL);
+		*nelemp = 0;
+	}
+
+	if ((ret = __db_cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+		return (ret);
+
+	pgno = PGNO_BASE_MD;
+	if ((ret = __db_lget(dbc,
+	    LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+		goto err;
+	if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0)
+		goto err;
+
+	if (last_pgnop != NULL)
+		*last_pgnop = meta->last_pgno;
+	if ((pgno = meta->free) == PGNO_INVALID)
+		goto done;
+
+	size = 128;
+	if ((ret = __os_malloc(dbenv, size * sizeof(*list), &list)) != 0)
+		goto err;
+	lp = list;
+
+	do {
+		if (lp == &list[size]) {
+			size *= 2;
+			if ((ret = __os_realloc(dbenv,
+			    size * sizeof(*list), &list)) != 0)
+				goto err;
+			lp = &list[size / 2];
+		}
+		if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0)
+			goto err;
+
+		lp->pgno = pgno;
+		lp->lsn = LSN(h);
+		pgno = NEXT_PGNO(h);
+		if ((ret = __memp_fput(mpf, h, 0)) != 0)
+			goto err;
+		lp++;
+	} while (pgno != PGNO_INVALID);
+	nelems = (u_int32_t)(lp - list);
+
+	/* Log the current state of the free list */
+	if (DBC_LOGGING(dbc)) {
+		ddbt.data = list;
+		ddbt.size = nelems * sizeof(*lp);
+		ZERO_LSN(null_lsn);
+		if ((ret = __db_pg_sort_log(dbp,
+		     dbc->txn, &LSN(meta), DB_FLUSH, PGNO_BASE_MD, &LSN(meta),
+		     PGNO_INVALID, &null_lsn, meta->last_pgno, &ddbt)) != 0)
+			goto err;
+	} else
+		LSN_NOT_LOGGED(LSN(meta));
+
+	if ((ret = __db_pg_truncate(mpf, list, c_data,
+	    &nelems, &meta->last_pgno, &LSN(meta), 0)) != 0)
+		goto err;
+
+	if (nelems == 0)
+		meta->free = PGNO_INVALID;
+	else
+		meta->free = list[0].pgno;
+
+done:	if (last_pgnop != NULL)
+		*last_pgnop = meta->last_pgno;
+
+	/*
+	 * The truncate point is the number of pages in the free
+	 * list back from the last page.  The number of pages
+	 * in the free list are the number that we can swap in.
+	 */
+	if (c_data)
+		c_data->compact_truncate = (u_int32_t)meta->last_pgno - nelems;
+
+	if (nelems != 0 && listp != NULL) {
+		*listp = list;
+		*nelemp = nelems;
+		list = NULL;
+	}
+
+err:	if (list != NULL)
+		__os_free(dbenv, list);
+	if (meta != NULL && (t_ret =
+	     __memp_fput(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+		ret = t_ret;
+	if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
+		ret = t_ret;
+	if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+
+static int
+__db_truncate_freelist(dbc, meta, h, list, start, nelem)
+	DBC *dbc;
+	DBMETA *meta;
+	PAGE *h;
+	db_pgno_t *list;
+	u_int32_t start, nelem;
+{
+	DB *dbp;
+	DB_LSN null_lsn;
+	DB_MPOOLFILE *mpf;
+	DBT ddbt;
+	PAGE *last_free, *pg;
+	db_pgno_t *lp;
+	struct pglist *plist, *pp;
+	int ret;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	plist = NULL;
+	last_free = NULL;
+
+	if (start != 0 &&
+	    (ret = __memp_fget(mpf, &list[start - 1], 0, &last_free)) != 0)
+		goto err;
+
+	if (DBC_LOGGING(dbc)) {
+		if ((ret = __os_malloc(dbp->dbenv,
+		     (nelem - start) * sizeof(*pp), &plist)) != 0)
+			goto err;
+
+		pp = plist;
+		for (lp = &list[start]; lp < &list[nelem]; lp++) {
+			pp->pgno = *lp;
+			if ((ret = __memp_fget(mpf, lp, 0, &pg)) != 0)
+				goto err;
+			pp->lsn = LSN(pg);
+			if ((ret = __memp_fput(mpf, pg, DB_MPOOL_DISCARD)) != 0)
+				goto err;
+			pp++;
+		}
+		ddbt.data = plist;
+		ddbt.size = (nelem - start) * sizeof(*pp);
+		ZERO_LSN(null_lsn);
+		if (last_free != NULL) {
+			if ((ret = __db_pg_sort_log(dbp, dbc->txn, &LSN(meta),
+			     DB_FLUSH, PGNO(meta), &LSN(meta), PGNO(last_free),
+			     &LSN(last_free), meta->last_pgno, &ddbt)) != 0)
+				goto err;
+		} else if ((ret = __db_pg_sort_log(dbp, dbc->txn,
+		     &LSN(meta), DB_FLUSH, PGNO(meta), &LSN(meta),
+		     PGNO_INVALID, &null_lsn, meta->last_pgno, &ddbt)) != 0)
+			goto err;
+	} else
+		LSN_NOT_LOGGED(LSN(meta));
+	if (last_free != NULL)
+		LSN(last_free) = LSN(meta);
+
+	if ((ret = __memp_fput(mpf, h, DB_MPOOL_DISCARD)) != 0)
+		goto err;
+	h = NULL;
+	if ((ret = __memp_ftruncate(mpf, list[start], 0)) != 0)
+		goto err;
+	meta->last_pgno = list[start] - 1;
+
+	if (start == 0)
+		meta->free = PGNO_INVALID;
+	else {
+		NEXT_PGNO(last_free) = PGNO_INVALID;
+		if ((ret = __memp_fput(mpf, last_free, DB_MPOOL_DIRTY)) != 0)
+			goto err;
+		last_free = NULL;
+	}
+
+	/* Shrink the number of elements in the list. */
+	ret = __memp_extend_freelist(mpf, start, &list);
+
+err:	if (plist != NULL)
+		__os_free(dbp->dbenv, plist);
+
+	/* We need to put the page on error. */
+	if (h != NULL)
+		(void)__memp_fput(mpf, h, 0);
+	if (last_free != NULL)
+		(void)__memp_fput(mpf, last_free, 0);
+
+	return (ret);
+}
+#endif
+
 #ifdef DEBUG
 /*
  * __db_lprint --
@@ -394,9 +893,9 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp)
 {
 	DB *dbp;
 	DB_ENV *dbenv;
-	DB_LOCKREQ couple[2], *reqp;
+	DB_LOCKREQ couple[3], *reqp;
 	DB_TXN *txn;
-	int has_timeout, ret;
+	int has_timeout, i, ret;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -431,8 +930,8 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp)
 	if (DB_NONBLOCK(dbc))
 		lkflags |= DB_LOCK_NOWAIT;
 
-	if (F_ISSET(dbc, DBC_DIRTY_READ) && mode == DB_LOCK_READ)
-		mode = DB_LOCK_DIRTY;
+	if (F_ISSET(dbc, DBC_READ_UNCOMMITTED) && mode == DB_LOCK_READ)
+		mode = DB_LOCK_READ_UNCOMMITTED;
 
 	has_timeout = F_ISSET(dbc, DBC_RECOVER) ||
 	    (txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT));
@@ -440,8 +939,8 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp)
 	/*
 	 * Transactional locking.
 	 * Hold on to the previous read lock only if we are in full isolation.
-	 * COUPLE_ALWAYS indicates we are holding an interior node
-	 * which need not be isolated.
+	 * COUPLE_ALWAYS indicates we are holding an interior node which need
+	 *	not be isolated.
 	 * Downgrade write locks if we are supporting dirty readers.
 	 */
 	if ((action != LCK_COUPLE && action != LCK_COUPLE_ALWAYS) ||
@@ -449,47 +948,54 @@ __db_lget(dbc, action, pgno, mode, lkflags, lockp)
 		action = 0;
 	else if (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS)
 		action = LCK_COUPLE;
-	else if (F_ISSET(dbc, DBC_DEGREE_2) && lockp->mode == DB_LOCK_READ)
+	else if (F_ISSET(dbc,
+	    DBC_READ_COMMITTED) && lockp->mode == DB_LOCK_READ)
 		action = LCK_COUPLE;
-	else if (F_ISSET(dbc, DBC_DIRTY_READ) && lockp->mode == DB_LOCK_DIRTY)
+	else if (F_ISSET(dbc,
+	    DBC_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_READ_UNCOMMITTED)
 		action = LCK_COUPLE;
-	else if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && lockp->mode == DB_LOCK_WRITE)
+	else if (F_ISSET(dbc->dbp,
+	    DB_AM_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_WRITE)
 		action = LCK_DOWNGRADE;
 	else
 		action = 0;
 
+	i = 0;
 	switch (action) {
-	case LCK_DOWNGRADE:
-		if ((ret = __lock_downgrade(
-		    dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0)
-			return (ret);
-		/* FALLTHROUGH */
-
 	default:
-		if (!has_timeout) {
-			ret = __lock_get(dbenv,
-			    dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
-			break;
-		}
+		if (has_timeout)
+			goto couple;
+		ret = __lock_get(dbenv,
+		    dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
+		break;
 
+	case LCK_DOWNGRADE:
+		couple[0].op = DB_LOCK_GET;
+		couple[0].obj = NULL;
+		couple[0].lock = *lockp;
+		couple[0].mode = DB_LOCK_WWRITE;
+		UMRW_SET(couple[0].timeout);
+		i++;
 		/* FALLTHROUGH */
 	case LCK_COUPLE:
-		couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET;
-		couple[0].obj = &dbc->lock_dbt;
-		couple[0].mode = mode;
-		UMRW_SET(couple[0].timeout);
+couple:		couple[i].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET;
+		couple[i].obj = &dbc->lock_dbt;
+		couple[i].mode = mode;
+		UMRW_SET(couple[i].timeout);
+		i++;
 		if (has_timeout)
 			couple[0].timeout =
 			     F_ISSET(dbc, DBC_RECOVER) ? 0 : txn->lock_timeout;
-		if (action == LCK_COUPLE) {
-			couple[1].op = DB_LOCK_PUT;
-			couple[1].lock = *lockp;
+		if (action == LCK_COUPLE || action == LCK_DOWNGRADE) {
+			couple[i].op = DB_LOCK_PUT;
+			couple[i].lock = *lockp;
+			i++;
 		}
 
-		ret = __lock_vec(dbenv, dbc->locker,
-		    lkflags, couple, action == LCK_COUPLE ? 2 : 1, &reqp);
-		if (ret == 0 || reqp == &couple[1])
-			*lockp = couple[0].lock;
+		ret = __lock_vec(dbenv,
+		    dbc->locker, lkflags, couple, i, &reqp);
+		if (ret == 0 || reqp == &couple[i - 1])
+			*lockp = i == 1 ? couple[0].lock : couple[i - 2].lock;
 		break;
 	}
 
@@ -511,6 +1017,7 @@ __db_lput(dbc, lockp)
 	DB_LOCK *lockp;
 {
 	DB_ENV *dbenv;
+	DB_LOCKREQ couple[2], *reqp;
 	int action, ret;
 
 	/*
@@ -518,13 +1025,16 @@ __db_lput(dbc, lockp)
 	 * Hold on to the read locks only if we are in full isolation.
 	 * Downgrade write locks if we are supporting dirty readers.
 	 */
-	if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && lockp->mode == DB_LOCK_WRITE)
+	if (F_ISSET(dbc->dbp,
+	    DB_AM_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_WRITE)
 		action = LCK_DOWNGRADE;
 	else if (dbc->txn == NULL)
 		action = LCK_COUPLE;
-	else if (F_ISSET(dbc, DBC_DEGREE_2) && lockp->mode == DB_LOCK_READ)
+	else if (F_ISSET(dbc,
+	    DBC_READ_COMMITTED) && lockp->mode == DB_LOCK_READ)
 		action = LCK_COUPLE;
-	else if (F_ISSET(dbc, DBC_DIRTY_READ) && lockp->mode == DB_LOCK_DIRTY)
+	else if (F_ISSET(dbc,
+	    DBC_READ_UNCOMMITTED) && lockp->mode == DB_LOCK_READ_UNCOMMITTED)
 		action = LCK_COUPLE;
 	else
 		action = 0;
@@ -532,10 +1042,19 @@ __db_lput(dbc, lockp)
 	dbenv = dbc->dbp->dbenv;
 	switch (action) {
 	case LCK_COUPLE:
-		ret = __lock_put(dbenv, lockp, 0);
+		ret = __lock_put(dbenv, lockp);
 		break;
 	case LCK_DOWNGRADE:
-		ret = __lock_downgrade(dbenv, lockp, DB_LOCK_WWRITE, 0);
+		couple[0].op = DB_LOCK_GET;
+		couple[0].obj = NULL;
+		couple[0].mode = DB_LOCK_WWRITE;
+		couple[0].lock = *lockp;
+		UMRW_SET(couple[0].timeout);
+		couple[1].op = DB_LOCK_PUT;
+		couple[1].lock = *lockp;
+		ret = __lock_vec(dbenv, dbc->locker, 0, couple, 2, &reqp);
+		if (ret == 0 || reqp == &couple[1])
+			*lockp = couple[0].lock;
 		break;
 	default:
 		ret = 0;
diff --git a/storage/bdb/db/db_method.c b/storage/bdb/db/db_method.c
index 4266fbf0e1e..141392148e7 100644
--- a/storage/bdb/db/db_method.c
+++ b/storage/bdb/db/db_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_method.c,v 11.116 2004/10/11 18:22:05 bostic Exp $
+ * $Id: db_method.c,v 12.15 2005/11/08 03:24:58 bostic Exp $
  */
 
 #include "db_config.h"
@@ -32,8 +32,7 @@
 #include "dbinc/lock.h"
 #include "dbinc/mp.h"
 #include "dbinc/qam.h"
-#include "dbinc/xa.h"
-#include "dbinc_auto/xa_ext.h"
+#include "dbinc/txn.h"
 
 #ifdef HAVE_RPC
 #include "dbinc_auto/rpc_client_ext.h"
@@ -42,9 +41,10 @@
 static int  __db_get_byteswapped __P((DB *, int *));
 static int  __db_get_dbname __P((DB *, const char **, const char **));
 static DB_ENV *__db_get_env __P((DB *));
+static DB_MPOOLFILE *__db_get_mpf __P((DB *));
 static int  __db_get_transactional __P((DB *));
 static int  __db_get_type __P((DB *, DBTYPE *dbtype));
-static int  __db_init __P((DB *, u_int32_t));
+static int  __db_init __P((DB_ENV *, DB *, u_int32_t));
 static int  __db_set_alloc __P((DB *, void *(*)(size_t),
 		void *(*)(void *, size_t), void (*)(void *)));
 static int  __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
@@ -71,10 +71,6 @@ static void __db_set_msgfile __P((DB *, FILE *));
 static void __dbh_err __P((DB *, int, const char *, ...));
 static void __dbh_errx __P((DB *, const char *, ...));
 
-#ifdef HAVE_RPC
-static int  __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
-#endif
-
 /*
  * db_create --
  *	DB constructor.
@@ -88,14 +84,14 @@ db_create(dbpp, dbenv, flags)
 	u_int32_t flags;
 {
 	DB *dbp;
+	DB_THREAD_INFO *ip;
+	DB_REP *db_rep;
 	int ret;
 
 	/* Check for invalid function flags. */
 	switch (flags) {
 	case 0:
 		break;
-	case DB_REP_CREATE:
-		break;
 	case DB_XA_CREATE:
 		if (dbenv != NULL) {
 			__db_err(dbenv,
@@ -115,16 +111,17 @@ db_create(dbpp, dbenv, flags)
 		return (__db_ferr(dbenv, "db_create", 0));
 	}
 
+	ip = NULL;
+	if (dbenv != NULL)
+		ENV_ENTER(dbenv, ip);
 	/* Allocate the DB. */
-	if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0)
+	if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0) {
+		if (dbenv != NULL)
+			ENV_LEAVE(dbenv, ip);
 		return (ret);
-#ifdef HAVE_RPC
-	if (dbenv != NULL && RPC_ON(dbenv))
-		ret = __dbcl_init(dbp, dbenv, flags);
-	else
-#endif
-		ret = __db_init(dbp, flags);
-	if (ret != 0)
+	}
+
+	if ((ret = __db_init(dbenv, dbp, flags)) != 0)
 		goto err;
 
 	/* If we don't have an environment yet, allocate a local one. */
@@ -132,26 +129,34 @@ db_create(dbpp, dbenv, flags)
 		if ((ret = db_env_create(&dbenv, 0)) != 0)
 			goto err;
 		F_SET(dbenv, DB_ENV_DBLOCAL);
+		ENV_ENTER(dbenv, ip);
 	}
 	dbp->dbenv = dbenv;
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	++dbenv->db_ref;
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	/*
 	 * Set the replication timestamp; it's 0 if we're not in a replicated
-	 * environment.
+	 * environment.  Don't acquire a lock to read the value, even though
+	 * it's opaque: all we check later is value equality, nothing else.
 	 */
-	dbp->timestamp =
-	    (F_ISSET(dbenv, DB_ENV_DBLOCAL) || !REP_ON(dbenv)) ? 0 :
-	    ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->rep_timestamp;
+	dbp->timestamp = REP_ON(dbenv) ?
+	    ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->rep_timestamp : 0;
+	/*
+	 * Set the replication generation number for fid management; valid
+	 * replication generations start at 1.  Don't acquire a lock to
+	 * read the value.  All we check later is value equality.
+	 */
+	db_rep = dbenv->rep_handle;
+	dbp->fid_gen =
+	    (REP_ON(dbenv) && db_rep->region != NULL) ?
+	    ((REP *)db_rep->region)->gen : 0;
 
 	/* If not RPC, open a backing DB_MPOOLFILE handle in the memory pool. */
-#ifdef HAVE_RPC
-	if (!RPC_ON(dbenv))
-#endif
-		if ((ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0)
-			goto err;
+	if (!RPC_ON(dbenv) &&
+	    (ret = __memp_fcreate(dbenv, &dbp->mpf)) != 0)
+		goto err;
 
 	dbp->type = DB_UNKNOWN;
 
@@ -161,9 +166,10 @@ db_create(dbpp, dbenv, flags)
 err:	if (dbp->mpf != NULL)
 		(void)__memp_fclose(dbp->mpf, 0);
 	if (dbenv != NULL && F_ISSET(dbenv, DB_ENV_DBLOCAL))
-		(void)__dbenv_close(dbenv, 0);
+		(void)__env_close(dbenv, 0);
 	__os_free(dbenv, dbp);
 	*dbpp = NULL;
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -172,7 +178,8 @@ err:	if (dbp->mpf != NULL)
  *	Initialize a DB structure.
  */
 static int
-__db_init(dbp, flags)
+__db_init(dbenv, dbp, flags)
+	DB_ENV *dbenv;
 	DB *dbp;
 	u_int32_t flags;
 {
@@ -189,8 +196,10 @@ __db_init(dbp, flags)
 	FLD_SET(dbp->am_ok,
 	    DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO);
 
+	/* DB PUBLIC HANDLE LIST BEGIN */
 	dbp->associate = __db_associate_pp;
 	dbp->close = __db_close_pp;
+	dbp->compact = __db_compact_pp;
 	dbp->cursor = __db_cursor_pp;
 	dbp->del = __db_del_pp;
 	dbp->dump = __db_dump_pp;
@@ -199,9 +208,18 @@ __db_init(dbp, flags)
 	dbp->fd = __db_fd_pp;
 	dbp->get = __db_get_pp;
 	dbp->get_byteswapped = __db_get_byteswapped;
+	dbp->get_cachesize = __db_get_cachesize;
 	dbp->get_dbname = __db_get_dbname;
+	dbp->get_encrypt_flags = __db_get_encrypt_flags;
 	dbp->get_env = __db_get_env;
+	dbp->get_errfile = __db_get_errfile;
+	dbp->get_errpfx = __db_get_errpfx;
+	dbp->get_flags = __db_get_flags;
+	dbp->get_lorder = __db_get_lorder;
+	dbp->get_mpf = __db_get_mpf;
+	dbp->get_msgfile = __db_get_msgfile;
 	dbp->get_open_flags = __db_get_open_flags;
+	dbp->get_pagesize = __db_get_pagesize;
 	dbp->get_transactional = __db_get_transactional;
 	dbp->get_type = __db_get_type;
 	dbp->join = __db_join_pp;
@@ -211,35 +229,28 @@ __db_init(dbp, flags)
 	dbp->put = __db_put_pp;
 	dbp->remove = __db_remove_pp;
 	dbp->rename = __db_rename_pp;
-	dbp->truncate = __db_truncate_pp;
 	dbp->set_alloc = __db_set_alloc;
 	dbp->set_append_recno = __db_set_append_recno;
-	dbp->get_cachesize = __db_get_cachesize;
 	dbp->set_cachesize = __db_set_cachesize;
 	dbp->set_dup_compare = __db_set_dup_compare;
-	dbp->get_encrypt_flags = __db_get_encrypt_flags;
 	dbp->set_encrypt = __db_set_encrypt;
 	dbp->set_errcall = __db_set_errcall;
-	dbp->get_errfile = __db_get_errfile;
 	dbp->set_errfile = __db_set_errfile;
-	dbp->get_errpfx = __db_get_errpfx;
 	dbp->set_errpfx = __db_set_errpfx;
 	dbp->set_feedback = __db_set_feedback;
-	dbp->get_flags = __db_get_flags;
 	dbp->set_flags = __db_set_flags;
-	dbp->get_lorder = __db_get_lorder;
 	dbp->set_lorder = __db_set_lorder;
 	dbp->set_msgcall = __db_set_msgcall;
-	dbp->get_msgfile = __db_get_msgfile;
 	dbp->set_msgfile = __db_set_msgfile;
-	dbp->get_pagesize = __db_get_pagesize;
 	dbp->set_pagesize = __db_set_pagesize;
 	dbp->set_paniccall = __db_set_paniccall;
 	dbp->stat = __db_stat_pp;
 	dbp->stat_print = __db_stat_print_pp;
 	dbp->sync = __db_sync_pp;
+	dbp->truncate = __db_truncate_pp;
 	dbp->upgrade = __db_upgrade_pp;
 	dbp->verify = __db_verify_pp;
+	/* DB PUBLIC HANDLE LIST END */
 
 					/* Access method specific. */
 	if ((ret = __bam_db_create(dbp)) != 0)
@@ -256,8 +267,25 @@ __db_init(dbp, flags)
 	if (LF_ISSET(DB_XA_CREATE) && (ret = __db_xa_create(dbp)) != 0)
 		return (ret);
 
-	if (LF_ISSET(DB_REP_CREATE))
-		F_SET(dbp, DB_AM_REPLICATION);
+#ifdef HAVE_RPC
+	/*
+	 * RPC specific: must be last, as we replace methods set by the
+	 * access methods.
+	 */
+	if (dbenv != NULL && RPC_ON(dbenv)) {
+		__dbcl_dbp_init(dbp);
+		/*
+		 * !!!
+		 * We wrap the DB->open method for RPC, and the rpc.src file
+		 * can't handle that.
+		 */
+		dbp->open = __dbcl_db_open_wrap;
+		if ((ret = __dbcl_db_create(dbp, dbenv, flags)) != 0)
+			return (ret);
+	}
+#else
+	COMPQUIET(dbenv, NULL);
+#endif
 
 	return (0);
 }
@@ -370,6 +398,17 @@ __db_get_env(dbp)
 	return (dbp->dbenv);
 }
 
+/*
+ * __db_get_mpf --
+ *	Get the underlying DB_MPOOLFILE handle.
+ */
+static DB_MPOOLFILE *
+__db_get_mpf(dbp)
+	DB *dbp;
+{
+	return (dbp->mpf);
+}
+
 /*
  * get_transactional --
  *	Get whether this database was created in a transaction.
@@ -478,7 +517,7 @@ __db_get_encrypt_flags(dbp, flagsp)
 {
 	DB_ILLEGAL_IN_ENV(dbp, "DB->get_encrypt_flags");
 
-	return (__dbenv_get_encrypt_flags(dbp->dbenv, flagsp));
+	return (__env_get_encrypt_flags(dbp->dbenv, flagsp));
 }
 
 /*
@@ -497,7 +536,7 @@ __db_set_encrypt(dbp, passwd, flags)
 	DB_ILLEGAL_IN_ENV(dbp, "DB->set_encrypt");
 	DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_encrypt");
 
-	if ((ret = __dbenv_set_encrypt(dbp->dbenv, passwd, flags)) != 0)
+	if ((ret = __env_set_encrypt(dbp->dbenv, passwd, flags)) != 0)
 		return (ret);
 
 	/*
@@ -517,7 +556,7 @@ __db_set_errcall(dbp, errcall)
 	DB *dbp;
 	void (*errcall) __P((const DB_ENV *, const char *, const char *));
 {
-	__dbenv_set_errcall(dbp->dbenv, errcall);
+	__env_set_errcall(dbp->dbenv, errcall);
 }
 
 static void
@@ -525,7 +564,7 @@ __db_get_errfile(dbp, errfilep)
 	DB *dbp;
 	FILE **errfilep;
 {
-	__dbenv_get_errfile(dbp->dbenv, errfilep);
+	__env_get_errfile(dbp->dbenv, errfilep);
 }
 
 static void
@@ -533,7 +572,7 @@ __db_set_errfile(dbp, errfile)
 	DB *dbp;
 	FILE *errfile;
 {
-	__dbenv_set_errfile(dbp->dbenv, errfile);
+	__env_set_errfile(dbp->dbenv, errfile);
 }
 
 static void
@@ -541,7 +580,7 @@ __db_get_errpfx(dbp, errpfxp)
 	DB *dbp;
 	const char **errpfxp;
 {
-	__dbenv_get_errpfx(dbp->dbenv, errpfxp);
+	__env_get_errpfx(dbp->dbenv, errpfxp);
 }
 
 static void
@@ -549,7 +588,7 @@ __db_set_errpfx(dbp, errpfx)
 	DB *dbp;
 	const char *errpfx;
 {
-	__dbenv_set_errpfx(dbp->dbenv, errpfx);
+	__env_set_errpfx(dbp->dbenv, errpfx);
 }
 
 static int
@@ -740,7 +779,7 @@ __db_set_alloc(dbp, mal_func, real_func, free_func)
 	DB_ILLEGAL_IN_ENV(dbp, "DB->set_alloc");
 	DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_alloc");
 
-	return (__dbenv_set_alloc(dbp->dbenv, mal_func, real_func, free_func));
+	return (__env_set_alloc(dbp->dbenv, mal_func, real_func, free_func));
 }
 
 static void
@@ -748,7 +787,7 @@ __db_set_msgcall(dbp, msgcall)
 	DB *dbp;
 	void (*msgcall) __P((const DB_ENV *, const char *));
 {
-	__dbenv_set_msgcall(dbp->dbenv, msgcall);
+	__env_set_msgcall(dbp->dbenv, msgcall);
 }
 
 static void
@@ -756,7 +795,7 @@ __db_get_msgfile(dbp, msgfilep)
 	DB *dbp;
 	FILE **msgfilep;
 {
-	__dbenv_get_msgfile(dbp->dbenv, msgfilep);
+	__env_get_msgfile(dbp->dbenv, msgfilep);
 }
 
 static void
@@ -764,7 +803,7 @@ __db_set_msgfile(dbp, msgfile)
 	DB *dbp;
 	FILE *msgfile;
 {
-	__dbenv_set_msgfile(dbp->dbenv, msgfile);
+	__env_set_msgfile(dbp->dbenv, msgfile);
 }
 
 static int
@@ -824,97 +863,5 @@ __db_set_paniccall(dbp, paniccall)
 	DB *dbp;
 	void (*paniccall) __P((DB_ENV *, int));
 {
-	return (__dbenv_set_paniccall(dbp->dbenv, paniccall));
+	return (__env_set_paniccall(dbp->dbenv, paniccall));
 }
-
-#ifdef HAVE_RPC
-/*
- * __dbcl_init --
- *	Initialize a DB structure on the server.
- */
-static int
-__dbcl_init(dbp, dbenv, flags)
-	DB *dbp;
-	DB_ENV *dbenv;
-	u_int32_t flags;
-{
-	TAILQ_INIT(&dbp->free_queue);
-	TAILQ_INIT(&dbp->active_queue);
-	/* !!!
-	 * Note that we don't need to initialize the join_queue;  it's
-	 * not used in RPC clients.  See the comment in __dbcl_db_join_ret().
-	 */
-
-	dbp->associate = __dbcl_db_associate;
-	dbp->close = __dbcl_db_close;
-	dbp->cursor = __dbcl_db_cursor;
-	dbp->del = __dbcl_db_del;
-	dbp->err = __dbh_err;
-	dbp->errx = __dbh_errx;
-	dbp->fd = __dbcl_db_fd;
-	dbp->get = __dbcl_db_get;
-	dbp->get_byteswapped = __db_get_byteswapped;
-	dbp->get_transactional = __db_get_transactional;
-	dbp->get_type = __db_get_type;
-	dbp->join = __dbcl_db_join;
-	dbp->key_range = __dbcl_db_key_range;
-	dbp->get_dbname = __dbcl_db_get_name;
-	dbp->get_open_flags = __dbcl_db_get_open_flags;
-	dbp->open = __dbcl_db_open_wrap;
-	dbp->pget = __dbcl_db_pget;
-	dbp->put = __dbcl_db_put;
-	dbp->remove = __dbcl_db_remove;
-	dbp->rename = __dbcl_db_rename;
-	dbp->set_alloc = __dbcl_db_alloc;
-	dbp->set_append_recno = __dbcl_db_set_append_recno;
-	dbp->get_cachesize = __dbcl_db_get_cachesize;
-	dbp->set_cachesize = __dbcl_db_cachesize;
-	dbp->set_dup_compare = __dbcl_db_dup_compare;
-	dbp->get_encrypt_flags = __dbcl_db_get_encrypt_flags;
-	dbp->set_encrypt = __dbcl_db_encrypt;
-	dbp->set_errcall = __db_set_errcall;
-	dbp->get_errfile = __db_get_errfile;
-	dbp->set_errfile = __db_set_errfile;
-	dbp->get_errpfx = __db_get_errpfx;
-	dbp->set_errpfx = __db_set_errpfx;
-	dbp->set_feedback = __dbcl_db_feedback;
-	dbp->get_flags = __dbcl_db_get_flags;
-	dbp->set_flags = __dbcl_db_flags;
-	dbp->get_lorder = __dbcl_db_get_lorder;
-	dbp->set_lorder = __dbcl_db_lorder;
-	dbp->get_pagesize = __dbcl_db_get_pagesize;
-	dbp->set_pagesize = __dbcl_db_pagesize;
-	dbp->set_paniccall = __dbcl_db_panic;
-	dbp->stat = __dbcl_db_stat;
-	dbp->sync = __dbcl_db_sync;
-	dbp->truncate = __dbcl_db_truncate;
-	dbp->upgrade = __dbcl_db_upgrade;
-	dbp->verify = __dbcl_db_verify;
-
-	/*
-	 * Set all the method specific functions to client funcs as well.
-	 */
-	dbp->set_bt_compare = __dbcl_db_bt_compare;
-	dbp->set_bt_maxkey = __dbcl_db_bt_maxkey;
-	dbp->get_bt_minkey = __dbcl_db_get_bt_minkey;
-	dbp->set_bt_minkey = __dbcl_db_bt_minkey;
-	dbp->set_bt_prefix = __dbcl_db_bt_prefix;
-	dbp->get_h_ffactor = __dbcl_db_get_h_ffactor;
-	dbp->set_h_ffactor = __dbcl_db_h_ffactor;
-	dbp->set_h_hash = __dbcl_db_h_hash;
-	dbp->get_h_nelem = __dbcl_db_get_h_nelem;
-	dbp->set_h_nelem = __dbcl_db_h_nelem;
-	dbp->get_q_extentsize = __dbcl_db_get_extentsize;
-	dbp->set_q_extentsize = __dbcl_db_extentsize;
-	dbp->get_re_delim = __dbcl_db_get_re_delim;
-	dbp->set_re_delim = __dbcl_db_re_delim;
-	dbp->get_re_len = __dbcl_db_get_re_len;
-	dbp->set_re_len = __dbcl_db_re_len;
-	dbp->get_re_pad = __dbcl_db_get_re_pad;
-	dbp->set_re_pad = __dbcl_db_re_pad;
-	dbp->get_re_source = __dbcl_db_get_re_source;
-	dbp->set_re_source = __dbcl_db_re_source;
-
-	return (__dbcl_db_create(dbp, dbenv, flags));
-}
-#endif
diff --git a/storage/bdb/db/db_open.c b/storage/bdb/db/db_open.c
index 35e1150910d..a397c92bc53 100644
--- a/storage/bdb/db/db_open.c
+++ b/storage/bdb/db/db_open.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_open.c,v 11.240 2004/09/22 20:53:19 margo Exp $
+ * $Id: db_open.c,v 12.13 2005/10/12 17:45:53 bostic Exp $
  */
 
 #include "db_config.h"
@@ -42,12 +42,15 @@
  * 2. It can be called to open a subdatabase during normal operation.  In
  *    this case, name and subname will both be non-NULL and meta_pgno will
  *    be PGNO_BASE_MD (also PGNO_INVALID).
- * 3. It can be called during recovery to open a file/database, in which case
+ * 3. It can be called to open an in-memory database (name == NULL;
+ *    subname = name).
+ * 4. It can be called during recovery to open a file/database, in which case
  *    name will be non-NULL, subname will be NULL, and meta-pgno will be
  *    PGNO_BASE_MD.
- * 4. It can be called during recovery to open a subdatabase, in which case
+ * 5. It can be called during recovery to open a subdatabase, in which case
  *    name will be non-NULL, subname may be NULL and meta-pgno will be
  *    a valid pgno (i.e., not PGNO_BASE_MD).
+ * 6. It can be called during recovery to open an in-memory database.
  *
  * PUBLIC: int __db_open __P((DB *, DB_TXN *,
  * PUBLIC:     const char *, const char *, DBTYPE, u_int32_t, int, db_pgno_t));
@@ -85,8 +88,8 @@ __db_open(dbp, txn, fname, dname, type, flags, mode, meta_pgno)
 	/* Convert any DB->open flags. */
 	if (LF_ISSET(DB_RDONLY))
 		F_SET(dbp, DB_AM_RDONLY);
-	if (LF_ISSET(DB_DIRTY_READ))
-		F_SET(dbp, DB_AM_DIRTY);
+	if (LF_ISSET(DB_READ_UNCOMMITTED))
+		F_SET(dbp, DB_AM_READ_UNCOMMITTED);
 
 	if (txn != NULL)
 		F_SET(dbp, DB_AM_TXN);
@@ -95,43 +98,64 @@ __db_open(dbp, txn, fname, dname, type, flags, mode, meta_pgno)
 	dbp->type = type;
 
 	/*
-	 * If fname is NULL, it's always a create, so make sure that we
-	 * have a type specified.  It would be nice if this checking
-	 * were done in __db_open where most of the interface checking
-	 * is done, but this interface (__db_dbopen) is used by the
-	 * recovery and limbo system, so we need to safeguard this
-	 * interface as well.
+	 * If both fname and subname are NULL, it's always a create, so make
+	 * sure that we have both DB_CREATE and a type specified.  It would
+	 * be nice if this checking were done in __db_open where most of the
+	 * interface checking is done, but this interface (__db_dbopen) is
+	 * used by the recovery and limbo system, so we need to safeguard
+	 * this interface as well.
 	 */
 	if (fname == NULL) {
-		F_SET(dbp, DB_AM_INMEM);
+		if (dname == NULL) {
+			if (!LF_ISSET(DB_CREATE)) {
+				__db_err(dbenv,
+			    "DB_CREATE must be specified to create databases.");
+				return (ENOENT);
+			}
 
-		if (dbp->type == DB_UNKNOWN) {
-			__db_err(dbenv,
-			    "DBTYPE of unknown without existing file");
-			return (EINVAL);
-		}
+			F_SET(dbp, DB_AM_INMEM);
+			F_SET(dbp, DB_AM_CREATED);
 
-		if (dbp->pgsize == 0)
-			dbp->pgsize = DB_DEF_IOSIZE;
+			if (dbp->type == DB_UNKNOWN) {
+				__db_err(dbenv,
+				    "DBTYPE of unknown without existing file");
+				return (EINVAL);
+			}
+
+			if (dbp->pgsize == 0)
+				dbp->pgsize = DB_DEF_IOSIZE;
+
+			/*
+			 * If the file is a temporary file and we're
+			 * doing locking, then we have to create a
+			 * unique file ID.  We can't use our normal
+			 * dev/inode pair (or whatever this OS uses
+			 * in place of dev/inode pairs) because no
+			 * backing file will be created until the
+			 * mpool cache is filled forcing the buffers
+			 * to disk.  Grab a random locker ID to use
+			 * as a file ID.  The created ID must never
+			 * match a potential real file ID -- we know
+			 * it won't because real file IDs contain a
+			 * time stamp after the dev/inode pair, and
+			 * we're simply storing a 4-byte value.
+
+			 * !!!
+			 * Store the locker in the file id structure
+			 * -- we can get it from there as necessary,
+			 * and it saves having two copies.
+			*/
+			if (LOCKING_ON(dbenv) && (ret = __lock_id(dbenv,
+			    (u_int32_t *)dbp->fileid, NULL)) != 0)
+				return (ret);
+		} else
+			MAKE_INMEM(dbp);
 
 		/*
-		 * If the file is a temporary file and we're doing locking,
-		 * then we have to create a unique file ID.  We can't use our
-		 * normal dev/inode pair (or whatever this OS uses in place of
-		 * dev/inode pairs) because no backing file will be created
-		 * until the mpool cache is filled forcing the buffers to disk.
-		 * Grab a random locker ID to use as a file ID.  The created
-		 * ID must never match a potential real file ID -- we know it
-		 * won't because real file IDs contain a time stamp after the
-		 * dev/inode pair, and we're simply storing a 4-byte value.
-		 *
-		 * !!!
-		 * Store the locker in the file id structure -- we can get it
-		 * from there as necessary, and it saves having two copies.
+		 * Normally we would do handle locking here, however, with
+		 * in-memory files, we cannot do any database manipulation
+		 * until the mpool is open, so it happens later.
 		 */
-		if (LOCKING_ON(dbenv) &&
-		    (ret = __lock_id(dbenv, (u_int32_t *)dbp->fileid)) != 0)
-			return (ret);
 	} else if (dname == NULL && meta_pgno == PGNO_BASE_MD) {
 		/* Open/create the underlying file.  Acquire locks. */
 		if ((ret =
@@ -161,40 +185,46 @@ __db_open(dbp, txn, fname, dname, type, flags, mode, meta_pgno)
 		LF_SET(DB_TRUNCATE);
 
 	/* Set up the underlying environment. */
-	if ((ret = __db_dbenv_setup(dbp, txn, fname, id, flags)) != 0)
+	if ((ret = __db_dbenv_setup(dbp, txn, fname, dname, id, flags)) != 0)
 		return (ret);
 
-	/*
-	 * Set the open flag.  We use it to mean that the dbp has gone
-	 * through mpf setup, including dbreg_register.  Also, below,
-	 * the underlying access method open functions may want to do
-	 * things like acquire cursors, so the open flag has to be set
-	 * before calling them.
-	 */
-	F_SET(dbp, DB_AM_OPEN_CALLED);
-
-	/*
-	 * For unnamed files, we need to actually create the file now
-	 * that the mpool is open.
-	 */
-	if (fname == NULL && (ret = __db_new_file(dbp, txn, NULL, NULL)) != 0)
-		return (ret);
+	/* For in-memory databases, we now need to open/create the database. */
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
+		if (dname == NULL)
+			ret = __db_new_file(dbp, txn, NULL, NULL);
+		else {
+			id = TXN_INVALID;
+			if ((ret = __fop_file_setup(dbp,
+			    txn, dname, mode, flags, &id)) == 0 &&
+			    DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER)
+#if !defined(DEBUG_ROP)
+	    && !F_ISSET(dbp, DB_AM_RDONLY)
+#endif
+			)
+				ret = __dbreg_log_id(dbp,
+				    txn, dbp->log_filename->id, 1);
+		}
+		if (ret != 0)
+			goto err;
+	}
 
 	switch (dbp->type) {
-	case DB_BTREE:
-		ret = __bam_open(dbp, txn, fname, meta_pgno, flags);
-		break;
-	case DB_HASH:
-		ret = __ham_open(dbp, txn, fname, meta_pgno, flags);
-		break;
-	case DB_RECNO:
-		ret = __ram_open(dbp, txn, fname, meta_pgno, flags);
-		break;
-	case DB_QUEUE:
-		ret = __qam_open(dbp, txn, fname, meta_pgno, mode, flags);
-		break;
-	case DB_UNKNOWN:
-		return (__db_unknown_type(dbenv, "__db_dbopen", dbp->type));
+		case DB_BTREE:
+			ret = __bam_open(dbp, txn, fname, meta_pgno, flags);
+			break;
+		case DB_HASH:
+			ret = __ham_open(dbp, txn, fname, meta_pgno, flags);
+			break;
+		case DB_RECNO:
+			ret = __ram_open(dbp, txn, fname, meta_pgno, flags);
+			break;
+		case DB_QUEUE:
+			ret = __qam_open(
+			    dbp, txn, fname, meta_pgno, mode, flags);
+			break;
+		case DB_UNKNOWN:
+			return (
+			    __db_unknown_type(dbenv, "__db_dbopen", dbp->type));
 	}
 	if (ret != 0)
 		goto err;
@@ -202,16 +232,16 @@ __db_open(dbp, txn, fname, dname, type, flags, mode, meta_pgno)
 	DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, fname);
 
 	/*
-	 * Unnamed files don't need handle locks, so we only have to check
+	 * Temporary files don't need handle locks, so we only have to check
 	 * for a handle lock downgrade or lockevent in the case of named
 	 * files.
 	 */
-	if (!F_ISSET(dbp, DB_AM_RECOVER) &&
-	    fname != NULL && LOCK_ISSET(dbp->handle_lock)) {
-		if (txn != NULL) {
+	if (!F_ISSET(dbp, DB_AM_RECOVER) && (fname != NULL || dname != NULL)
+	    && LOCK_ISSET(dbp->handle_lock)) {
+		if (txn != NULL)
 			ret = __txn_lockevent(dbenv,
 			    txn, dbp, &dbp->handle_lock, dbp->lid);
-		} else if (LOCKING_ON(dbenv))
+		else if (LOCKING_ON(dbenv))
 			/* Trade write handle lock for read handle lock. */
 			ret = __lock_downgrade(dbenv,
 			    &dbp->handle_lock, DB_LOCK_READ, 0);
@@ -341,8 +371,8 @@ err:	return (ret);
 
 /*
  * __db_chk_meta --
- *	Take a buffer containing a meta-data page and check it for a checksum
- *	(and verify the checksum if necessary) and possibly decrypt it.
+ *	Take a buffer containing a meta-data page and check it for a valid LSN,
+ *	checksum (and verify the checksum if necessary) and possibly decrypt it.
  *
  *	Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
  *
@@ -355,11 +385,13 @@ __db_chk_meta(dbenv, dbp, meta, do_metachk)
 	DBMETA *meta;
 	int do_metachk;
 {
+	DB_LSN cur_lsn, swap_lsn;
 	int is_hmac, ret, swapped;
-	u_int32_t orig_chk;
+	u_int32_t magic, orig_chk;
 	u_int8_t *chksum;
 
 	ret = 0;
+	swapped = 0;
 
 	if (FLD_ISSET(meta->metaflags, DBMETA_CHKSUM)) {
 		if (dbp != NULL)
@@ -399,6 +431,56 @@ chk_retry:		if ((ret = __db_check_chksum(dbenv,
 #ifdef HAVE_CRYPTO
 	ret = __crypto_decrypt_meta(dbenv, dbp, (u_int8_t *)meta, do_metachk);
 #endif
+
+	/* Now that we're decrypted, we can check LSN. */
+	if (LOGGING_ON(dbenv)) {
+		/*
+		 * This gets called both before and after swapping, so we
+		 * need to check ourselves.  If we already swapped it above,
+		 * we'll know that here.
+		 */
+
+		swap_lsn = meta->lsn;
+		magic = meta->magic;
+lsn_retry:
+		if (swapped) {
+			M_32_SWAP(swap_lsn.file);
+			M_32_SWAP(swap_lsn.offset);
+			M_32_SWAP(magic);
+		}
+		switch (magic) {
+		case DB_BTREEMAGIC:
+		case DB_HASHMAGIC:
+		case DB_QAMMAGIC:
+		case DB_RENAMEMAGIC:
+			break;
+		default:
+			if (swapped)
+				return (EINVAL);
+			swapped = 1;
+			goto lsn_retry;
+		}
+		if (!IS_REP_CLIENT(dbenv) &&
+		    !IS_NOT_LOGGED_LSN(swap_lsn) && !IS_ZERO_LSN(swap_lsn)) {
+			/* Need to do check. */
+			if ((ret = __log_current_lsn(dbenv,
+			    &cur_lsn, NULL, NULL)) != 0)
+				return (ret);
+			if (log_compare(&swap_lsn, &cur_lsn) > 0) {
+				__db_err(dbenv,
+			"file %s (meta pgno = %lu) has LSN [%lu][%lu].",
+				    dbp->fname == NULL
+				    ? "unknown" : dbp->fname,
+				    (u_long)dbp->meta_pgno,
+				    (u_long)swap_lsn.file,
+				    (u_long)swap_lsn.offset);
+				__db_err(dbenv, "end of log is [%lu][%lu]",
+				    (u_long)cur_lsn.file,
+				    (u_long)cur_lsn.offset);
+				return (EINVAL);
+			}
+		}
+	}
 	return (ret);
 }
 
@@ -433,7 +515,7 @@ __db_meta_setup(dbenv, dbp, name, meta, oflags, do_metachk)
 	 * we don't consider it an error, for example, if the user set
 	 * an expected byte order and the found file doesn't match it.
 	 */
-	F_CLR(dbp, DB_AM_SWAP);
+	F_CLR(dbp, DB_AM_SWAP | DB_AM_IN_RENAME);
 	magic = meta->magic;
 
 swap_retry:
diff --git a/storage/bdb/db/db_overflow.c b/storage/bdb/db/db_overflow.c
index 046e60fab9a..818ee91a8b2 100644
--- a/storage/bdb/db/db_overflow.c
+++ b/storage/bdb/db/db_overflow.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_overflow.c,v 11.54 2004/03/28 17:17:50 bostic Exp $
+ * $Id: db_overflow.c,v 12.3 2005/08/08 17:30:51 bostic Exp $
  */
 
 #include "db_config.h"
@@ -241,8 +241,6 @@ __db_poff(dbc, dbt, pgnop)
 			LSN(lastp) = new_lsn;
 		LSN(pagep) = new_lsn;
 
-		P_INIT(pagep, dbp->pgsize,
-		    PGNO(pagep), PGNO_INVALID, PGNO_INVALID, 0, P_OVERFLOW);
 		OV_LEN(pagep) = pagespace;
 		OV_REF(pagep) = 1;
 		memcpy((u_int8_t *)pagep + P_OVERHEAD(dbp), p, pagespace);
@@ -288,7 +286,7 @@ __db_ovref(dbc, pgno, adjust)
 	mpf = dbp->mpf;
 
 	if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0)
-		return (__db_pgerr(dbp, pgno, ret));
+		return (ret);
 
 	if (DBC_LOGGING(dbc)) {
 		if ((ret = __db_ovref_log(dbp,
@@ -327,7 +325,7 @@ __db_doff(dbc, pgno)
 
 	do {
 		if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0)
-			return (__db_pgerr(dbp, pgno, ret));
+			return (ret);
 
 		DB_ASSERT(TYPE(pagep) == P_OVERFLOW);
 		/*
diff --git a/storage/bdb/db/db_ovfl_vrfy.c b/storage/bdb/db/db_ovfl_vrfy.c
index a3c5fba7c12..ceff4d2569c 100644
--- a/storage/bdb/db/db_ovfl_vrfy.c
+++ b/storage/bdb/db/db_ovfl_vrfy.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_ovfl_vrfy.c,v 11.56 2004/01/28 03:35:57 bostic Exp $
+ * $Id: db_ovfl_vrfy.c,v 12.1 2005/06/16 20:21:13 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_pr.c b/storage/bdb/db/db_pr.c
index e63daf3ee20..4618d4f4754 100644
--- a/storage/bdb/db/db_pr.c
+++ b/storage/bdb/db/db_pr.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_pr.c,v 11.121 2004/10/28 14:48:43 bostic Exp $
+ * $Id: db_pr.c,v 12.17 2005/11/08 03:13:30 bostic Exp $
  */
 
 #include "db_config.h"
@@ -35,9 +35,10 @@
 void
 __db_loadme()
 {
-	u_int32_t id;
+	pid_t pid;
+	db_threadid_t tid;
 
-	__os_id(&id);
+	__os_id(NULL, &pid, &tid);
 }
 
 #ifdef HAVE_STATISTICS
@@ -106,38 +107,37 @@ __db_dumptree(dbp, op, name)
 }
 
 static const FN __db_flags_fn[] = {
-	{ DB_AM_CHKSUM,		"checksumming" },
-	{ DB_AM_CL_WRITER,	"client replica writer" },
-	{ DB_AM_COMPENSATE,	"created by compensating transaction" },
-	{ DB_AM_CREATED,	"database created" },
-	{ DB_AM_CREATED_MSTR,	"encompassing file created" },
-	{ DB_AM_DBM_ERROR,	"dbm/ndbm error" },
-	{ DB_AM_DELIMITER,	"variable length" },
-	{ DB_AM_DIRTY,		"dirty reads" },
-	{ DB_AM_DISCARD,	"discard cached pages" },
-	{ DB_AM_DUP,		"duplicates" },
-	{ DB_AM_DUPSORT,	"sorted duplicates" },
-	{ DB_AM_ENCRYPT,	"encrypted" },
-	{ DB_AM_FIXEDLEN,	"fixed-length records" },
-	{ DB_AM_INMEM,		"in-memory" },
-	{ DB_AM_IN_RENAME,	"file is being renamed" },
-	{ DB_AM_NOT_DURABLE,	"changes not logged" },
-	{ DB_AM_OPEN_CALLED,	"open called" },
-	{ DB_AM_PAD,		"pad value" },
-	{ DB_AM_PGDEF,		"default page size" },
-	{ DB_AM_RDONLY,		"read-only" },
-	{ DB_AM_RECNUM,		"Btree record numbers" },
-	{ DB_AM_RECOVER,	"opened for recovery" },
-	{ DB_AM_RENUMBER,	"renumber" },
-	{ DB_AM_REPLICATION,	"replication file" },
-	{ DB_AM_REVSPLITOFF,	"no reverse splits" },
-	{ DB_AM_SECONDARY,	"secondary" },
-	{ DB_AM_SNAPSHOT,	"load on open" },
-	{ DB_AM_SUBDB,		"subdatabases" },
-	{ DB_AM_SWAP,		"needswap" },
-	{ DB_AM_TXN,		"transactional" },
-	{ DB_AM_VERIFYING,	"verifier" },
-	{ 0,			NULL }
+	{ DB_AM_CHKSUM,			"checksumming" },
+	{ DB_AM_CL_WRITER,		"client replica writer" },
+	{ DB_AM_COMPENSATE,		"created by compensating transaction" },
+	{ DB_AM_CREATED,		"database created" },
+	{ DB_AM_CREATED_MSTR,		"encompassing file created" },
+	{ DB_AM_DBM_ERROR,		"dbm/ndbm error" },
+	{ DB_AM_DELIMITER,		"variable length" },
+	{ DB_AM_DISCARD,		"discard cached pages" },
+	{ DB_AM_DUP,			"duplicates" },
+	{ DB_AM_DUPSORT,		"sorted duplicates" },
+	{ DB_AM_ENCRYPT,		"encrypted" },
+	{ DB_AM_FIXEDLEN,		"fixed-length records" },
+	{ DB_AM_INMEM,			"in-memory" },
+	{ DB_AM_IN_RENAME,		"file is being renamed" },
+	{ DB_AM_NOT_DURABLE,		"changes not logged" },
+	{ DB_AM_OPEN_CALLED,		"open called" },
+	{ DB_AM_PAD,			"pad value" },
+	{ DB_AM_PGDEF,			"default page size" },
+	{ DB_AM_RDONLY,			"read-only" },
+	{ DB_AM_READ_UNCOMMITTED,	"read-uncommitted" },
+	{ DB_AM_RECNUM,			"Btree record numbers" },
+	{ DB_AM_RECOVER,		"opened for recovery" },
+	{ DB_AM_RENUMBER,		"renumber" },
+	{ DB_AM_REVSPLITOFF,		"no reverse splits" },
+	{ DB_AM_SECONDARY,		"secondary" },
+	{ DB_AM_SNAPSHOT,		"load on open" },
+	{ DB_AM_SUBDB,			"subdatabases" },
+	{ DB_AM_SWAP,			"needswap" },
+	{ DB_AM_TXN,			"transactional" },
+	{ DB_AM_VERIFYING,		"verifier" },
+	{ 0,				NULL }
 };
 
 /*
@@ -182,8 +182,7 @@ __db_prdb(dbp, flags)
 		bt = dbp->bt_internal;
 		__db_msg(dbenv, "bt_meta: %lu bt_root: %lu",
 		    (u_long)bt->bt_meta, (u_long)bt->bt_root);
-		__db_msg(dbenv, "bt_maxkey: %lu bt_minkey: %lu",
-		    (u_long)bt->bt_maxkey, (u_long)bt->bt_minkey);
+		__db_msg(dbenv, "bt_minkey: %lu", (u_long)bt->bt_minkey);
 		if (!LF_ISSET(DB_PR_RECOVERYTEST))
 			__db_msg(dbenv, "bt_compare: %#lx bt_prefix: %#lx",
 			    P_TO_ULONG(bt->bt_compare),
@@ -246,7 +245,8 @@ __db_prtree(dbp, flags)
 	 * Find out the page number of the last page in the database, then
 	 * dump each page.
 	 */
-	__memp_last_pgno(mpf, &last);
+	if ((ret = __memp_last_pgno(mpf, &last)) != 0)
+		return (ret);
 	for (i = 0; i <= last; ++i) {
 		if ((ret = __memp_fget(mpf, &i, 0, &h)) != 0)
 			return (ret);
@@ -362,8 +362,7 @@ __db_bmeta(dbp, h, flags)
 
 	__db_meta(dbp, (DBMETA *)h, fn, flags);
 
-	__db_msg(dbenv, "\tmaxkey: %lu minkey: %lu",
-	    (u_long)h->maxkey, (u_long)h->minkey);
+	__db_msg(dbenv, "\tminkey: %lu", (u_long)h->minkey);
 	if (dbp->type == DB_RECNO)
 		__db_msg(dbenv, "\tre_len: %#lx re_pad: %#lx",
 		    (u_long)h->re_len, (u_long)h->re_pad);
@@ -518,19 +517,29 @@ __db_prpage(dbp, h, flags)
 	pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize;
 
 	/* Page number, page type. */
-	__db_msgadd(dbenv, &mb, "page %lu: %s level: %lu",
-	    (u_long)h->pgno, s, (u_long)h->level);
+	__db_msgadd(dbenv, &mb, "page %lu: %s:", (u_long)h->pgno, s);
+
+	/*
+	 * LSNs on a metadata page will be different from the original after an
+	 * abort, in some cases.  Don't display them if we're testing recovery.
+	 */
+	if (!LF_ISSET(DB_PR_RECOVERYTEST) ||
+	    (TYPE(h) != P_BTREEMETA && TYPE(h) != P_HASHMETA &&
+	    TYPE(h) != P_QAMMETA && TYPE(h) != P_QAMDATA))
+		__db_msgadd(dbenv, &mb, " LSN [%lu][%lu]:",
+		    (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+	/*
+	 * Page level (only applicable for Btree/Recno, but we always display
+	 * it, for no particular reason.
+	 */
+	__db_msgadd(dbenv, &mb, " level %lu", (u_long)h->level);
 
 	/* Record count. */
 	if (TYPE(h) == P_IBTREE ||
 	    TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO &&
 	    h->pgno == ((BTREE *)dbp->bt_internal)->bt_root))
 		__db_msgadd(dbenv, &mb, " records: %lu", (u_long)RE_NREC(h));
-
-	/* LSN. */
-	if (!LF_ISSET(DB_PR_RECOVERYTEST))
-		__db_msgadd(dbenv, &mb, " (lsn.file: %lu lsn.offset: %lu)",
-		    (u_long)LSN(h).file, (u_long)LSN(h).offset);
 	DB_MSGBUF_FLUSH(dbenv, &mb);
 
 	switch (TYPE(h)) {
@@ -564,11 +573,6 @@ __db_prpage(dbp, h, flags)
 		break;
 	}
 
-	/* LSN. */
-	if (LF_ISSET(DB_PR_RECOVERYTEST))
-		__db_msg(dbenv, " (lsn.file: %lu lsn.offset: %lu)",
-		    (u_long)LSN(h).file, (u_long)LSN(h).offset);
-
 	s = "\t";
 	if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
 		__db_msgadd(dbenv, &mb, "%sprev: %4lu next: %4lu",
@@ -680,7 +684,7 @@ __db_prpage(dbp, h, flags)
 		case P_IBTREE:
 			bi = sp;
 			__db_msgadd(dbenv, &mb,
-			    "count: %4lu pgno: %4lu type: %4lu",
+			    "count: %4lu pgno: %4lu type: %lu ",
 			    (u_long)bi->nrecs, (u_long)bi->pgno,
 			    (u_long)bi->type);
 			switch (B_TYPE(bi->type)) {
@@ -867,8 +871,8 @@ __db_lockmode_to_string(mode)
 		return ("Intent shared/read");
 	case DB_LOCK_IWR:
 		return ("Intent to read/write");
-	case DB_LOCK_DIRTY:
-		return ("Dirty read");
+	case DB_LOCK_READ_UNCOMMITTED:
+		return ("Read uncommitted");
 	case DB_LOCK_WWRITE:
 		return ("Was written");
 	default:
@@ -988,25 +992,31 @@ __db_dump_pp(dbp, subname, callback, handle, pflag, keyflag)
 	int pflag, keyflag;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
 	PANIC_CHECK(dbenv);
 	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->dump");
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 1)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 1)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	ret = __db_dump(dbp, subname, callback, handle, pflag, keyflag);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
-	return (0);
+err:	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -1088,8 +1098,11 @@ retry: while ((ret =
 		data.ulen = data.size;
 		goto retry;
 	}
+	if (ret == DB_NOTFOUND)
+		ret = 0;
 
-	(void)__db_prfooter(handle, callback);
+	if ((t_ret = __db_prfooter(handle, callback)) != 0 && ret == 0)
+		ret = t_ret;
 
 err:	if ((t_ret = __db_c_close(dbcp)) != 0 && ret == 0)
 		ret = t_ret;
@@ -1310,39 +1323,28 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
 	case DB_BTREE:
 		if ((ret = callback(handle, "type=btree\n")) != 0)
 			goto err;
-		if (using_vdp) {
-			if (F_ISSET(pip, VRFY_HAS_RECNUMS))
-				if ((ret =
-				    callback(handle, "recnum=1\n")) != 0)
-					goto err;
-			if (pip->bt_maxkey != 0) {
-				snprintf(buf, buflen,
-				    "bt_maxkey=%lu\n", (u_long)pip->bt_maxkey);
-				if ((ret = callback(handle, buf)) != 0)
-					goto err;
-			}
-			if (pip->bt_minkey != 0 &&
-			    pip->bt_minkey != DEFMINKEYPAGE) {
-				snprintf(buf, buflen,
-				    "bt_minkey=%lu\n", (u_long)pip->bt_minkey);
-				if ((ret = callback(handle, buf)) != 0)
-					goto err;
-			}
-			break;
-		}
-
-		if ((ret = __db_get_flags(dbp, &flags)) != 0) {
-			__db_err(dbenv, "DB->get_flags: %s", db_strerror(ret));
-			goto err;
-		}
-		if (F_ISSET(dbp, DB_AM_RECNUM))
-			if ((ret = callback(handle, "recnum=1\n")) != 0)
+		if (using_vdp)
+			tmp_int = F_ISSET(pip, VRFY_HAS_RECNUMS) ? 1 : 0;
+		else {
+			if ((ret = __db_get_flags(dbp, &flags)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_flags: %s", db_strerror(ret));
 				goto err;
-		if ((ret = __bam_get_bt_minkey(dbp, &tmp_u_int32)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_bt_minkey: %s", db_strerror(ret));
-			goto err;
+			}
+			tmp_int = F_ISSET(dbp, DB_AM_RECNUM) ? 1 : 0;
 		}
+		if (tmp_int && (ret = callback(handle, "recnum=1\n")) != 0)
+			goto err;
+
+		if (using_vdp)
+			tmp_u_int32 = pip->bt_minkey;
+		else
+			if ((ret =
+			    __bam_get_bt_minkey(dbp, &tmp_u_int32)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_bt_minkey: %s", db_strerror(ret));
+				goto err;
+			}
 		if (tmp_u_int32 != 0 && tmp_u_int32 != DEFMINKEYPAGE) {
 			snprintf(buf, buflen,
 			    "bt_minkey=%lu\n", (u_long)tmp_u_int32);
@@ -1354,38 +1356,35 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
 #ifdef HAVE_HASH
 		if ((ret = callback(handle, "type=hash\n")) != 0)
 			goto err;
-		if (using_vdp) {
-			if (pip->h_ffactor != 0) {
-				snprintf(buf, buflen,
-				    "h_ffactor=%lu\n", (u_long)pip->h_ffactor);
-				if ((ret = callback(handle, buf)) != 0)
-					goto err;
+		if (using_vdp)
+			tmp_u_int32 = pip->h_ffactor;
+		else
+			if ((ret =
+			    __ham_get_h_ffactor(dbp, &tmp_u_int32)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_h_ffactor: %s", db_strerror(ret));
+				goto err;
 			}
-			if (pip->h_nelem != 0) {
-				snprintf(buf, buflen,
-				    "h_nelem=%lu\n", (u_long)pip->h_nelem);
-				if ((ret = callback(handle, buf)) != 0)
-					goto err;
-			}
-			break;
-		}
-		if ((ret = __ham_get_h_ffactor(dbp, &tmp_u_int32)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_h_ffactor: %s", db_strerror(ret));
-			goto err;
-		}
 		if (tmp_u_int32 != 0) {
 			snprintf(buf, buflen,
 			    "h_ffactor=%lu\n", (u_long)tmp_u_int32);
 			if ((ret = callback(handle, buf)) != 0)
 				goto err;
 		}
-		if ((ret = __ham_get_h_nelem(dbp, &tmp_u_int32)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_h_nelem: %s", db_strerror(ret));
-			goto err;
-		}
-		if (tmp_u_int32 != 0) {
+
+		if (using_vdp)
+			tmp_u_int32 = pip->h_nelem;
+		else
+			if ((ret = __ham_get_h_nelem(dbp, &tmp_u_int32)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_h_nelem: %s", db_strerror(ret));
+				goto err;
+			}
+		/*
+		 * Hash databases have an h_nelem field of 0 or 1, neither
+		 * of those values is interesting.
+		 */
+		if (tmp_u_int32 > 1) {
 			snprintf(buf, buflen,
 			    "h_nelem=%lu\n", (u_long)tmp_u_int32);
 			if ((ret = callback(handle, buf)) != 0)
@@ -1400,36 +1399,41 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
 #ifdef HAVE_QUEUE
 		if ((ret = callback(handle, "type=queue\n")) != 0)
 			goto err;
-		if (vdp != NULL) {
-			snprintf(buf,
-			    buflen, "re_len=%lu\n", (u_long)vdp->re_len);
-			if ((ret = callback(handle, buf)) != 0)
+		if (using_vdp)
+			tmp_u_int32 = vdp->re_len;
+		else
+			if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_re_len: %s", db_strerror(ret));
 				goto err;
-			break;
-		}
-		if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_re_len: %s", db_strerror(ret));
-			goto err;
-		}
+			}
 		snprintf(buf, buflen, "re_len=%lu\n", (u_long)tmp_u_int32);
 		if ((ret = callback(handle, buf)) != 0)
 			goto err;
-		if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_re_pad: %s", db_strerror(ret));
-			goto err;
-		}
+
+		if (using_vdp)
+			tmp_int = (int)vdp->re_pad;
+		else
+			if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) {
+				__db_err(dbenv,
+				    "DB->get_re_pad: %s", db_strerror(ret));
+				goto err;
+			}
 		if (tmp_int != 0 && tmp_int != ' ') {
 			snprintf(buf, buflen, "re_pad=%#x\n", tmp_int);
 			if ((ret = callback(handle, buf)) != 0)
 				goto err;
 		}
-		if ((ret = __qam_get_extentsize(dbp, &tmp_u_int32)) != 0) {
-			__db_err(dbenv,
-			    "DB->get_q_extentsize: %s", db_strerror(ret));
-			goto err;
-		}
+
+		if (using_vdp)
+			tmp_u_int32 = vdp->page_ext;
+		else
+			if ((ret =
+			    __qam_get_extentsize(dbp, &tmp_u_int32)) != 0) {
+				__db_err(dbenv, "DB->get_q_extentsize: %s",
+				    db_strerror(ret));
+				goto err;
+			}
 		if (tmp_u_int32 != 0) {
 			snprintf(buf, buflen,
 			    "extentsize=%lu\n", (u_long)tmp_u_int32);
@@ -1444,38 +1448,42 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
 	case DB_RECNO:
 		if ((ret = callback(handle, "type=recno\n")) != 0)
 			goto err;
-		if (using_vdp) {
-			if (F_ISSET(pip, VRFY_IS_RRECNO))
+		if (using_vdp)
+			tmp_int = F_ISSET(pip, VRFY_IS_RRECNO) ? 1 : 0;
+		else
+			tmp_int = F_ISSET(dbp, DB_AM_RENUMBER) ? 1 : 0;
+		if (tmp_int != 0 &&
+		    (ret = callback(handle, "renumber=1\n")) != 0)
+				goto err;
+
+		if (using_vdp)
+			tmp_int = F_ISSET(pip, VRFY_IS_FIXEDLEN) ? 1 : 0;
+		else
+			tmp_int = F_ISSET(dbp, DB_AM_FIXEDLEN) ? 1 : 0;
+		if (tmp_int) {
+			if (using_vdp)
+				tmp_u_int32 = pip->re_len;
+			else
 				if ((ret =
-				    callback(handle, "renumber=1\n")) != 0)
+				    __ram_get_re_len(dbp, &tmp_u_int32)) != 0) {
+					__db_err(dbenv, "DB->get_re_len: %s",
+					    db_strerror(ret));
 					goto err;
-			if (pip->re_len > 0) {
-				snprintf(buf, buflen,
-				    "re_len=%lu\n", (u_long)pip->re_len);
-				if ((ret = callback(handle, buf)) != 0)
-					goto err;
-			}
-			break;
-		}
-		if (F_ISSET(dbp, DB_AM_RENUMBER))
-			if ((ret = callback(handle, "renumber=1\n")) != 0)
-				goto err;
-		if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
-			if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) {
-				__db_err(dbenv,
-				    "DB->get_re_len: %s", db_strerror(ret));
-				goto err;
-			}
+				}
 			snprintf(buf, buflen,
 			    "re_len=%lu\n", (u_long)tmp_u_int32);
 			if ((ret = callback(handle, buf)) != 0)
 				goto err;
 
-			if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) {
-				__db_err(dbenv,
-				    "DB->get_re_pad: %s", db_strerror(ret));
-				goto err;
-			}
+			if (using_vdp)
+				tmp_int = (int)pip->re_pad;
+			else
+				if ((ret =
+				    __ram_get_re_pad(dbp, &tmp_int)) != 0) {
+					__db_err(dbenv, "DB->get_re_pad: %s",
+					    db_strerror(ret));
+					goto err;
+				}
 			if (tmp_int != 0 && tmp_int != ' ') {
 				snprintf(buf,
 				    buflen, "re_pad=%#x\n", (u_int)tmp_int);
@@ -1493,13 +1501,21 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
 	}
 
 	if (using_vdp) {
+		if (F_ISSET(pip, VRFY_HAS_CHKSUM))
+			if ((ret = callback(handle, "chksum=1\n")) != 0)
+				goto err;
 		if (F_ISSET(pip, VRFY_HAS_DUPS))
 			if ((ret = callback(handle, "duplicates=1\n")) != 0)
 				goto err;
 		if (F_ISSET(pip, VRFY_HAS_DUPSORT))
 			if ((ret = callback(handle, "dupsort=1\n")) != 0)
 				goto err;
-		/* We should handle page size. XXX */
+		/*
+		 * !!!
+		 * We don't know if the page size was the default if we're
+		 * salvaging.  It doesn't seem that interesting to have, so
+		 * we ignore it for now.
+		 */
 	} else {
 		if (F_ISSET(dbp, DB_AM_CHKSUM))
 			if ((ret = callback(handle, "chksum=1\n")) != 0)
diff --git a/storage/bdb/db/db_rec.c b/storage/bdb/db/db_rec.c
index bce2b8701a3..e0c13f255c1 100644
--- a/storage/bdb/db/db_rec.c
+++ b/storage/bdb/db/db_rec.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_rec.c,v 11.61 2004/10/21 14:39:58 bostic Exp $
+ * $Id: db_rec.c,v 12.12 2005/10/27 01:03:01 bostic Exp $
  */
 
 #include "db_config.h"
@@ -51,13 +51,13 @@ __db_addrem_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_addrem_print);
-	REC_INTRO(__db_addrem_read, 1);
+	REC_INTRO(__db_addrem_read, 1, 1);
 
 	REC_FGET(mpf, argp->pgno, &pagep, done);
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 	change = 0;
 	if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_DUP) ||
 	    (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_DUP)) {
@@ -121,7 +121,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_big_print);
-	REC_INTRO(__db_big_read, 1);
+	REC_INTRO(__db_big_read, 1, 0);
 
 	REC_FGET(mpf, argp->pgno, &pagep, ppage);
 
@@ -133,7 +133,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
 	 */
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 	change = 0;
 	if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
 	    (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
@@ -176,7 +176,7 @@ ppage:	if (argp->prev_pgno != PGNO_INVALID) {
 
 		cmp_n = log_compare(lsnp, &LSN(pagep));
 		cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
-		CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+		CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->prevlsn);
 
 		if (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) {
 			/* Redo add, undo delete. */
@@ -202,7 +202,7 @@ npage:	if (argp->next_pgno != PGNO_INVALID) {
 
 		cmp_n = log_compare(lsnp, &LSN(pagep));
 		cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
-		CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+		CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->nextlsn);
 		if (cmp_p == 0 && DB_REDO(op)) {
 			PREV_PGNO(pagep) = PGNO_INVALID;
 			change = DB_MPOOL_DIRTY;
@@ -250,13 +250,13 @@ __db_ovref_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_ovref_print);
-	REC_INTRO(__db_ovref_read, 1);
+	REC_INTRO(__db_ovref_read, 1, 0);
 
 	REC_FGET(mpf, argp->pgno, &pagep, done);
 
 	modified = 0;
 	cmp = log_compare(&LSN(pagep), &argp->lsn);
-	CHECK_LSN(op, cmp, &LSN(pagep), &argp->lsn);
+	CHECK_LSN(dbenv, op, cmp, &LSN(pagep), &argp->lsn);
 	if (cmp == 0 && DB_REDO(op)) {
 		/* Need to redo update described. */
 		OV_REF(pagep) += argp->adjust;
@@ -339,13 +339,13 @@ __db_noop_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_noop_print);
-	REC_INTRO(__db_noop_read, 0);
+	REC_INTRO(__db_noop_read, 0, 0);
 
 	REC_FGET(mpf, argp->pgno, &pagep, done);
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->prevlsn);
 	change = 0;
 	if (cmp_p == 0 && DB_REDO(op)) {
 		LSN(pagep) = *lsnp;
@@ -391,7 +391,7 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
 	pagep = NULL;
 	created = meta_modified = modified = 0;
 	REC_PRINT(__db_pg_alloc_print);
-	REC_INTRO(__db_pg_alloc_read, 0);
+	REC_INTRO(__db_pg_alloc_read, 0, 0);
 
 	/*
 	 * Fix up the metadata page.  If we're redoing the operation, we have
@@ -410,7 +410,7 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
 	}
 	cmp_n = log_compare(lsnp, &LSN(meta));
 	cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
-	CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn);
 	if (cmp_p == 0 && DB_REDO(op)) {
 		/* Need to redo update described. */
 		LSN(meta) = *lsnp;
@@ -439,6 +439,29 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
 		meta_modified = 1;
 	}
 
+#ifdef HAVE_FTRUNCATE
+	/*
+	 * Check to see if we are keeping a sorted
+	 * freelist, if so put this back in the in
+	 * memory list.  It must be the first element.
+	 */
+	if (op == DB_TXN_ABORT && !IS_ZERO_LSN(argp->page_lsn)) {
+		db_pgno_t *list;
+		u_int32_t nelem;
+
+		if ((ret = __memp_get_freelist(mpf, &nelem, &list)) != 0)
+			goto out;
+		if (list != NULL) {
+			if ((ret =
+			    __memp_extend_freelist(mpf, nelem + 1, &list)) != 0)
+				goto out;
+			if (nelem != 0)
+				memmove(list + 1, list, nelem * sizeof(list));
+			*list = argp->pgno;
+		}
+	}
+#endif
+
 	/*
 	 * Fix up the allocated page. If the page does not exist
 	 * and we can truncate it then don't create it.
@@ -485,7 +508,7 @@ __db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
 	    (IS_ZERO_LSN(argp->page_lsn) && IS_INIT_LSN(LSN(pagep))))
 		cmp_p = 0;
 
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->page_lsn);
 	/*
 	 * Another special case we have to handle is if we ended up with a
 	 * page of all 0's which can happen if we abort between allocating a
@@ -594,26 +617,30 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 {
 	DBMETA *meta;
 	DB_LSN copy_lsn;
-	PAGE *pagep;
-	db_pgno_t pgno;
-	int cmp_n, cmp_p, meta_modified, modified, ret;
+	PAGE *pagep, *prevp;
+	int cmp_n, cmp_p, is_meta, meta_modified, modified, ret;
 
 	meta = NULL;
 	pagep = NULL;
+	prevp = NULL;
 	meta_modified = modified = 0;
 
 	/*
-	 * Get the metapage first so we can see where we are.
+	 * Get the "metapage".  This will either be the metapage
+	 * or the previous page in the free list if we are doing
+	 * sorted allocations.  If its a previous page then
+	 * we will not be truncating.
 	 */
-	pgno = PGNO_BASE_MD;
-	if ((ret = __memp_fget(mpf, &pgno, 0, &meta)) != 0) {
-		/* The metadata page must always exist. */
-		ret = __db_pgerr(file_dbp, pgno, ret);
-		goto out;
-	}
+	is_meta = argp->meta_pgno == PGNO_BASE_MD;
+
+	REC_FGET(mpf, argp->meta_pgno, &meta, check_meta);
+
+	if (argp->meta_pgno != PGNO_BASE_MD)
+		prevp = (PAGE *)meta;
+
 	cmp_n = log_compare(lsnp, &LSN(meta));
 	cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
-	CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(meta), &argp->meta_lsn);
 
 	/*
 	 * Fix up the metadata page.  If we're redoing or undoing the operation
@@ -627,30 +654,45 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 		*/
 		if (argp->pgno == argp->last_pgno)
 			meta->last_pgno = argp->pgno - 1;
-		else
+		else if (prevp == NULL)
 			meta->free = argp->pgno;
+		else
+			NEXT_PGNO(prevp) = argp->pgno;
 #else
 		/* Need to redo the deallocation. */
-		meta->free = argp->pgno;
+		if (prevp == NULL)
+			meta->free = argp->pgno;
+		else
+			NEXT_PGNO(prevp) = argp->pgno;
 		/*
 		 * If this was a compensating transaction and
 		 * we are a replica, then we never executed the
 		 * original allocation which incremented meta->free.
 		 */
-		if (meta->last_pgno < meta->free)
+		if (prevp == NULL && meta->last_pgno < meta->free)
 			meta->last_pgno = meta->free;
 #endif
 		LSN(meta) = *lsnp;
 		meta_modified = 1;
 	} else if (cmp_n == 0 && DB_UNDO(op)) {
 		/* Need to undo the deallocation. */
-		meta->free = argp->next;
+		if (prevp == NULL)
+			meta->free = argp->next;
+		else
+			NEXT_PGNO(prevp) = argp->next;
 		LSN(meta) = argp->meta_lsn;
-		if (meta->last_pgno < argp->pgno)
+		if (prevp == NULL && meta->last_pgno < argp->pgno)
 			meta->last_pgno = argp->pgno;
 		meta_modified = 1;
 	}
 
+check_meta:
+	if (ret != 0 && is_meta) {
+		/* The metadata page must always exist. */
+		ret = __db_pgerr(file_dbp, argp->meta_pgno, ret);
+		goto out;
+	}
+
 	/*
 	 * Get the freed page.  If we support truncate then don't
 	 * create the page if we are going to free it.  If we're
@@ -661,7 +703,7 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 	 * and roll it back.
 	 */
 #ifdef HAVE_FTRUNCATE
-	if (DB_REDO(op) || meta->last_pgno < argp->pgno) {
+	if (DB_REDO(op) || (is_meta && meta->last_pgno < argp->pgno)) {
 		if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
 			if (ret == DB_PAGE_NOTFOUND)
 				goto done;
@@ -687,14 +729,19 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 		cmp_p = 0;
 #endif
 
-	CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), ©_lsn);
 	if (DB_REDO(op) &&
 	    (cmp_p == 0 ||
 	    (IS_ZERO_LSN(copy_lsn) &&
 	    log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
 		/* Need to redo the deallocation. */
 #ifdef HAVE_FTRUNCATE
-		if (meta->last_pgno <= argp->pgno) {
+		/*
+		 * The page can be truncated if it was truncated at runtime
+		 * and the current metapage reflects the truncation.
+		 */
+		if (is_meta && meta->last_pgno <= argp->pgno &&
+		    argp->last_pgno <= argp->pgno) {
 			if ((ret =
 			    __memp_fput(mpf, pagep, DB_MPOOL_DISCARD)) != 0)
 				goto out;
@@ -720,7 +767,7 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 		/* Need to reallocate the page. */
 		memcpy(pagep, argp->header.data, argp->header.size);
 		if (data)
-			memcpy((u_int8_t*)pagep + pagep->hf_offset,
+			memcpy((u_int8_t*)pagep + HOFFSET(pagep),
 			     argp->data.data, argp->data.size);
 
 		modified = 1;
@@ -731,9 +778,38 @@ __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, data)
 
 	pagep = NULL;
 #ifdef HAVE_FTRUNCATE
+	/*
+	 * If we are keeping an in memory free list remove this
+	 * element from the list.
+	 */
+	if (op == DB_TXN_ABORT && argp->pgno != argp->last_pgno) {
+		db_pgno_t *lp;
+		u_int32_t nelem, pos;
+
+		if ((ret = __memp_get_freelist(mpf, &nelem, &lp)) != 0)
+			goto out;
+		if (lp != NULL) {
+			pos = 0;
+			if (!is_meta && nelem != 0) {
+				__db_freelist_pos(argp->pgno, lp, nelem, &pos);
+
+				DB_ASSERT(argp->pgno == lp[pos]);
+				DB_ASSERT(argp->meta_pgno == lp[pos - 1]);
+			}
+
+			if (nelem != 0 && pos != nelem)
+				memmove(&lp[pos], &lp[pos + 1],
+				    (nelem - pos) * sizeof(*lp));
+
+			/* Shrink the list */
+			if ((ret =
+			    __memp_extend_freelist(mpf, nelem - 1, &lp)) != 0)
+				goto out;
+		}
+	}
 done:
 #endif
-	if ((ret = __memp_fput(mpf,
+	if (meta != NULL && (ret = __memp_fput(mpf,
 	     meta, meta_modified ? DB_MPOOL_DIRTY : 0)) != 0)
 		goto out;
 	meta = NULL;
@@ -771,7 +847,7 @@ __db_pg_free_recover(dbenv, dbtp, lsnp, op, info)
 
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_pg_free_print);
-	REC_INTRO(__db_pg_free_read, 1);
+	REC_INTRO(__db_pg_free_read, 1, 0);
 
 	ret = __db_pg_free_recover_int(dbenv,
 	     (__db_pg_freedata_args *)argp, file_dbp, lsnp, mpf, op, 0);
@@ -805,7 +881,7 @@ __db_pg_new_recover(dbenv, dbtp, lsnp, op, info)
 	int ret;
 
 	REC_PRINT(__db_pg_free_print);
-	REC_INTRO(__db_pg_free_read, 1);
+	REC_INTRO(__db_pg_free_read, 1, 0);
 	COMPQUIET(op, DB_TXN_ABORT);
 
 	if ((ret =
@@ -848,7 +924,7 @@ __db_pg_freedata_recover(dbenv, dbtp, lsnp, op, info)
 
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_pg_freedata_print);
-	REC_INTRO(__db_pg_freedata_read, 1);
+	REC_INTRO(__db_pg_freedata_read, 1, 0);
 
 	ret = __db_pg_free_recover_int(dbenv, argp, file_dbp, lsnp, mpf, op, 1);
 
@@ -925,7 +1001,7 @@ __db_pg_prepare_recover(dbenv, dbtp, lsnp, op, info)
 	int ret, t_ret;
 
 	REC_PRINT(__db_pg_prepare_print);
-	REC_INTRO(__db_pg_prepare_read, 1);
+	REC_INTRO(__db_pg_prepare_read, 1, 0);
 
 	mpf = file_dbp->mpf;
 
@@ -993,11 +1069,11 @@ __db_pg_init_recover(dbenv, dbtp, lsnp, op, info)
 	DB_LSN copy_lsn;
 	DB_MPOOLFILE *mpf;
 	PAGE *pagep;
-	int cmp_n, cmp_p, modified, ret;
+	int cmp_n, cmp_p, modified, ret, type;
 
 	COMPQUIET(info, NULL);
 	REC_PRINT(__db_pg_init_print);
-	REC_INTRO(__db_pg_init_read, 1);
+	REC_INTRO(__db_pg_init_read, 1, 0);
 
 	mpf = file_dbp->mpf;
 	REC_FGET(mpf, argp->pgno, &pagep, done);
@@ -1006,18 +1082,22 @@ __db_pg_init_recover(dbenv, dbtp, lsnp, op, info)
 	(void)__ua_memcpy(©_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), ©_lsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), ©_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), ©_lsn);
 
 	if (cmp_p == 0 && DB_REDO(op)) {
+		if (TYPE(pagep) == P_HASH)
+			type = P_HASH;
+		else
+			type = file_dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE;
 		P_INIT(pagep, file_dbp->pgsize, PGNO(pagep), PGNO_INVALID,
-		    PGNO_INVALID, TYPE(pagep) == P_HASH ? 0 : 1, TYPE(pagep));
+		    PGNO_INVALID, TYPE(pagep) == P_HASH ? 0 : 1, type);
 		pagep->lsn = *lsnp;
 		modified = 1;
 	} else if (cmp_n == 0 && DB_UNDO(op)) {
 		/* Put the data back on the page. */
 		memcpy(pagep, argp->header.data, argp->header.size);
 		if (argp->data.size > 0)
-			memcpy((u_int8_t*)pagep + pagep->hf_offset,
+			memcpy((u_int8_t*)pagep + HOFFSET(pagep),
 			     argp->data.data, argp->data.size);
 
 		modified = 1;
@@ -1029,3 +1109,158 @@ done:	*lsnp = argp->prev_lsn;
 out:
 	REC_CLOSE;
 }
+
+/*
+ * __db_pg_sort_recover --
+ *	Recovery function for pg_sort.
+ *
+ * PUBLIC: int __db_pg_sort_recover
+ * PUBLIC:   __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_sort_recover(dbenv, dbtp, lsnp, op, info)
+	DB_ENV *dbenv;
+	DBT *dbtp;
+	DB_LSN *lsnp;
+	db_recops op;
+	void *info;
+{
+#ifdef HAVE_FTRUNCATE
+	__db_pg_sort_args *argp;
+	DB *file_dbp;
+	DBC *dbc;
+	DBMETA *meta;
+	DB_MPOOLFILE *mpf;
+	PAGE *pagep;
+	db_pgno_t pgno, *list;
+	u_int32_t felem, nelem;
+	struct pglist *pglist, *lp;
+	int modified, ret;
+
+	COMPQUIET(info, NULL);
+
+	REC_PRINT(__db_pg_sort_print);
+	REC_INTRO(__db_pg_sort_read, 1, 1);
+
+	modified = 0;
+
+	pglist = (struct pglist *) argp->list.data;
+	nelem = argp->list.size / sizeof(struct pglist);
+	if (DB_REDO(op)) {
+		pgno = argp->last_pgno;
+		if ((ret = __db_pg_truncate(mpf,
+		    pglist, NULL, &nelem, &pgno, lsnp, 1)) != 0)
+			goto out;
+
+		if (argp->last_free != PGNO_INVALID) {
+			if ((ret = __memp_fget(mpf,
+			     &argp->last_free, 0, &meta)) == 0) {
+				if (log_compare(&LSN(meta),
+				     &argp->last_lsn) == 0) {
+					NEXT_PGNO(meta) = PGNO_INVALID;
+					LSN(meta) = *lsnp;
+					modified = 1;
+				}
+				if ((ret = __memp_fput(mpf,
+				     meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+					goto out;
+				meta = NULL;
+				modified = 0;
+			} else if (ret != DB_PAGE_NOTFOUND)
+				goto out;
+		}
+		if ((ret = __memp_fget(mpf, &argp->meta, 0, &meta)) != 0)
+			goto out;
+		if (log_compare(&LSN(meta), &argp->meta_lsn) == 0) {
+			if (argp->last_free == PGNO_INVALID) {
+				if (nelem == 0)
+					meta->free = PGNO_INVALID;
+				else
+					meta->free = pglist->pgno;
+			}
+			meta->last_pgno = pgno;
+			LSN(meta) = *lsnp;
+			modified = 1;
+		}
+	} else {
+		/* Put the free list back in its original order. */
+		for (lp = pglist; lp < &pglist[nelem]; lp++) {
+			if ((ret = __memp_fget(mpf,
+			     &lp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+				goto out;
+			if (IS_ZERO_LSN(LSN(pagep)) ||
+			     log_compare(&LSN(pagep), lsnp) == 0) {
+				if (lp == &pglist[nelem - 1])
+					pgno = PGNO_INVALID;
+				else
+					pgno = lp[1].pgno;
+
+				P_INIT(pagep, file_dbp->pgsize,
+				    lp->pgno, PGNO_INVALID, pgno, 0, P_INVALID);
+				LSN(pagep) = lp->lsn;
+				modified = 1;
+			}
+			if ((ret = __memp_fput(mpf,
+			     pagep, modified ? DB_MPOOL_DIRTY: 0)) != 0)
+				goto out;
+		}
+		if (argp->last_free != PGNO_INVALID) {
+			if ((ret = __memp_fget(mpf,
+			     &argp->last_free, 0, &meta)) == 0) {
+				if (log_compare(&LSN(meta), lsnp) == 0) {
+					NEXT_PGNO(meta) = pglist->pgno;
+					LSN(meta) = argp->last_lsn;
+					modified = 1;
+				}
+				if ((ret = __memp_fput(mpf,
+				     meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+					goto out;
+			} else if (ret != DB_PAGE_NOTFOUND)
+				goto out;
+			modified = 0;
+			meta = NULL;
+		}
+		if ((ret = __memp_fget(mpf, &argp->meta, 0, &meta)) != 0)
+			goto out;
+		if (log_compare(&LSN(meta), lsnp) == 0) {
+			meta->last_pgno = argp->last_pgno;
+			if (argp->last_pgno == PGNO_INVALID)
+				meta->free = pglist->pgno;
+			LSN(meta) = argp->meta_lsn;
+			modified = 1;
+		}
+	}
+	if (op == DB_TXN_ABORT) {
+		if ((ret = __memp_get_freelist(mpf, &felem, &list)) != 0)
+			goto out;
+		if (list != NULL) {
+			DB_ASSERT(felem == 0 ||
+			    argp->last_free == list[felem - 1]);
+			if ((ret = __memp_extend_freelist(
+			    mpf, felem + nelem, &list)) != 0)
+				goto out;
+			for (lp = pglist; lp < &pglist[nelem]; lp++)
+				list[felem++] = lp->pgno;
+		}
+	}
+
+	if ((ret = __memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+		goto out;
+
+done:	*lsnp = argp->prev_lsn;
+	ret = 0;
+
+out:	REC_CLOSE;
+#else
+	/*
+	 * If HAVE_FTRUNCATE is not defined, we'll never see pg_sort records
+	 * to recover.
+	 */
+	COMPQUIET(dbenv, NULL);
+	COMPQUIET(dbtp, NULL);
+	COMPQUIET(lsnp, NULL);
+	COMPQUIET(op,  DB_TXN_ABORT);
+	COMPQUIET(info, NULL);
+	return (EINVAL);
+#endif
+}
diff --git a/storage/bdb/db/db_reclaim.c b/storage/bdb/db/db_reclaim.c
index 4795b8caa08..ed68bc6eae7 100644
--- a/storage/bdb/db/db_reclaim.c
+++ b/storage/bdb/db/db_reclaim.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_reclaim.c,v 11.42 2004/06/10 04:46:44 ubell Exp $
+ * $Id: db_reclaim.c,v 12.2 2005/06/16 20:21:14 bostic Exp $
  */
 
 #include "db_config.h"
@@ -209,8 +209,8 @@ reinit:			*putp = 0;
 				ldbt.data = p;
 				ldbt.size = P_OVERHEAD(dbp);
 				ldbt.size += p->entries * sizeof(db_indx_t);
-				ddbt.data = (u_int8_t *)p + p->hf_offset;
-				ddbt.size = dbp->pgsize - p->hf_offset;
+				ddbt.data = (u_int8_t *)p + HOFFSET(p);
+				ddbt.size = dbp->pgsize - HOFFSET(p);
 				if ((ret = __db_pg_init_log(dbp,
 				    param->dbc->txn, &LSN(p), 0,
 				    p->pgno, &ldbt, &ddbt)) != 0)
diff --git a/storage/bdb/db/db_remove.c b/storage/bdb/db/db_remove.c
index 5497c5e011d..c37c1876dd7 100644
--- a/storage/bdb/db/db_remove.c
+++ b/storage/bdb/db/db_remove.c
@@ -1,16 +1,18 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_remove.c,v 11.219 2004/09/16 17:55:17 margo Exp $
+ * $Id: db_remove.c,v 12.16 2005/10/27 01:25:53 mjc Exp $
  */
 
 #include "db_config.h"
 
 #ifndef NO_SYSTEM_INCLUDES
 #include 
+
+#include 
 #endif
 
 #include "db_int.h"
@@ -20,55 +22,70 @@
 #include "dbinc/hash.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/lock.h"
+#include "dbinc/mp.h"
+#include "dbinc/txn.h"
 
-static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *));
+static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *, const char *));
 static int __db_subdb_remove __P((DB *, DB_TXN *, const char *, const char *));
 
 /*
- * __dbenv_dbremove_pp
+ * __env_dbremove_pp
  *	DB_ENV->dbremove pre/post processing.
  *
- * PUBLIC: int __dbenv_dbremove_pp __P((DB_ENV *,
+ * PUBLIC: int __env_dbremove_pp __P((DB_ENV *,
  * PUBLIC:     DB_TXN *, const char *, const char *, u_int32_t));
  */
 int
-__dbenv_dbremove_pp(dbenv, txn, name, subdb, flags)
+__env_dbremove_pp(dbenv, txn, name, subdb, flags)
 	DB_ENV *dbenv;
 	DB_TXN *txn;
 	const char *name, *subdb;
 	u_int32_t flags;
 {
 	DB *dbp;
+	DB_THREAD_INFO *ip;
 	int handle_check, ret, t_ret, txn_local;
 
+	dbp = NULL;
+	txn_local = 0;
+
 	PANIC_CHECK(dbenv);
 	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbremove");
 
-	/* Validate arguments. */
+	/*
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 */
 	if ((ret = __db_fchk(dbenv, "DB->remove", flags, DB_AUTO_COMMIT)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
 	/*
 	 * Create local transaction as necessary, check for consistent
 	 * transaction usage.
 	 */
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+	if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) {
 		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
+			goto err;
 		txn_local = 1;
-	} else {
-		if (txn != NULL && !TXN_ON(dbenv))
-			return (__db_not_txn_env(dbenv));
-		txn_local = 0;
-	}
+	} else
+		if (txn != NULL && !TXN_ON(dbenv)) {
+			ret = __db_not_txn_env(dbenv);
+			goto err;
+		}
+	LF_CLR(DB_AUTO_COMMIT);
 
 	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 		goto err;
 
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, txn != NULL)) != 0)
-		goto err;
-
 	ret = __db_remove_int(dbp, txn, name, subdb, flags);
 
 	if (txn_local) {
@@ -90,19 +107,27 @@ __dbenv_dbremove_pp(dbenv, txn, name, subdb, flags)
 		 dbp->lid = DB_LOCK_INVALIDID;
 	}
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
-err:	if (txn_local)
-		ret = __db_txn_auto_resolve(dbenv, txn, 0, ret);
+err:	if (txn_local && (t_ret =
+	    __db_txn_auto_resolve(dbenv, txn, 0, ret)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/*
 	 * We never opened this dbp for real, so don't include a transaction
 	 * handle, and use NOSYNC to avoid calling into mpool.
+	 *
+	 * !!!
+	 * Note we're reversing the order of operations: we started the txn and
+	 * then opened the DB handle; we're resolving the txn and then closing
+	 * closing the DB handle -- it's safer.
 	 */
-	if ((t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0)
+	if (dbp != NULL &&
+	    (t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0)
 		ret = t_ret;
 
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -120,7 +145,8 @@ __db_remove_pp(dbp, name, subdb, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
@@ -149,15 +175,21 @@ __db_remove_pp(dbp, name, subdb, flags)
 	if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
 		return (ret);
 
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0)
-		return (ret);
+	ENV_ENTER(dbenv, ip);
+
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	/* Remove the file. */
 	ret = __db_remove(dbp, NULL, name, subdb, flags);
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -206,15 +238,23 @@ __db_remove_int(dbp, txn, name, subdb, flags)
 	dbenv = dbp->dbenv;
 	real_name = tmpname = NULL;
 
-	/* Handle subdatabase removes separately. */
-	if (subdb != NULL) {
+	if (name == NULL && subdb == NULL) {
+		__db_err(dbenv, "Remove on temporary files invalid");
+		ret = EINVAL;
+		goto err;
+	}
+
+	if (name == NULL) {
+		MAKE_INMEM(dbp);
+		real_name = (char *)subdb;
+	} else if (subdb != NULL) {
 		ret = __db_subdb_remove(dbp, txn, name, subdb);
 		goto err;
 	}
 
 	/* Handle transactional file removes separately. */
 	if (txn != NULL) {
-		ret = __db_dbtxn_remove(dbp, txn, name);
+		ret = __db_dbtxn_remove(dbp, txn, name, subdb);
 		goto err;
 	}
 
@@ -223,15 +263,16 @@ __db_remove_int(dbp, txn, name, subdb, flags)
 	 *
 	 * Find the real name of the file.
 	 */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+	if (!F_ISSET(dbp, DB_AM_INMEM) && (ret =
+	    __db_appname(dbenv, DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
 		goto err;
 
 	/*
-	 * If force is set, remove the temporary file.  Ignore errors because
-	 * the backup file might not exist.
+	 * If this is a file and force is set, remove the temporary file, which
+	 * may have been left around.  Ignore errors because the temporary file
+	 * might not exist.
 	 */
-	if (LF_ISSET(DB_FORCE) &&
+	if (!F_ISSET(dbp, DB_AM_INMEM) && LF_ISSET(DB_FORCE) &&
 	    (ret = __db_backup_name(dbenv, real_name, NULL, &tmpname)) == 0)
 		(void)__os_unlink(dbenv, tmpname);
 
@@ -242,10 +283,12 @@ __db_remove_int(dbp, txn, name, subdb, flags)
 	    (ret = dbp->db_am_remove(dbp, NULL, name, subdb)) != 0)
 		goto err;
 
-	ret = __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA,
+	ret = F_ISSET(dbp, DB_AM_INMEM) ?
+	    __db_inmem_remove(dbp, NULL, real_name) :
+	    __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA,
 	    F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0);
 
-err:	if (real_name != NULL)
+err:	if (!F_ISSET(dbp, DB_AM_INMEM) && real_name != NULL)
 		__os_free(dbenv, real_name);
 	if (tmpname != NULL)
 		__os_free(dbenv, tmpname);
@@ -253,6 +296,78 @@ err:	if (real_name != NULL)
 	return (ret);
 }
 
+/*
+ * __db_inmem_remove --
+ *	Removal of a named in-memory database.
+ * PUBLIC: int __db_inmem_remove __P((DB *, DB_TXN *, const char *));
+ */
+int
+__db_inmem_remove(dbp, txn, name)
+	DB *dbp;
+	DB_TXN *txn;
+	const char *name;
+{
+	DB_ENV *dbenv;
+	DB_LSN lsn;
+	DBT fid_dbt, name_dbt;
+	u_int32_t locker;
+	int ret;
+
+	dbenv = dbp->dbenv;
+	locker = DB_LOCK_INVALIDID;
+
+	DB_ASSERT(name != NULL);
+
+	/* This had better exist if we are trying to do a remove. */
+	(void)__memp_set_flags(dbp->mpf, DB_MPOOL_NOFILE, 1);
+	if ((ret = __memp_fopen(dbp->mpf, NULL, name, 0, 0, 0)) != 0)
+		return (ret);
+	if ((ret = __memp_get_fileid(dbp->mpf, dbp->fileid)) != 0)
+		goto err;
+	dbp->preserve_fid = 1;
+
+	if (LOCKING_ON(dbenv)) {
+		if (dbp->lid == DB_LOCK_INVALIDID &&
+		    (ret = __lock_id(dbenv, &dbp->lid, NULL)) != 0)
+			goto err;
+		locker = txn == NULL ? dbp->lid : txn->txnid;
+	}
+
+	/*
+	 * In a transactional environment, we'll play the same game
+	 * that we play for databases in the file system -- create a
+	 * temporary database and put it in with the current name
+	 * and then rename this one to another name.  We'll then use
+	 * a commit-time event to remove the entry.
+	 */
+
+	if ((ret = __fop_lock_handle(dbenv,
+	    dbp, locker, DB_LOCK_WRITE, NULL, 0)) != 0)
+		goto err;
+
+	if (LOGGING_ON(dbenv)) {
+		memset(&fid_dbt, 0, sizeof(fid_dbt));
+		fid_dbt.data = dbp->fileid;
+		fid_dbt.size = DB_FILE_ID_LEN;
+		memset(&name_dbt, 0, sizeof(name_dbt));
+		name_dbt.data = (void *)name;
+		name_dbt.size = (u_int32_t)strlen(name) + 1;
+
+		if (txn != NULL && (ret =
+		    __txn_remevent(dbenv, txn, name, dbp->fileid, 1)) != 0)
+			goto err;
+
+		if ((ret = __crdel_inmem_remove_log(dbenv,
+		    txn, &lsn, 0, &name_dbt, &fid_dbt)) != 0)
+			goto err;
+	}
+
+	if (txn == NULL)
+		ret = __memp_nameop(dbenv, dbp->fileid, NULL, name, NULL, 1);
+
+err:	return (ret);
+}
+
 /*
  * __db_subdb_remove --
  *	Remove a subdatabase.
@@ -323,10 +438,10 @@ err:
 }
 
 static int
-__db_dbtxn_remove(dbp, txn, name)
+__db_dbtxn_remove(dbp, txn, name, subdb)
 	DB *dbp;
 	DB_TXN *txn;
-	const char *name;
+	const char *name, *subdb;
 {
 	DB_ENV *dbenv;
 	int ret;
@@ -336,27 +451,32 @@ __db_dbtxn_remove(dbp, txn, name)
 	tmpname = NULL;
 
 	/*
-	 * This is a transactional rename, so we have to keep the name
+	 * This is a transactional remove, so we have to keep the name
 	 * of the file locked until the transaction commits.  As a result,
 	 * we implement remove by renaming the file to some other name
 	 * (which creates a dummy named file as a placeholder for the
 	 * file being rename/dremoved) and then deleting that file as
 	 * a delayed remove at commit.
 	 */
-	if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+	if ((ret = __db_backup_name(dbenv,
+	    F_ISSET(dbp, DB_AM_INMEM) ? subdb : name, txn, &tmpname)) != 0)
 		return (ret);
 
 	DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
 
-	if ((ret = __db_rename_int(dbp, txn, name, NULL, tmpname)) != 0)
+	if ((ret = __db_rename_int(dbp, txn, name, subdb, tmpname)) != 0)
 		goto err;
 
-	/* The internal removes will also translate into delayed removes. */
+	/*
+	 * The internal removes will also translate into delayed removes.
+	 */
 	if (dbp->db_am_remove != NULL &&
 	    (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL)) != 0)
 		goto err;
 
-	ret = __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA,
+	ret = F_ISSET(dbp, DB_AM_INMEM) ?
+	     __db_inmem_remove(dbp, txn, tmpname) :
+	    __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA,
 	    F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0);
 
 	DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
diff --git a/storage/bdb/db/db_rename.c b/storage/bdb/db/db_rename.c
index 12f1f22760a..827d772751d 100644
--- a/storage/bdb/db/db_rename.c
+++ b/storage/bdb/db/db_rename.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_rename.c,v 11.216 2004/09/16 17:55:17 margo Exp $
+ * $Id: db_rename.c,v 12.11 2005/10/07 20:21:22 ubell Exp $
  */
 
 #include "db_config.h"
@@ -23,74 +23,66 @@
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
 
-static int __dbenv_dbrename __P((DB_ENV *,
-	       DB_TXN *, const char *, const char *, const char *, int));
 static int __db_subdb_rename __P((DB *,
 	       DB_TXN *, const char *, const char *, const char *));
 
 /*
- * __dbenv_dbrename_pp
+ * __env_dbrename_pp
  *	DB_ENV->dbrename pre/post processing.
  *
- * PUBLIC: int __dbenv_dbrename_pp __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: int __env_dbrename_pp __P((DB_ENV *, DB_TXN *,
  * PUBLIC:     const char *, const char *, const char *, u_int32_t));
  */
 int
-__dbenv_dbrename_pp(dbenv, txn, name, subdb, newname, flags)
+__env_dbrename_pp(dbenv, txn, name, subdb, newname, flags)
 	DB_ENV *dbenv;
 	DB_TXN *txn;
 	const char *name, *subdb, *newname;
 	u_int32_t flags;
 {
-	int ret, txn_local;
+	DB *dbp;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret, txn_local;
+
+	dbp = NULL;
+	txn_local = 0;
 
 	PANIC_CHECK(dbenv);
 	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbrename");
 
-	/* Validate arguments. */
+	/*
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 */
 	if ((ret = __db_fchk(dbenv, "DB->rename", flags, DB_AUTO_COMMIT)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
 	/*
 	 * Create local transaction as necessary, check for consistent
 	 * transaction usage.
 	 */
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+	if (IS_ENV_AUTO_COMMIT(dbenv, txn, flags)) {
 		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
-			return (ret);
+			goto err;
 		txn_local = 1;
-	} else {
-		if (txn != NULL && !TXN_ON(dbenv))
-			return (__db_not_txn_env(dbenv));
-		txn_local = 0;
-	}
+	} else
+		if (txn != NULL && !TXN_ON(dbenv)) {
+			ret = __db_not_txn_env(dbenv);
+			goto err;
+		}
 
-	ret = __dbenv_dbrename(dbenv, txn, name, subdb, newname, txn_local);
-
-	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
-}
-
-/*
- * __dbenv_dbrename
- *	DB_ENV->dbrename.
- */
-static int
-__dbenv_dbrename(dbenv, txn, name, subdb, newname, txn_local)
-	DB_ENV *dbenv;
-	DB_TXN *txn;
-	const char *name, *subdb, *newname;
-	int txn_local;
-{
-	DB *dbp;
-	int handle_check, ret, t_ret;
+	LF_CLR(DB_AUTO_COMMIT);
 
 	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
-		return (ret);
-	if (txn != NULL)
-		F_SET(dbp, DB_AM_TXN);
-
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, txn != NULL)) != 0)
 		goto err;
 
 	ret = __db_rename_int(dbp, txn, name, subdb, newname);
@@ -114,13 +106,27 @@ __dbenv_dbrename(dbenv, txn, name, subdb, newname, txn_local)
 		 dbp->lid = DB_LOCK_INVALIDID;
 	}
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
-err:
-	if ((t_ret = __db_close(dbp, txn, DB_NOSYNC)) != 0 && ret == 0)
+err:	if (txn_local && (t_ret =
+	    __db_txn_auto_resolve(dbenv, txn, 0, ret)) != 0 && ret == 0)
 		ret = t_ret;
 
+	/*
+	 * We never opened this dbp for real, so don't include a transaction
+	 * handle, and use NOSYNC to avoid calling into mpool.
+	 *
+	 * !!!
+	 * Note we're reversing the order of operations: we started the txn and
+	 * then opened the DB handle; we're resolving the txn and then
+	 * closing the DB handle -- it's safer.
+	 */
+	if (dbp != NULL &&
+	    (t_ret = __db_close(dbp, NULL, DB_NOSYNC)) != 0 && ret == 0)
+		ret = t_ret;
+
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -138,7 +144,8 @@ __db_rename_pp(dbp, name, subdb, newname, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 	handle_check = 0;
@@ -155,20 +162,20 @@ __db_rename_pp(dbp, name, subdb, newname, flags)
 	 * a database -- we'll destroy the handle, and the application won't
 	 * ever be able to close the database.
 	 */
-	if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
-		ret = __db_mi_open(dbenv, "DB->rename", 1);
-		goto err;
-	}
+	if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+		return (__db_mi_open(dbenv, "DB->rename", 1));
 
 	/* Validate arguments. */
 	if ((ret = __db_fchk(dbenv, "DB->rename", flags, 0)) != 0)
-		goto err;
+		return (ret);
 
 	/* Check for consistent transaction usage. */
 	if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
-		goto err;
+		return (ret);
 
-	handle_check = IS_REPLICATED(dbenv, dbp);
+	ENV_ENTER(dbenv, ip);
+
+	handle_check = IS_ENV_REPLICATED(dbenv);
 	if (handle_check && (ret = __db_rep_enter(dbp, 1, 1, 0)) != 0) {
 		handle_check = 0;
 		goto err;
@@ -177,9 +184,9 @@ __db_rename_pp(dbp, name, subdb, newname, flags)
 	/* Rename the file. */
 	ret = __db_rename(dbp, NULL, name, subdb, newname);
 
-err:	if (handle_check)
-		__env_db_rep_exit(dbenv);
-
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -222,26 +229,40 @@ __db_rename_int(dbp, txn, name, subdb, newname)
 {
 	DB_ENV *dbenv;
 	int ret;
-	char *real_name;
+	char *old, *real_name;
 
 	dbenv = dbp->dbenv;
 	real_name = NULL;
 
 	DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
 
-	if (subdb != NULL) {
+	if (name == NULL && subdb == NULL) {
+		__db_err(dbenv, "Rename on temporary files invalid");
+		ret = EINVAL;
+		goto err;
+	}
+
+	if (name == NULL)
+		MAKE_INMEM(dbp);
+	else if (subdb != NULL) {
 		ret = __db_subdb_rename(dbp, txn, name, subdb, newname);
 		goto err;
 	}
 
 	/*
-	 * From here on down, this pertains to files.
+	 * From here on down, this pertains to files or in-memory databases.
 	 *
 	 * Find the real name of the file.
 	 */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
-		goto err;
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
+		old = (char *)subdb;
+		real_name = (char *)subdb;
+	} else {
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+			goto err;
+		old = (char *)name;
+	}
 
 	if ((ret = __fop_remove_setup(dbp, txn, real_name, 0)) != 0)
 		goto err;
@@ -259,10 +280,10 @@ __db_rename_int(dbp, txn, name, subdb, newname)
 	 * taken care of in the fop layer.
 	 */
 	if (txn != NULL) {
-		if ((ret = __fop_dummy(dbp, txn, name, newname, 0)) != 0)
+		if ((ret = __fop_dummy(dbp, txn, old, newname, 0)) != 0)
 			goto err;
 	} else {
-		if ((ret = __fop_dbrename(dbp, name, newname)) != 0)
+		if ((ret = __fop_dbrename(dbp, old, newname)) != 0)
 			goto err;
 	}
 
@@ -276,7 +297,7 @@ __db_rename_int(dbp, txn, name, subdb, newname)
 	DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, newname);
 
 DB_TEST_RECOVERY_LABEL
-err:	if (real_name != NULL)
+err:	if (!F_ISSET(dbp, DB_AM_INMEM) && real_name != NULL)
 		__os_free(dbenv, real_name);
 
 	return (ret);
diff --git a/storage/bdb/db/db_ret.c b/storage/bdb/db/db_ret.c
index 99421eccc18..39446ea443c 100644
--- a/storage/bdb/db/db_ret.c
+++ b/storage/bdb/db/db_ret.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_ret.c,v 11.26 2004/02/05 02:25:13 mjc Exp $
+ * $Id: db_ret.c,v 12.1 2005/06/16 20:21:14 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_setid.c b/storage/bdb/db/db_setid.c
index fffba1c6dcb..4ba3ae9b4d2 100644
--- a/storage/bdb/db/db_setid.c
+++ b/storage/bdb/db/db_setid.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_setid.c,v 1.6 2004/09/24 13:41:08 bostic Exp $
+ * $Id: db_setid.c,v 12.8 2005/10/18 14:17:08 mjc Exp $
  */
 
 #include "db_config.h"
@@ -17,20 +17,64 @@
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
 #include "dbinc/db_swap.h"
 #include "dbinc/db_am.h"
+#include "dbinc/mp.h"
+
+static int __env_fileid_reset __P((DB_ENV *, const char *, int));
 
 /*
- * __db_fileid_reset --
- *	Reset the file IDs for every database in the file.
+ * __env_fileid_reset_pp --
+ *	DB_ENV->fileid_reset pre/post processing.
  *
- * PUBLIC: int __db_fileid_reset __P((DB_ENV *, char *, int));
+ * PUBLIC: int __env_fileid_reset_pp __P((DB_ENV *, const char *, u_int32_t));
  */
 int
-__db_fileid_reset(dbenv, name, passwd)
+__env_fileid_reset_pp(dbenv, name, flags)
 	DB_ENV *dbenv;
-	char *name;
-	int passwd;
+	const char *name;
+	u_int32_t flags;
+{
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->fileid_reset");
+
+	/*
+	 * !!!
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 */
+	if (flags != 0 && flags != DB_ENCRYPT)
+		return (__db_ferr(dbenv, "DB_ENV->fileid_reset", 0));
+
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0)
+		goto err;
+
+	ret = __env_fileid_reset(dbenv, name, LF_ISSET(DB_ENCRYPT) ? 1 : 0);
+
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+err:	ENV_LEAVE(dbenv, ip);
+	return (ret);
+}
+
+/*
+ * __env_fileid_reset --
+ *	Reset the file IDs for every database in the file.
+ */
+static int
+__env_fileid_reset(dbenv, name, encrypted)
+	DB_ENV *dbenv;
+	const char *name;
+	int encrypted;
 {
 	DB *dbp;
 	DBC *dbcp;
@@ -47,27 +91,21 @@ __db_fileid_reset(dbenv, name, passwd)
 	real_name = NULL;
 
 	/* Get the real backing file name. */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+	if ((ret =
+	    __db_appname(dbenv, DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
 		return (ret);
 
 	/* Get a new file ID. */
-	if ((ret = __os_fileid(dbenv, real_name, 1, fileid)) != 0) {
-		dbenv->err(dbenv, ret, "unable to get new file ID");
+	if ((ret = __os_fileid(dbenv, real_name, 1, fileid)) != 0)
 		goto err;
-	}
 
 	/* Create the DB object. */
-	if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
-		dbenv->err(dbenv, ret, "db_create");
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 		goto err;
-	}
 
 	/* If configured with a password, the databases are encrypted. */
-	if (passwd && (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
-		dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+	if (encrypted && (ret = __db_set_flags(dbp, DB_ENCRYPT)) != 0)
 		goto err;
-	}
 
 	/*
 	 * Open the DB file.
@@ -76,26 +114,18 @@ __db_fileid_reset(dbenv, name, passwd)
 	 * Note DB_RDWRMASTER flag, we need to open the master database file
 	 * for writing in this case.
 	 */
-	if ((ret = dbp->open(dbp,
-	    NULL, name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0) {
-		dbp->err(dbp, ret, "DB->open: %s", name);
+	if ((ret = __db_open(dbp, NULL,
+	    name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0, PGNO_BASE_MD)) != 0)
 		goto err;
-	}
 
 	mpf = dbp->mpf;
 
 	pgno = PGNO_BASE_MD;
-	if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
-		dbp->err(dbp, ret,
-		    "%s: DB_MPOOLFILE->get: %lu", name, (u_long)pgno);
+	if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0)
 		goto err;
-	}
 	memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN);
-	if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) {
-		dbp->err(dbp, ret,
-		    "%s: DB_MPOOLFILE->put: %lu", name, (u_long)pgno);
+	if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
 		goto err;
-	}
 
 	/*
 	 * If the database file doesn't support subdatabases, we only have
@@ -108,11 +138,9 @@ __db_fileid_reset(dbenv, name, passwd)
 
 	memset(&key, 0, sizeof(key));
 	memset(&data, 0, sizeof(data));
-	if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
-		dbp->err(dbp, ret, "DB->cursor");
+	if ((ret = __db_cursor(dbp, NULL, &dbcp, 0)) != 0)
 		goto err;
-	}
-	while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+	while ((ret = __db_c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
 		/*
 		 * XXX
 		 * We're handling actual data, not on-page meta-data, so it
@@ -121,33 +149,19 @@ __db_fileid_reset(dbenv, name, passwd)
 		 */
 		memcpy(&pgno, data.data, sizeof(db_pgno_t));
 		DB_NTOHL(&pgno);
-		if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
-			dbp->err(dbp, ret,
-			    "%s: DB_MPOOLFILE->get: %lu", name, (u_long)pgno);
+		if ((ret = __memp_fget(mpf, &pgno, 0, &pagep)) != 0)
 			goto err;
-		}
 		memcpy(((DBMETA *)pagep)->uid, fileid, DB_FILE_ID_LEN);
-		if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) {
-			dbp->err(dbp, ret,
-			    "%s: DB_MPOOLFILE->put: %lu", name, (u_long)pgno);
+		if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
 			goto err;
-		}
 	}
 	if (ret == DB_NOTFOUND)
 		ret = 0;
-	else
-		dbp->err(dbp, ret, "DBcursor->get");
 
-err:	if (dbcp != NULL && (t_ret = dbcp->c_close(dbcp)) != 0) {
-		dbp->err(dbp, ret, "DBcursor->close");
-		if (ret == 0)
-			ret = t_ret;
-	}
-	if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0) {
-		dbenv->err(dbenv, ret, "DB->close");
-		if (ret == 0)
-			ret = t_ret;
-	}
+err:	if (dbcp != NULL && (t_ret = __db_c_close(dbcp)) != 0 && ret == 0)
+		ret = t_ret;
+	if (dbp != NULL && (t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0)
+		ret = t_ret;
 	if (real_name != NULL)
 		__os_free(dbenv, real_name);
 
diff --git a/storage/bdb/db/db_setlsn.c b/storage/bdb/db/db_setlsn.c
index 5865798e508..ef07fc49925 100644
--- a/storage/bdb/db/db_setlsn.c
+++ b/storage/bdb/db/db_setlsn.c
@@ -1,35 +1,77 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_setlsn.c,v 1.2 2004/04/27 16:10:13 bostic Exp $
+ * $Id: db_setlsn.c,v 12.8 2005/10/21 19:17:40 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef NO_SYSTEM_INCLUDES
 #include 
-
-#include 
 #endif
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
 #include "dbinc/db_am.h"
+#include "dbinc/mp.h"
+
+static int __env_lsn_reset __P((DB_ENV *, const char *, int));
 
 /*
- * __db_lsn_reset --
- *	Reset the LSNs for every page in the file.
+ * __env_lsn_reset_pp --
+ *	DB_ENV->lsn_reset pre/post processing.
  *
- * PUBLIC: int __db_lsn_reset __P((DB_ENV *, char *, int));
+ * PUBLIC: int __env_lsn_reset_pp __P((DB_ENV *, const char *, u_int32_t));
  */
 int
-__db_lsn_reset(dbenv, name, passwd)
+__env_lsn_reset_pp(dbenv, name, flags)
 	DB_ENV *dbenv;
-	char *name;
-	int passwd;
+	const char *name;
+	u_int32_t flags;
+{
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->lsn_reset");
+
+	/*
+	 * !!!
+	 * The actual argument checking is simple, do it inline, outside of
+	 * the replication block.
+	 */
+	if (flags != 0 && flags != DB_ENCRYPT)
+		return (__db_ferr(dbenv, "DB_ENV->lsn_reset", 0));
+
+	ENV_ENTER(dbenv, ip);
+
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __env_rep_enter(dbenv, 1)) != 0)
+		goto err;
+
+	ret = __env_lsn_reset(dbenv, name, LF_ISSET(DB_ENCRYPT) ? 1 : 0);
+
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+err:	ENV_LEAVE(dbenv, ip);
+	return (ret);
+}
+
+/*
+ * __env_lsn_reset --
+ *	Reset the LSNs for every page in the file.
+ */
+static int
+__env_lsn_reset(dbenv, name, encrypted)
+	DB_ENV *dbenv;
+	const char *name;
+	int encrypted;
 {
 	DB *dbp;
 	DB_MPOOLFILE *mpf;
@@ -38,16 +80,12 @@ __db_lsn_reset(dbenv, name, passwd)
 	int t_ret, ret;
 
 	/* Create the DB object. */
-	if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
-		dbenv->err(dbenv, ret, "db_create");
-		return (1);
-	}
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+		return (ret);
 
 	/* If configured with a password, the databases are encrypted. */
-	if (passwd && (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
-		dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+	if (encrypted && (ret = __db_set_flags(dbp, DB_ENCRYPT)) != 0)
 		goto err;
-	}
 
 	/*
 	 * Open the DB file.
@@ -56,28 +94,23 @@ __db_lsn_reset(dbenv, name, passwd)
 	 * Note DB_RDWRMASTER flag, we need to open the master database file
 	 * for writing in this case.
 	 */
-	if ((ret = dbp->open(dbp,
-	    NULL, name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0) {
-		dbp->err(dbp, ret, "DB->open: %s", name);
+	if ((ret = __db_open(dbp, NULL,
+	    name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0, PGNO_BASE_MD)) != 0)
 		goto err;
-	}
 
 	/* Reset the LSN on every page of the database file. */
 	mpf = dbp->mpf;
-	for (pgno = 0; (ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0; ++pgno) {
+	for (pgno = 0;
+	    (ret = __memp_fget(mpf, &pgno, 0, &pagep)) == 0; ++pgno) {
 		LSN_NOT_LOGGED(pagep->lsn);
-		if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0) {
-			dbp->err(dbp, ret, "DB_MPOOLFILE->put: %s", name);
+		if ((ret = __memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
 			goto err;
-		}
 	}
 
 	if (ret == DB_PAGE_NOTFOUND)
 		ret = 0;
-	else
-		dbp->err(dbp, ret, "DB_MPOOLFILE->get: %s", name);
 
-err:	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+err:	if ((t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0)
 		ret = t_ret;
-	return (ret == 0 ? 0 : 1);
+	return (ret);
 }
diff --git a/storage/bdb/db/db_stati.c b/storage/bdb/db/db_stati.c
index cd73b8ea477..fb0bf4bee4f 100644
--- a/storage/bdb/db/db_stati.c
+++ b/storage/bdb/db/db_stati.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_stati.c,v 11.123 2004/07/19 16:40:51 bostic Exp $
+ * $Id: db_stati.c,v 12.10 2005/11/08 03:13:31 bostic Exp $
  */
 
 #include "db_config.h"
@@ -33,6 +33,7 @@
 #include "dbinc/btree.h"
 #include "dbinc/hash.h"
 #include "dbinc/qam.h"
+#include "dbinc/lock.h"
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
 
@@ -57,7 +58,8 @@ __db_stat_pp(dbp, txn, spp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
@@ -67,17 +69,22 @@ __db_stat_pp(dbp, txn, spp, flags)
 	if ((ret = __db_stat_arg(dbp, flags)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	ret = __db_stat(dbp, txn, spp, flags);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -102,11 +109,11 @@ __db_stat(dbp, txn, spp, flags)
 
 	/* Acquire a cursor. */
 	if ((ret = __db_cursor(dbp, txn,
-	     &dbc, LF_ISSET(DB_DEGREE_2 | DB_DIRTY_READ))) != 0)
+	     &dbc, LF_ISSET(DB_READ_COMMITTED | DB_READ_UNCOMMITTED))) != 0)
 		return (ret);
 
 	DEBUG_LWRITE(dbc, NULL, "DB->stat", NULL, NULL, flags);
-	LF_CLR(DB_DEGREE_2 | DB_DIRTY_READ);
+	LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED);
 
 	switch (dbp->type) {
 	case DB_BTREE:
@@ -145,7 +152,7 @@ __db_stat_arg(dbp, flags)
 	dbenv = dbp->dbenv;
 
 	/* Check for invalid function flags. */
-	LF_CLR(DB_DEGREE_2 | DB_DIRTY_READ);
+	LF_CLR(DB_READ_COMMITTED | DB_READ_UNCOMMITTED);
 	switch (flags) {
 	case 0:
 	case DB_FAST_STAT:
@@ -176,7 +183,8 @@ __db_stat_print_pp(dbp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret;
 
 	dbenv = dbp->dbenv;
 
@@ -187,21 +195,26 @@ __db_stat_print_pp(dbp, flags)
 	 * !!!
 	 * The actual argument checking is simple, do it inline.
 	 */
-	if ((ret = __db_fchk(dbenv, "DB->stat_print",
-	    flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0)
+	if ((ret = __db_fchk(dbenv,
+	    "DB->stat_print", flags, DB_FAST_STAT | DB_STAT_ALL)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
 	/* Check for replication block. */
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0)
-		return (ret);
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
 
 	ret = __db_stat_print(dbp, flags);
 
 	/* Release replication block. */
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
+err:	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -217,16 +230,17 @@ __db_stat_print(dbp, flags)
 	u_int32_t flags;
 {
 	int ret;
+	time_t now;
 
-	if (flags == 0 || LF_ISSET(DB_STAT_ALL)) {
-		ret = __db_print_stats(dbp, flags);
-		if (flags == 0 || ret != 0)
-			return (ret);
-	}
+	(void)time(&now);
+	__db_msg(dbp->dbenv, "%.24s\tLocal time", ctime(&now));
 
 	if (LF_ISSET(DB_STAT_ALL) && (ret = __db_print_all(dbp, flags)) != 0)
 		return (ret);
 
+	if ((ret = __db_print_stats(dbp, flags)) != 0)
+		return (ret);
+
 	return (0);
 }
 
@@ -284,38 +298,37 @@ __db_print_all(dbp, flags)
 	u_int32_t flags;
 {
 	static const FN fn[] = {
-		{ DB_AM_CHKSUM,		"DB_AM_CHKSUM" },
-		{ DB_AM_CL_WRITER,	"DB_AM_CL_WRITER" },
-		{ DB_AM_COMPENSATE,	"DB_AM_COMPENSATE" },
-		{ DB_AM_CREATED,	"DB_AM_CREATED" },
-		{ DB_AM_CREATED_MSTR,	"DB_AM_CREATED_MSTR" },
-		{ DB_AM_DBM_ERROR,	"DB_AM_DBM_ERROR" },
-		{ DB_AM_DELIMITER,	"DB_AM_DELIMITER" },
-		{ DB_AM_DIRTY,		"DB_AM_DIRTY" },
-		{ DB_AM_DISCARD,	"DB_AM_DISCARD" },
-		{ DB_AM_DUP,		"DB_AM_DUP" },
-		{ DB_AM_DUPSORT,	"DB_AM_DUPSORT" },
-		{ DB_AM_ENCRYPT,	"DB_AM_ENCRYPT" },
-		{ DB_AM_FIXEDLEN,	"DB_AM_FIXEDLEN" },
-		{ DB_AM_INMEM,		"DB_AM_INMEM" },
-		{ DB_AM_IN_RENAME,	"DB_AM_IN_RENAME" },
-		{ DB_AM_NOT_DURABLE,	"DB_AM_NOT_DURABLE" },
-		{ DB_AM_OPEN_CALLED,	"DB_AM_OPEN_CALLED" },
-		{ DB_AM_PAD,		"DB_AM_PAD" },
-		{ DB_AM_PGDEF,		"DB_AM_PGDEF" },
-		{ DB_AM_RDONLY,		"DB_AM_RDONLY" },
-		{ DB_AM_RECNUM,		"DB_AM_RECNUM" },
-		{ DB_AM_RECOVER,	"DB_AM_RECOVER" },
-		{ DB_AM_RENUMBER,	"DB_AM_RENUMBER" },
-		{ DB_AM_REPLICATION,	"DB_AM_REPLICATION" },
-		{ DB_AM_REVSPLITOFF,	"DB_AM_REVSPLITOFF" },
-		{ DB_AM_SECONDARY,	"DB_AM_SECONDARY" },
-		{ DB_AM_SNAPSHOT,	"DB_AM_SNAPSHOT" },
-		{ DB_AM_SUBDB,		"DB_AM_SUBDB" },
-		{ DB_AM_SWAP,		"DB_AM_SWAP" },
-		{ DB_AM_TXN,		"DB_AM_TXN" },
-		{ DB_AM_VERIFYING,	"DB_AM_VERIFYING" },
-		{ 0,			NULL }
+		{ DB_AM_CHKSUM,			"DB_AM_CHKSUM" },
+		{ DB_AM_CL_WRITER,		"DB_AM_CL_WRITER" },
+		{ DB_AM_COMPENSATE,		"DB_AM_COMPENSATE" },
+		{ DB_AM_CREATED,		"DB_AM_CREATED" },
+		{ DB_AM_CREATED_MSTR,		"DB_AM_CREATED_MSTR" },
+		{ DB_AM_DBM_ERROR,		"DB_AM_DBM_ERROR" },
+		{ DB_AM_DELIMITER,		"DB_AM_DELIMITER" },
+		{ DB_AM_DISCARD,		"DB_AM_DISCARD" },
+		{ DB_AM_DUP,			"DB_AM_DUP" },
+		{ DB_AM_DUPSORT,		"DB_AM_DUPSORT" },
+		{ DB_AM_ENCRYPT,		"DB_AM_ENCRYPT" },
+		{ DB_AM_FIXEDLEN,		"DB_AM_FIXEDLEN" },
+		{ DB_AM_INMEM,			"DB_AM_INMEM" },
+		{ DB_AM_IN_RENAME,		"DB_AM_IN_RENAME" },
+		{ DB_AM_NOT_DURABLE,		"DB_AM_NOT_DURABLE" },
+		{ DB_AM_OPEN_CALLED,		"DB_AM_OPEN_CALLED" },
+		{ DB_AM_PAD,			"DB_AM_PAD" },
+		{ DB_AM_PGDEF,			"DB_AM_PGDEF" },
+		{ DB_AM_RDONLY,			"DB_AM_RDONLY" },
+		{ DB_AM_READ_UNCOMMITTED,	"DB_AM_READ_UNCOMMITTED" },
+		{ DB_AM_RECNUM,			"DB_AM_RECNUM" },
+		{ DB_AM_RECOVER,		"DB_AM_RECOVER" },
+		{ DB_AM_RENUMBER,		"DB_AM_RENUMBER" },
+		{ DB_AM_REVSPLITOFF,		"DB_AM_REVSPLITOFF" },
+		{ DB_AM_SECONDARY,		"DB_AM_SECONDARY" },
+		{ DB_AM_SNAPSHOT,		"DB_AM_SNAPSHOT" },
+		{ DB_AM_SUBDB,			"DB_AM_SUBDB" },
+		{ DB_AM_SWAP,			"DB_AM_SWAP" },
+		{ DB_AM_TXN,			"DB_AM_TXN" },
+		{ DB_AM_VERIFYING,		"DB_AM_VERIFYING" },
+		{ 0,				NULL }
 	};
 	DB_ENV *dbenv;
 
@@ -331,7 +344,7 @@ __db_print_all(dbp, flags)
 	STAT_ISSET("DbEnv", dbp->dbenv);
 	STAT_STRING("Type", __db_dbtype_to_string(dbp->type));
 
-	__db_print_mutex(dbenv, NULL, dbp->mutexp, "Thread mutex", flags);
+	__mutex_print_debug_single(dbenv, "Thread mutex", dbp->mutex, flags);
 
 	STAT_STRING("File", dbp->fname);
 	STAT_STRING("Database", dbp->dname);
@@ -389,7 +402,7 @@ __db_print_cursor(dbp)
 	__db_msg(dbenv, "DB handle cursors:");
 
 	ret = 0;
-	MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbp->dbenv, dbp->mutex);
 	__db_msg(dbenv, "Active queue:");
 	for (dbc = TAILQ_FIRST(&dbp->active_queue);
 	    dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
@@ -405,7 +418,7 @@ __db_print_cursor(dbp)
 	    dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
 		if ((t_ret = __db_print_citem(dbc)) != 0 && ret == 0)
 			ret = t_ret;
-	MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbp->dbenv, dbp->mutex);
 
 	return (ret);
 }
@@ -417,17 +430,17 @@ int __db_print_citem(dbc)
 	static const FN fn[] = {
 		{ DBC_ACTIVE,		"DBC_ACTIVE" },
 		{ DBC_COMPENSATE,	"DBC_COMPENSATE" },
-		{ DBC_DEGREE_2,		"DBC_DEGREE_2" },
-		{ DBC_DIRTY_READ,	"DBC_DIRTY_READ" },
+		{ DBC_MULTIPLE,		"DBC_MULTIPLE" },
+		{ DBC_MULTIPLE_KEY,	"DBC_MULTIPLE_KEY" },
 		{ DBC_OPD,		"DBC_OPD" },
+		{ DBC_OWN_LID,		"DBC_OWN_LID" },
+		{ DBC_READ_COMMITTED,	"DBC_READ_COMMITTED" },
+		{ DBC_READ_UNCOMMITTED,	"DBC_READ_UNCOMMITTED" },
 		{ DBC_RECOVER,		"DBC_RECOVER" },
 		{ DBC_RMW,		"DBC_RMW" },
 		{ DBC_TRANSIENT,	"DBC_TRANSIENT" },
 		{ DBC_WRITECURSOR,	"DBC_WRITECURSOR" },
 		{ DBC_WRITER,		"DBC_WRITER" },
-		{ DBC_MULTIPLE,		"DBC_MULTIPLE" },
-		{ DBC_MULTIPLE_KEY,	"DBC_MULTIPLE_KEY" },
-		{ DBC_OWN_LID,		"DBC_OWN_LID" },
 		{ 0,			NULL }
 	};
 	DB *dbp;
@@ -438,16 +451,17 @@ int __db_print_citem(dbc)
 	dbenv = dbp->dbenv;
 	cp = dbc->internal;
 
-	STAT_HEX("DBC", dbc);
-	STAT_HEX("Associated dbp", dbc->dbp);
-	STAT_HEX("Associated txn", dbc->txn);
-	STAT_HEX("Internal", cp);
-	STAT_HEX("Default locker ID", dbc->lid);
+	STAT_POINTER("DBC", dbc);
+	STAT_POINTER("Associated dbp", dbc->dbp);
+	STAT_POINTER("Associated txn", dbc->txn);
+	STAT_POINTER("Internal", cp);
+	STAT_HEX("Default locker ID",
+	    dbc->lref == NULL ? 0 : ((DB_LOCKER *)dbc->lref)->id);
 	STAT_HEX("Locker", dbc->locker);
 	STAT_STRING("Type", __db_dbtype_to_string(dbc->dbtype));
 
-	STAT_HEX("Off-page duplicate cursor", cp->opd);
-	STAT_HEX("Referenced page", cp->page);
+	STAT_POINTER("Off-page duplicate cursor", cp->opd);
+	STAT_POINTER("Referenced page", cp->page);
 	STAT_ULONG("Root", cp->root);
 	STAT_ULONG("Page number", cp->pgno);
 	STAT_ULONG("Page index", cp->indx);
diff --git a/storage/bdb/db/db_truncate.c b/storage/bdb/db/db_truncate.c
index 801f3712ffb..c6b740969fb 100644
--- a/storage/bdb/db/db_truncate.c
+++ b/storage/bdb/db/db_truncate.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_truncate.c,v 11.201 2004/07/15 15:52:51 sue Exp $
+ * $Id: db_truncate.c,v 12.10 2005/10/21 19:22:59 bostic Exp $
  */
 
 #include "db_config.h"
@@ -17,10 +17,12 @@
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
-#include "dbinc/log.h"
+#include "dbinc/db_shash.h"
 #include "dbinc/btree.h"
 #include "dbinc/hash.h"
 #include "dbinc/qam.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
 #include "dbinc/txn.h"
 
 static int __db_cursor_check __P((DB *));
@@ -38,22 +40,27 @@ __db_truncate_pp(dbp, txn, countp, flags)
 	u_int32_t *countp, flags;
 {
 	DB_ENV *dbenv;
-	int handle_check, ret, txn_local;
+	DB_THREAD_INFO *ip;
+	int handle_check, ret, t_ret, txn_local;
 
 	dbenv = dbp->dbenv;
+	txn_local = 0;
+	handle_check = 0;
 
 	PANIC_CHECK(dbenv);
+	STRIP_AUTO_COMMIT(flags);
 
 	/* Check for invalid flags. */
 	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
 		__db_err(dbenv,
-		    "DBP->truncate forbidden on secondary indices");
+		    "DB->truncate forbidden on secondary indices");
 		return (EINVAL);
 	}
-	if ((ret =
-	    __db_fchk(dbenv, "DB->truncate", flags, DB_AUTO_COMMIT)) != 0)
+	if ((ret = __db_fchk(dbenv, "DB->truncate", flags, 0)) != 0)
 		return (ret);
 
+	ENV_ENTER(dbenv, ip);
+
 	/*
 	 * Make sure there are no active cursors on this db.  Since we drop
 	 * pages we cannot really adjust cursors.
@@ -61,34 +68,58 @@ __db_truncate_pp(dbp, txn, countp, flags)
 	if (__db_cursor_check(dbp) != 0) {
 		__db_err(dbenv,
 		     "DB->truncate not permitted with active cursors");
-		return (EINVAL);
+		ret = EINVAL;
+		goto err;
+	}
+
+#if CONFIG_TEST
+	if (IS_REP_MASTER(dbenv))
+		DB_TEST_WAIT(dbenv, dbenv->test_check);
+#endif
+	/* Check for replication block. */
+	handle_check = IS_ENV_REPLICATED(dbenv);
+	if (handle_check &&
+	    (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) {
+		handle_check = 0;
+		goto err;
+	}
+
+	/*
+	 * Check for changes to a read-only database.
+	 * This must be after the replication block so that we
+	 * cannot race master/client state changes.
+	 */
+	if (DB_IS_READONLY(dbp)) {
+		ret = __db_rdonly(dbenv, "DB->truncate");
+		goto err;
 	}
 
 	/*
 	 * Create local transaction as necessary, check for consistent
 	 * transaction usage.
 	 */
-	txn_local = 0;
-	if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
-		if ((ret = __db_txn_auto_init(dbenv, &txn)) != 0)
+	if (IS_DB_AUTO_COMMIT(dbp, txn)) {
+		if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0)
 			goto err;
 		txn_local = 1;
-		LF_CLR(DB_AUTO_COMMIT);
-	} else if (txn != NULL && !TXN_ON(dbenv)) {
-		ret = __db_not_txn_env(dbenv);
-		return (ret);
 	}
 
-	handle_check = IS_REPLICATED(dbenv, dbp);
-	if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0)
+	/* Check for consistent transaction usage. */
+	if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
 		goto err;
 
 	ret = __db_truncate(dbp, txn, countp);
 
-	if (handle_check)
-		__env_db_rep_exit(dbenv);
+err:	if (txn_local &&
+	    (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0)
+		ret = t_ret;
 
-err:	return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret);
+	/* Release replication block. */
+	if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
+	return (ret);
 }
 
 /*
@@ -119,8 +149,9 @@ __db_truncate(dbp, txn, countp)
 	 * processing to truncate so it will update the secondaries normally.
 	 */
 	if (dbp->type != DB_QUEUE && LIST_FIRST(&dbp->s_secondaries) != NULL) {
-		for (sdbp = __db_s_first(dbp);
-		    sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp))
+		if ((ret = __db_s_first(dbp, &sdbp)) != 0)
+			return (ret);
+		for (; sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp))
 			if ((ret = __db_truncate(sdbp, txn, &scount)) != 0)
 				break;
 		if (sdbp != NULL)
@@ -180,11 +211,11 @@ __db_cursor_check(dbp)
 
 	dbenv = dbp->dbenv;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	for (found = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (dbc = TAILQ_FIRST(&ldbp->active_queue);
 		    dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
 			if (IS_INITIALIZED(dbc)) {
@@ -192,11 +223,11 @@ __db_cursor_check(dbp)
 				break;
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 		if (found == 1)
 			break;
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	return (found);
 }
diff --git a/storage/bdb/db/db_upg.c b/storage/bdb/db/db_upg.c
index a41a1c49def..674202d5bb7 100644
--- a/storage/bdb/db/db_upg.c
+++ b/storage/bdb/db/db_upg.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_upg.c,v 11.35 2004/03/24 20:37:35 bostic Exp $
+ * $Id: db_upg.c,v 12.1 2005/06/16 20:21:15 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_upg_opd.c b/storage/bdb/db/db_upg_opd.c
index fcae089ad0c..23838be9ca8 100644
--- a/storage/bdb/db/db_upg_opd.c
+++ b/storage/bdb/db/db_upg_opd.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_upg_opd.c,v 11.21 2004/03/19 16:10:26 bostic Exp $
+ * $Id: db_upg_opd.c,v 12.1 2005/06/16 20:21:15 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_vrfy.c b/storage/bdb/db/db_vrfy.c
index d0cd22b6a37..4f33e451099 100644
--- a/storage/bdb/db/db_vrfy.c
+++ b/storage/bdb/db/db_vrfy.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_vrfy.c,v 1.138 2004/10/11 18:47:50 bostic Exp $
+ * $Id: db_vrfy.c,v 12.14 2005/10/07 16:49:47 bostic Exp $
  */
 
 #include "db_config.h"
@@ -120,26 +120,11 @@ __db_verify_internal(dbp, fname, dname, handle, callback, flags)
 		LF_SET(DB_UNREF);
 #endif
 
-	if ((ret = __db_verify_arg(dbp, dname, handle, flags)) != 0)
-		goto err;
-
-	/*
-	 * Forbid working in an environment that uses transactions or
-	 * locking;  we're going to be looking at the file freely,
-	 * and while we're not going to modify it, we aren't obeying
-	 * locking conventions either.
-	 */
-	if (TXN_ON(dbenv) || LOCKING_ON(dbenv) || LOGGING_ON(dbenv)) {
-		__db_err(dbenv,
-    "DB->verify may not be used with transactions, logging, or locking");
-		ret = EINVAL;
-		goto err;
-	}
-
-	ret = __db_verify(dbp, fname, dname, handle, callback, flags);
+	if ((ret = __db_verify_arg(dbp, dname, handle, flags)) == 0)
+		ret = __db_verify(dbp, fname, dname, handle, callback, flags);
 
 	/* Db.verify is a DB handle destructor. */
-err:	if ((t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0)
+	if ((t_ret = __db_close(dbp, NULL, 0)) != 0 && ret == 0)
 		ret = t_ret;
 
 	return (ret);
@@ -184,7 +169,7 @@ __db_verify_arg(dbp, dname, handle, flags)
 		if (LF_ISSET(DB_AGGRESSIVE | DB_PRINTABLE))
 			return (__db_ferr(dbenv, "DB->verify", 1));
 
-	/* 
+	/*
 	 * DB_ORDERCHKONLY is mutually exclusive with DB_SALVAGE and
 	 * DB_NOORDERCHK, and requires a database name.
 	 */
@@ -223,14 +208,14 @@ __db_verify(dbp, name, subdb, handle, callback, flags)
 	DB_ENV *dbenv;
 	DB_FH *fhp;
 	VRFY_DBINFO *vdp;
-	int has, isbad, ret, t_ret;
+	int has_subdbs, isbad, ret, t_ret;
 	char *real_name;
 
 	dbenv = dbp->dbenv;
 	fhp = NULL;
 	vdp = NULL;
 	real_name = NULL;
-	has = ret = isbad = 0;
+	has_subdbs = isbad = ret = 0;
 
 	F_SET(dbp, DB_AM_VERIFYING);
 
@@ -298,8 +283,8 @@ __db_verify(dbp, name, subdb, handle, callback, flags)
 	 * and the mpool--manually.
 	 */
 	if ((ret = __db_dbenv_setup(dbp, NULL,
-	    name, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0)
-		return (ret);
+	    name, subdb, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0)
+		goto err;
 
 	/*
 	 * Set our name in the Queue subsystem;  we may need it later
@@ -307,13 +292,14 @@ __db_verify(dbp, name, subdb, handle, callback, flags)
 	 */
 	if (dbp->type == DB_QUEUE &&
 	    (ret = __qam_set_ext_data(dbp, name)) != 0)
-		return (ret);
+		goto err;
 
 	/* Mark the dbp as opened, so that we correctly handle its close. */
 	F_SET(dbp, DB_AM_OPEN_CALLED);
 
 	/* Find out the page number of the last page in the database. */
-	__memp_last_pgno(dbp->mpf, &vdp->last_pgno);
+	if ((ret = __memp_last_pgno(dbp->mpf, &vdp->last_pgno)) != 0)
+		goto err;
 
 	/*
 	 * DB_ORDERCHKONLY is a special case;  our file consists of
@@ -337,27 +323,25 @@ __db_verify(dbp, name, subdb, handle, callback, flags)
 	 */
 	if (LF_ISSET(DB_SALVAGE)) {
 		if ((ret = __db_salvage_init(vdp)) != 0)
-			return (ret);
+			goto err;
 
 		/*
-		 * If we're not being aggressive, attempt to crack subdbs.
-		 * "has" will indicate whether the attempt has succeeded
+		 * If we're not being aggressive, attempt to crack subdatabases.
+		 * "has_subdbs" will indicate whether the attempt has succeeded
 		 * (even in part), meaning that we have some semblance of
-		 * subdbs;  on the walkpages pass, we print out
-		 * whichever data pages we have not seen.
+		 * subdatabases; on the walkpages pass, we print out whichever
+		 * data pages we have not seen.
 		 */
-		if (!LF_ISSET(DB_AGGRESSIVE) && (__db_salvage_subdbs(dbp,
-		    vdp, handle, callback, flags, &has)) != 0)
+		if (!LF_ISSET(DB_AGGRESSIVE) && __db_salvage_subdbs(
+		    dbp, vdp, handle, callback, flags, &has_subdbs) != 0)
 			isbad = 1;
 
 		/*
-		 * If we have subdatabases, we need to signal that if
-		 * any keys are found that don't belong to a subdatabase,
-		 * they'll need to have an "__OTHER__" subdatabase header
-		 * printed first.  Flag this.  Else, print a header for
-		 * the normal, non-subdb database.
+		 * If we have subdatabases, flag if any keys are found that
+		 * don't belong to a subdatabase -- they'll need to have an
+		 * "__OTHER__" subdatabase header printed first.
 		 */
-		if (has == 1)
+		if (has_subdbs)
 			F_SET(vdp, SALVAGE_PRINTHEADER);
 	}
 
@@ -392,12 +376,14 @@ __db_verify(dbp, name, subdb, handle, callback, flags)
 		__db_salvage_destroy(vdp);
 	}
 
-err:	if (LF_ISSET(DB_SALVAGE) &&
-	    (has == 0 || F_ISSET(vdp, SALVAGE_PRINTFOOTER)))
+	/* Don't display a footer for a database holding other databases. */
+	if (LF_ISSET(DB_SALVAGE) &&
+	    (!has_subdbs || F_ISSET(vdp, SALVAGE_PRINTFOOTER)))
 		(void)__db_prfooter(handle, callback);
 
+done: err:
 	/* Send feedback that we're done. */
-done:	if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
+	if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
 		dbp->db_feedback(dbp, DB_VERIFY, 100);
 
 	if (fhp != NULL &&
@@ -456,6 +442,9 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
 	meta = (DBMETA *)mbuf;
 	dbp->type = DB_UNKNOWN;
 
+	if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+		return (ret);
+
 	/*
 	 * Seek to the metadata page.
 	 * Note that if we're just starting a verification, dbp->pgsize
@@ -573,6 +562,20 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
 		    (u_long)PGNO_BASE_MD, (u_long)meta->type));
 	}
 
+	/*
+	 * 26: Meta-flags.
+	 */
+	if (meta->metaflags != 0) {
+		if (meta->metaflags == DBMETA_CHKSUM)
+			F_SET(pip, VRFY_HAS_CHKSUM);
+		else {
+			isbad = 1;
+			EPRINT((dbenv,
+			    "Page %lu: bad meta-data flags value %#lx",
+			    (u_long)PGNO_BASE_MD, (u_long)meta->metaflags));
+		}
+	}
+
 	/*
 	 * 28-31: Free list page number.
 	 * We'll verify its sensibility when we do inter-page
@@ -587,8 +590,6 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
 	 * this one page.  We'll realloc later when we know how many
 	 * pages there are.
 	 */
-	if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
-		return (ret);
 	pip->pgno = PGNO_BASE_MD;
 	pip->type = meta->type;
 
@@ -638,9 +639,8 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
 
 	for (i = 0; i <= vdp->last_pgno; i++) {
 		/*
-		 * If DB_SALVAGE is set, we inspect our database of
-		 * completed pages, and skip any we've already printed in
-		 * the subdb pass.
+		 * If DB_SALVAGE is set, we inspect our database of completed
+		 * pages, and skip any we've already printed in the subdb pass.
 		 */
 		if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0))
 			continue;
@@ -677,11 +677,8 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
 			 * DB_VERIFY_BAD, keep going;  listing more errors
 			 * may make it easier to diagnose problems and
 			 * determine the magnitude of the corruption.
-			 */
-
-			/*
-			 * Verify info common to all page
-			 * types.
+			 *
+			 * Verify info common to all page types.
 			 */
 			if (i != PGNO_BASE_MD) {
 				ret = __db_vrfy_common(dbp, vdp, h, i, flags);
@@ -1320,8 +1317,21 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
 		    (u_long)pgno, (u_long)meta->pagesize));
 	}
 
-	/* free list */
+	/* Flags */
+	if (meta->metaflags != 0) {
+		if (meta->metaflags == DBMETA_CHKSUM)
+			F_SET(pip, VRFY_HAS_CHKSUM);
+		else {
+			isbad = 1;
+			EPRINT((dbenv,
+			    "Page %lu: bad meta-data flags value %#lx",
+			    (u_long)PGNO_BASE_MD, (u_long)meta->metaflags));
+		}
+	}
+
 	/*
+	 * Free list.
+	 *
 	 * If this is not the main, master-database meta page, it
 	 * should not have a free list.
 	 */
@@ -1729,9 +1739,23 @@ __db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
 	int (*callback) __P((void *, const void *));
 	u_int32_t flags;
 {
-	int ret;
+	DB_ENV *dbenv;
+	VRFY_PAGEINFO *pip;
+	int keyflag, ret, t_ret;
+
 	DB_ASSERT(LF_ISSET(DB_SALVAGE));
 
+	dbenv = dbp->dbenv;
+
+	/*
+	 * !!!
+	 * We dump record numbers when salvaging Queue databases, but not for
+	 * immutable Recno databases.  The problem is we can't figure out the
+	 * record number from the database page in the Recno case, while the
+	 * offset in the file is sufficient for Queue.
+	 */
+	keyflag = 0;
+
 	/* If we got this page in the subdb pass, we can safely skip it. */
 	if (__db_salvage_isdone(vdp, pgno))
 		return (0);
@@ -1744,11 +1768,12 @@ __db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
 		ret = __bam_vrfy_meta(dbp, vdp, (BTMETA *)h, pgno, flags);
 		break;
 	case P_QAMMETA:
+		keyflag = 1;
 		ret = __qam_vrfy_meta(dbp, vdp, (QMETA *)h, pgno, flags);
 		break;
 	case P_HASH:
-		return (__ham_salvage(dbp,
-		    vdp, pgno, h, handle, callback, flags));
+		return (__ham_salvage(
+		    dbp, vdp, pgno, h, handle, callback, flags));
 	case P_LBTREE:
 		return (__bam_salvage(dbp,
 		    vdp, pgno, P_LBTREE, h, handle, callback, NULL, flags));
@@ -1781,10 +1806,24 @@ __db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
 		/* XXX: Should we be more aggressive here? */
 		return (0);
 	}
+	if (ret != 0)
+		return (ret);
 
-	return (ret != 0 ? ret :
-	     __db_prheader(dbp, NULL, 0, 1,
-	     handle, callback, vdp, PGNO_BASE_MD));
+	/*
+	 * We have to display the dump header if it's a metadata page.  It's
+	 * our last chance as the page was marked "seen" in the vrfy routine,
+	 * and we won't see the page again.  We don't display headers for
+	 * the first database in a multi-database file; that database simply
+	 * contains a list of subdatabases.
+	 */
+	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+		return (ret);
+	if (!F_ISSET(pip, VRFY_HAS_SUBDBS))
+		ret = __db_prheader(
+		    dbp, NULL, 0, keyflag, handle, callback, vdp, pgno);
+	if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
 }
 
 /*
@@ -1800,15 +1839,17 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
 	int (*callback) __P((void *, const void *));
 	u_int32_t flags;
 {
+	DBC *dbc;
 	DBT unkdbt, key, *dbt;
 	DB_ENV *dbenv;
 	DB_MPOOLFILE *mpf;
 	PAGE *h;
 	db_pgno_t pgno;
 	u_int32_t pgtype;
-	int ret, err_ret;
+	int ret, t_ret;
 	void *ovflbuf;
 
+	dbc = NULL;
 	dbenv = dbp->dbenv;
 	mpf = dbp->mpf;
 
@@ -1819,15 +1860,20 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
 	if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0)
 		return (ret);
 
-	err_ret = 0;
-	while ((ret = __db_salvage_getnext(vdp, &pgno, &pgtype)) == 0) {
-		dbt = NULL;
-
-		if ((ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) {
-			err_ret = ret;
+	/*
+	 * We make two passes -- in the first pass, skip SALVAGE_OVERFLOW
+	 * pages, because they may be referenced by the standard database
+	 * pages that we're resolving.
+	 */
+	while ((t_ret =
+	    __db_salvage_getnext(vdp, &dbc, &pgno, &pgtype, 1)) == 0) {
+		if ((t_ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) {
+			if (ret == 0)
+				ret = t_ret;
 			continue;
 		}
 
+		dbt = NULL;
 		switch (pgtype) {
 		case SALVAGE_LDUP:
 		case SALVAGE_LRECNODUP:
@@ -1835,28 +1881,17 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
 			/* FALLTHROUGH */
 		case SALVAGE_LBTREE:
 		case SALVAGE_LRECNO:
-			if ((ret = __bam_salvage(dbp, vdp, pgno, pgtype,
-			    h, handle, callback, dbt, flags)) != 0)
-				err_ret = ret;
+			if ((t_ret = __bam_salvage(dbp, vdp, pgno, pgtype,
+			    h, handle, callback, dbt, flags)) != 0 && ret == 0)
+				ret = t_ret;
 			break;
 		case SALVAGE_OVERFLOW:
-			/*
-			 * XXX:
-			 * This may generate multiple "UNKNOWN" keys in
-			 * a database with no dups.  What to do?
-			 */
-			if ((ret = __db_safe_goff(dbp,
-			    vdp, pgno, &key, &ovflbuf, flags)) != 0 ||
-			    (ret = __db_vrfy_prdbt(&key,
-			    0, " ", handle, callback, 0, vdp)) != 0 ||
-			    (ret = __db_vrfy_prdbt(&unkdbt,
-			    0, " ", handle, callback, 0, vdp)) != 0)
-				err_ret = ret;
+			DB_ASSERT(0);		/* Shouldn't ever happen. */
 			break;
 		case SALVAGE_HASH:
-			if ((ret = __ham_salvage(
-			    dbp, vdp, pgno, h, handle, callback, flags)) != 0)
-				err_ret = ret;
+			if ((t_ret = __ham_salvage(dbp, vdp,
+			    pgno, h, handle, callback, flags)) != 0 && ret == 0)
+				ret = t_ret;
 			break;
 		case SALVAGE_INVALID:
 		case SALVAGE_IGNORE:
@@ -1868,16 +1903,67 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
 			DB_ASSERT(0);
 			break;
 		}
-		if ((ret = __memp_fput(mpf, h, 0)) != 0)
-			err_ret = ret;
+		if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
+			ret = t_ret;
 	}
 
+	/* We should have reached the end of the database. */
+	if (t_ret == DB_NOTFOUND)
+		t_ret = 0;
+	if (t_ret != 0 && ret == 0)
+		ret = t_ret;
+
+	/* Re-open the cursor so we traverse the database again. */
+	if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	dbc = NULL;
+
+	/* Now, deal with any remaining overflow pages. */
+	while ((t_ret =
+	    __db_salvage_getnext(vdp, &dbc, &pgno, &pgtype, 0)) == 0) {
+		if ((t_ret = __memp_fget(mpf, &pgno, 0, &h)) != 0) {
+			if (ret == 0)
+				ret = t_ret;
+			continue;
+		}
+
+		switch (pgtype) {
+		case SALVAGE_OVERFLOW:
+			/*
+			 * XXX:
+			 * This may generate multiple "UNKNOWN" keys in
+			 * a database with no dups.  What to do?
+			 */
+			if ((t_ret = __db_safe_goff(dbp,
+			    vdp, pgno, &key, &ovflbuf, flags)) != 0 ||
+			    ((vdp->type == DB_BTREE || vdp->type == DB_HASH) &&
+			    (t_ret = __db_vrfy_prdbt(&unkdbt,
+			    0, " ", handle, callback, 0, vdp)) != 0) ||
+			    (t_ret = __db_vrfy_prdbt(
+			    &key, 0, " ", handle, callback, 0, vdp)) != 0)
+				if (ret == 0)
+					ret = t_ret;
+			break;
+		default:
+			DB_ASSERT(0);		/* Shouldn't ever happen. */
+			break;
+		}
+		if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
+			ret = t_ret;
+	}
+
+	/* We should have reached the end of the database. */
+	if (t_ret == DB_NOTFOUND)
+		t_ret = 0;
+	if (t_ret != 0 && ret == 0)
+		ret = t_ret;
+
+	if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+
 	__os_free(dbenv, ovflbuf);
 
-	if (err_ret != 0 && ret == 0)
-		ret = err_ret;
-
-	return (ret == DB_NOTFOUND ? 0 : ret);
+	return (ret);
 }
 
 /*
@@ -2157,95 +2243,87 @@ __db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
 	u_int32_t flags;
 	int *hassubsp;
 {
-	BTMETA *btmeta;
 	DB *pgset;
 	DBC *pgsc;
+	DB_ENV *dbenv;
 	DB_MPOOLFILE *mpf;
 	PAGE *h;
+	VRFY_PAGEINFO *pip;
 	db_pgno_t p, meta_pgno;
-	int ret, err_ret;
+	int ret, t_ret;
 
+	*hassubsp = 0;
+
+	dbenv = dbp->dbenv;
 	pgset = NULL;
 	pgsc = NULL;
 	mpf = dbp->mpf;
-	err_ret = 0;
+	h = NULL;
+	pip = NULL;
+	ret = 0;
 
+	/*
+	 * Check to make sure the page is OK and find out if it contains
+	 * subdatabases.
+	 */
 	meta_pgno = PGNO_BASE_MD;
-	if ((ret = __memp_fget(mpf, &meta_pgno, 0, &h)) != 0)
-		return (ret);
-
-	if (TYPE(h) == P_BTREEMETA)
-		btmeta = (BTMETA *)h;
-	else {
-		/* Not a btree metadata, ergo no subdbs, so just return. */
-		ret = 0;
-		goto err;
+	if ((t_ret = __memp_fget(mpf, &meta_pgno, 0, &h)) == 0 &&
+	    (t_ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) == 0 &&
+	    (t_ret = __db_salvage(
+	    dbp, vdp, PGNO_BASE_MD, h, handle, callback, flags)) == 0 &&
+	    (t_ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) == 0)
+		if (F_ISSET(pip, VRFY_HAS_SUBDBS))
+			*hassubsp = 1;
+	if (pip != NULL &&
+	    (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+		ret = t_ret;
+	if (h != NULL) {
+		if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
+			ret = t_ret;
+		h = NULL;
 	}
-
-	/* If it's not a safe page, bail on the attempt. */
-	if ((ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) != 0 ||
-	   (ret = __bam_vrfy_meta(dbp, vdp, btmeta, PGNO_BASE_MD, flags)) != 0)
-		goto err;
-
-	if (!F_ISSET(&btmeta->dbmeta, BTM_SUBDB)) {
-		/* No subdbs, just return. */
-		ret = 0;
-		goto err;
-	}
-
-	/* We think we've got subdbs.  Mark it so. */
-	*hassubsp = 1;
-
-	if ((ret = __memp_fput(mpf, h, 0)) != 0)
+	if (ret != 0 || *hassubsp == 0)
 		return (ret);
 
 	/*
 	 * We have subdbs.  Try to crack them.
 	 *
-	 * To do so, get a set of leaf pages in the master
-	 * database, and then walk each of the valid ones, salvaging
-	 * subdbs as we go.  If any prove invalid, just drop them;  we'll
-	 * pick them up on a later pass.
+	 * To do so, get a set of leaf pages in the master database, and then
+	 * walk each of the valid ones, salvaging subdbs as we go.  If any
+	 * prove invalid, just drop them;  we'll pick them up on a later pass.
 	 */
-	if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
-		return (ret);
-	if ((ret =
-	    __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0)
+	if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
+		goto err;
+	if ((ret = __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0)
 		goto err;
-
 	if ((ret = __db_cursor(pgset, NULL, &pgsc, 0)) != 0)
 		goto err;
-	while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
-		if ((ret = __memp_fget(mpf, &p, 0, &h)) != 0) {
-			err_ret = ret;
-			continue;
+	while ((t_ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+		if ((t_ret = __memp_fget(mpf, &p, 0, &h)) == 0 &&
+		    (t_ret = __db_vrfy_common(dbp, vdp, h, p, flags)) == 0 &&
+		    (t_ret =
+		    __bam_vrfy(dbp, vdp, h, p, flags | DB_NOORDERCHK)) == 0)
+			t_ret = __db_salvage_subdbpg(
+			    dbp, vdp, h, handle, callback, flags);
+		if (t_ret != 0 && ret == 0)
+			ret = t_ret;
+		if (h != NULL) {
+			if ((t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
+				ret = t_ret;
+			h = NULL;
 		}
-		if ((ret = __db_vrfy_common(dbp, vdp, h, p, flags)) != 0 ||
-		    (ret = __bam_vrfy(dbp,
-		    vdp, h, p, flags | DB_NOORDERCHK)) != 0)
-			goto nextpg;
-		if (TYPE(h) != P_LBTREE)
-			goto nextpg;
-		else if ((ret = __db_salvage_subdbpg(
-		    dbp, vdp, h, handle, callback, flags)) != 0)
-			err_ret = ret;
-nextpg:		if ((ret = __memp_fput(mpf, h, 0)) != 0)
-			err_ret = ret;
 	}
 
-	if (ret != DB_NOTFOUND)
-		goto err;
-	if ((ret = __db_c_close(pgsc)) != 0)
-		goto err;
+	if (t_ret != DB_NOTFOUND && ret == 0)
+		ret = t_ret;
 
-	ret = __db_close(pgset, NULL, 0);
-	return ((ret == 0 && err_ret != 0) ? err_ret : ret);
-
-err:	if (pgsc != NULL)
-		(void)__db_c_close(pgsc);
-	if (pgset != NULL)
-		(void)__db_close(pgset, NULL, 0);
-	(void)__memp_fput(mpf, h, 0);
+err:	if (pgsc != NULL && (t_ret = __db_c_close(pgsc)) != 0 && ret == 0)
+		ret = t_ret;
+	if (pgset != NULL &&
+	    (t_ret = __db_close(pgset, NULL, 0)) != 0 && ret == 0)
+		ret = t_ret;
+	if (h != NULL && (t_ret = __memp_fput(mpf, h, 0)) != 0 && ret == 0)
+		ret = t_ret;
 	return (ret);
 }
 
diff --git a/storage/bdb/db/db_vrfy_stub.c b/storage/bdb/db/db_vrfy_stub.c
index 486802d7dd7..46f0b1134e1 100644
--- a/storage/bdb/db/db_vrfy_stub.c
+++ b/storage/bdb/db/db_vrfy_stub.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_vrfy_stub.c,v 11.6 2004/06/14 15:23:32 bostic Exp $
+ * $Id: db_vrfy_stub.c,v 12.1 2005/06/16 20:21:15 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/db/db_vrfyutil.c b/storage/bdb/db/db_vrfyutil.c
index f1034af1f2e..f1508872238 100644
--- a/storage/bdb/db/db_vrfyutil.c
+++ b/storage/bdb/db/db_vrfyutil.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_vrfyutil.c,v 11.40 2004/10/11 18:47:50 bostic Exp $
+ * $Id: db_vrfyutil.c,v 12.5 2005/06/16 20:21:15 bostic Exp $
  */
 
 #include "db_config.h"
@@ -102,13 +102,26 @@ __db_vrfy_dbinfo_destroy(dbenv, vdp)
 	DB_ENV *dbenv;
 	VRFY_DBINFO *vdp;
 {
-	VRFY_CHILDINFO *c, *d;
+	VRFY_CHILDINFO *c;
 	int t_ret, ret;
 
 	ret = 0;
 
-	for (c = LIST_FIRST(&vdp->subdbs); c != NULL; c = d) {
-		d = LIST_NEXT(c, links);
+	/*
+	 * Discard active page structures.  Ideally there wouldn't be any,
+	 * but in some error cases we may not have cleared them all out.
+	 */
+	while (LIST_FIRST(&vdp->activepips) != NULL)
+		if ((t_ret = __db_vrfy_putpageinfo(
+		    dbenv, vdp, LIST_FIRST(&vdp->activepips))) != 0) {
+			if (ret == 0)
+				ret = t_ret;
+			break;
+		}
+
+	/* Discard subdatabase list structures. */
+	while ((c = LIST_FIRST(&vdp->subdbs)) != NULL) {
+		LIST_REMOVE(c, links);
 		__os_free(NULL, c);
 	}
 
@@ -121,8 +134,6 @@ __db_vrfy_dbinfo_destroy(dbenv, vdp)
 	if ((t_ret = __db_close(vdp->pgset, NULL, 0)) != 0 && ret == 0)
 		ret = t_ret;
 
-	DB_ASSERT(LIST_FIRST(&vdp->activepips) == NULL);
-
 	if (vdp->extents != NULL)
 		__os_free(dbenv, vdp->extents);
 	__os_free(dbenv, vdp);
@@ -184,7 +195,6 @@ __db_vrfy_getpageinfo(vdp, pgno, pipp)
 		/* Found it. */
 		DB_ASSERT(data.size == sizeof(VRFY_PAGEINFO));
 		pip = data.data;
-		DB_ASSERT(pip->pi_refcount == 0);
 		LIST_INSERT_HEAD(&vdp->activepips, pip, links);
 		goto found;
 	} else if (ret != DB_NOTFOUND)	/* Something nasty happened. */
@@ -198,8 +208,6 @@ __db_vrfy_getpageinfo(vdp, pgno, pipp)
 found:	pip->pi_refcount++;
 
 	*pipp = pip;
-
-	DB_ASSERT(pip->pi_refcount > 0);
 	return (0);
 }
 
@@ -220,11 +228,6 @@ __db_vrfy_putpageinfo(dbenv, vdp, pip)
 	DB *pgdbp;
 	VRFY_PAGEINFO *p;
 	int ret;
-#ifdef DIAGNOSTIC
-	int found;
-
-	found = 0;
-#endif
 
 	if (--pip->pi_refcount > 0)
 		return (0);
@@ -241,22 +244,14 @@ __db_vrfy_putpageinfo(dbenv, vdp, pip)
 	if ((ret = __db_put(pgdbp, NULL, &key, &data, 0)) != 0)
 		return (ret);
 
-	for (p = LIST_FIRST(&vdp->activepips); p != NULL;
-	    p = LIST_NEXT(p, links))
-		if (p == pip) {
-#ifdef DIAGNOSTIC
-			found++;
-#endif
-			DB_ASSERT(p->pi_refcount == 0);
-			LIST_REMOVE(p, links);
+	for (p =
+	    LIST_FIRST(&vdp->activepips); p != NULL; p = LIST_NEXT(p, links))
+		if (p == pip)
 			break;
-		}
-#ifdef DIAGNOSTIC
-	DB_ASSERT(found == 1);
-#endif
+	if (p != NULL)
+		LIST_REMOVE(p, links);
 
-	DB_ASSERT(pip->pi_refcount == 0);
-	__os_ufree(dbenv, pip);
+	__os_ufree(dbenv, p);
 	return (0);
 }
 
@@ -595,11 +590,11 @@ __db_vrfy_ccclose(dbc)
  *	Constructor for VRFY_PAGEINFO;  allocates and initializes.
  */
 static int
-__db_vrfy_pageinfo_create(dbenv, pgipp)
+__db_vrfy_pageinfo_create(dbenv, pipp)
 	DB_ENV *dbenv;
-	VRFY_PAGEINFO **pgipp;
+	VRFY_PAGEINFO **pipp;
 {
-	VRFY_PAGEINFO *pgip;
+	VRFY_PAGEINFO *pip;
 	int ret;
 
 	/*
@@ -609,13 +604,11 @@ __db_vrfy_pageinfo_create(dbenv, pgipp)
 	 * used, and so we always allocate with __os_umalloc so we can free
 	 * with __os_ufree.
 	 */
-	if ((ret = __os_umalloc(dbenv, sizeof(VRFY_PAGEINFO), &pgip)) != 0)
+	if ((ret = __os_umalloc(dbenv, sizeof(VRFY_PAGEINFO), &pip)) != 0)
 		return (ret);
-	memset(pgip, 0, sizeof(VRFY_PAGEINFO));
+	memset(pip, 0, sizeof(VRFY_PAGEINFO));
 
-	DB_ASSERT(pgip->pi_refcount == 0);
-
-	*pgipp = pgip;
+	*pipp = pip;
 	return (0);
 }
 
@@ -668,16 +661,17 @@ __db_salvage_destroy(vdp)
  *	in this search, as well as the page we're returning.
  *
  * PUBLIC: int __db_salvage_getnext
- * PUBLIC:     __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+ * PUBLIC:     __P((VRFY_DBINFO *, DBC **, db_pgno_t *, u_int32_t *, int));
  */
 int
-__db_salvage_getnext(vdp, pgnop, pgtypep)
+__db_salvage_getnext(vdp, dbcp, pgnop, pgtypep, skip_overflow)
 	VRFY_DBINFO *vdp;
+	DBC **dbcp;
 	db_pgno_t *pgnop;
 	u_int32_t *pgtypep;
+	int skip_overflow;
 {
 	DB *dbp;
-	DBC *dbc;
 	DBT key, data;
 	int ret;
 	u_int32_t pgtype;
@@ -687,30 +681,29 @@ __db_salvage_getnext(vdp, pgnop, pgtypep)
 	memset(&key, 0, sizeof(DBT));
 	memset(&data, 0, sizeof(DBT));
 
-	if ((ret = __db_cursor(dbp, NULL, &dbc, 0)) != 0)
+	if (*dbcp == NULL &&
+	    (ret = __db_cursor(dbp, NULL, dbcp, 0)) != 0)
 		return (ret);
 
-	while ((ret = __db_c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+	while ((ret = __db_c_get(*dbcp, &key, &data, DB_NEXT)) == 0) {
 		DB_ASSERT(data.size == sizeof(u_int32_t));
 		memcpy(&pgtype, data.data, sizeof(pgtype));
 
-		if ((ret = __db_c_del(dbc, 0)) != 0)
-			goto err;
-		if (pgtype != SALVAGE_IGNORE)
-			goto found;
+		if (skip_overflow && pgtype == SALVAGE_OVERFLOW)
+			continue;
+
+		if ((ret = __db_c_del(*dbcp, 0)) != 0)
+			return (ret);
+		if (pgtype != SALVAGE_IGNORE) {
+			DB_ASSERT(key.size == sizeof(db_pgno_t));
+			DB_ASSERT(data.size == sizeof(u_int32_t));
+
+			*pgnop = *(db_pgno_t *)key.data;
+			*pgtypep = *(u_int32_t *)data.data;
+			break;
+		}
 	}
 
-	/* No more entries--ret probably equals DB_NOTFOUND. */
-
-	if (0) {
-found:		DB_ASSERT(key.size == sizeof(db_pgno_t));
-		DB_ASSERT(data.size == sizeof(u_int32_t));
-
-		*pgnop = *(db_pgno_t *)key.data;
-		*pgtypep = *(u_int32_t *)data.data;
-	}
-
-err:	(void)__db_c_close(dbc);
 	return (ret);
 }
 
@@ -753,8 +746,7 @@ __db_salvage_isdone(vdp, pgno)
 	 * If it's there and is marked anything else, that's fine--we
 	 * want to mark it done.
 	 */
-	ret = __db_get(dbp, NULL, &key, &data, 0);
-	if (ret == 0) {
+	if ((ret = __db_get(dbp, NULL, &key, &data, 0)) == 0) {
 		/*
 		 * The key's already here.  Check and see if it's already
 		 * marked done.  If it is, return DB_KEYEXIST.  If it's not,
@@ -887,11 +879,12 @@ __db_vrfy_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
 		 * Check and clear the SALVAGE_PRINTHEADER flag;  if
 		 * it was set, print a subdatabase header.
 		 */
-		if (F_ISSET(vdp, SALVAGE_PRINTHEADER))
+		if (F_ISSET(vdp, SALVAGE_PRINTHEADER)) {
 			(void)__db_prheader(
 			    NULL, "__OTHER__", 0, 0, handle, callback, vdp, 0);
-		F_CLR(vdp, SALVAGE_PRINTHEADER);
-		F_SET(vdp, SALVAGE_PRINTFOOTER);
+			F_CLR(vdp, SALVAGE_PRINTHEADER);
+			F_SET(vdp, SALVAGE_PRINTFOOTER);
+		}
 
 		/*
 		 * Even if the printable flag wasn't set by our immediate
diff --git a/storage/bdb/db185/db185.c b/storage/bdb/db185/db185.c
index 8399eac42f4..59b3260e4f1 100644
--- a/storage/bdb/db185/db185.c
+++ b/storage/bdb/db185/db185.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db185.c,v 11.35 2004/03/24 20:37:35 bostic Exp $
+ * $Id: db185.c,v 12.2 2005/10/06 14:36:51 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -89,12 +89,6 @@ __db185_open(file, oflags, mode, type, openinfo)
 				(void)dbp->set_bt_minkey(dbp, bi->minkeypage);
 			if (bi->psize != 0)
 				(void)dbp->set_pagesize(dbp, bi->psize);
-			/*
-			 * !!!
-			 * Comparisons and prefix calls work because the DBT
-			 * structures in 1.85 and 2.0 have the same initial
-			 * fields.
-			 */
 			if (bi->prefix != NULL) {
 				db185p->prefix = bi->prefix;
 				dbp->set_bt_prefix(dbp, db185_prefix);
@@ -546,7 +540,14 @@ db185_compare(dbp, a, b)
 	DB *dbp;
 	const DBT *a, *b;
 {
-	return (((DB185 *)dbp->api_internal)->compare(a, b));
+	DBT185 a185, b185;
+
+	a185.data = a->data;
+	a185.size = a->size;
+	b185.data = b->data;
+	b185.size = b->size;
+
+	return (((DB185 *)dbp->api_internal)->compare(&a185, &b185));
 }
 
 /*
@@ -558,7 +559,14 @@ db185_prefix(dbp, a, b)
 	DB *dbp;
 	const DBT *a, *b;
 {
-	return (((DB185 *)dbp->api_internal)->prefix(a, b));
+	DBT185 a185, b185;
+
+	a185.data = a->data;
+	a185.size = a->size;
+	b185.data = b->data;
+	b185.size = b->size;
+
+	return (((DB185 *)dbp->api_internal)->prefix(&a185, &b185));
 }
 
 /*
diff --git a/storage/bdb/db185/db185_int.in b/storage/bdb/db185/db185_int.in
index f9bfdbba01c..eba4fdb002d 100644
--- a/storage/bdb/db185/db185_int.in
+++ b/storage/bdb/db185/db185_int.in
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -36,7 +36,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db185_int.in,v 11.14 2004/01/28 03:35:59 bostic Exp $
+ * $Id: db185_int.in,v 12.2 2005/10/06 14:36:52 bostic Exp $
  */
 
 #ifndef _DB185_INT_H_
@@ -82,8 +82,8 @@ typedef struct __db185 {
 	 */
 	DBC	  *dbc;			/* DB cursor. */
 					/* Various callback functions. */
-	int	  (*compare) __P((const DBT *, const DBT *));
-	size_t	  (*prefix) __P((const DBT *, const DBT *));
+	int	  (*compare) __P((const DBT185 *, const DBT185 *));
+	size_t	  (*prefix) __P((const DBT185 *, const DBT185 *));
 	u_int32_t (*hash) __P((const void *, size_t));
 } DB185;
 
@@ -96,9 +96,9 @@ typedef struct {
 	u_int32_t minkeypage;	/* minimum keys per page */
 	u_int32_t psize;	/* page size */
 	int	(*compare)	/* comparison function */
-	    __P((const DBT *, const DBT *));
+	    __P((const DBT185 *, const DBT185 *));
 	size_t	(*prefix)	/* prefix function */
-	    __P((const DBT *, const DBT *));
+	    __P((const DBT185 *, const DBT185 *));
 	int	lorder;		/* byte order */
 } BTREEINFO;
 
diff --git a/storage/bdb/db_archive/db_archive.c b/storage/bdb/db_archive/db_archive.c
index d5db42deed2..d00fc1bf9ea 100644
--- a/storage/bdb/db_archive/db_archive.c
+++ b/storage/bdb/db_archive/db_archive.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_archive.c,v 11.46 2004/06/10 01:00:08 bostic Exp $
+ * $Id: db_archive.c,v 12.4 2005/09/09 12:38:30 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -27,7 +27,9 @@ static const char copyright[] =
 
 int main __P((int, char *[]));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -36,13 +38,17 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_archive";
 	DB_ENV	*dbenv;
 	u_int32_t flags;
 	int ch, exitval, ret, verbose;
 	char **file, *home, **list, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
@@ -117,8 +123,7 @@ main(argc, argv)
 	 * If attaching to a pre-existing environment fails, create a
 	 * private one and try again.
 	 */
-	if ((ret = dbenv->open(dbenv,
-	    home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 &&
 	    (ret == DB_VERSION_MISMATCH ||
 	    (ret = dbenv->open(dbenv, home, DB_CREATE |
 	    DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) {
@@ -161,13 +166,12 @@ int
 usage()
 {
 	(void)fprintf(stderr,
-	    "usage: db_archive [-adlsVv] [-h home] [-P password]\n");
+	    "usage: %s [-adlsVv] [-h home] [-P password]\n", progname);
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_checkpoint/db_checkpoint.c b/storage/bdb/db_checkpoint/db_checkpoint.c
index 538b66ddcd1..c1ee7b50596 100644
--- a/storage/bdb/db_checkpoint/db_checkpoint.c
+++ b/storage/bdb/db_checkpoint/db_checkpoint.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_checkpoint.c,v 11.54 2004/03/24 15:13:12 bostic Exp $
+ * $Id: db_checkpoint.c,v 12.6 2005/09/09 12:38:30 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -28,7 +28,6 @@ static const char copyright[] =
 #endif
 #endif
 
-#include 
 #include 
 #include 
 #include 
@@ -36,12 +35,12 @@ static const char copyright[] =
 #endif
 
 #include "db_int.h"
-#include "dbinc/db_page.h"
-#include "dbinc/db_am.h"
 
 int	 main __P((int, char *[]));
 int	 usage __P((void));
-int	 version_check __P((const char *));
+int	 version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -51,14 +50,18 @@ main(argc, argv)
 	extern char *optarg;
 	extern int optind;
 	DB_ENV	*dbenv;
-	const char *progname = "db_checkpoint";
 	time_t now;
 	long argval;
 	u_int32_t flags, kbytes, minutes, seconds;
 	int ch, exitval, once, ret, verbose;
 	char *home, *logfile, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	/*
@@ -86,7 +89,7 @@ main(argc, argv)
 			if (__db_getlong(NULL, progname,
 			    optarg, 1, (long)MAX_UINT32_T, &argval))
 				return (EXIT_FAILURE);
-			kbytes = argval;
+			kbytes = (u_int32_t)argval;
 			break;
 		case 'L':
 			logfile = optarg;
@@ -104,7 +107,7 @@ main(argc, argv)
 			if (__db_getlong(NULL, progname,
 			    optarg, 1, (long)MAX_UINT32_T, &argval))
 				return (EXIT_FAILURE);
-			minutes = argval;
+			minutes = (u_int32_t)argval;
 			break;
 		case 'V':
 			printf("%s\n", db_version(NULL, NULL, NULL));
@@ -155,20 +158,11 @@ main(argc, argv)
 		goto shutdown;
 	}
 	/* Initialize the environment. */
-	if ((ret = dbenv->open(dbenv,
-	    home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0) {
 		dbenv->err(dbenv, ret, "open");
 		goto shutdown;
 	}
 
-	/* Register the standard pgin/pgout functions, in case we do I/O. */
-	if ((ret = dbenv->memp_register(
-	    dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
-		dbenv->err(dbenv, ret,
-    "DB_ENV->memp_register: failed to register access method functions");
-		goto shutdown;
-	}
-
 	/*
 	 * If we have only a time delay, then we'll sleep the right amount
 	 * to wake up when a checkpoint is necessary.  If we have a "kbytes"
@@ -205,7 +199,7 @@ shutdown:	exitval = 1;
 
 	/* Clean up the logfile. */
 	if (logfile != NULL)
-		remove(logfile);
+		(void)remove(logfile);
 
 	/* Clean up the environment. */
 	if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) {
@@ -226,15 +220,13 @@ shutdown:	exitval = 1;
 int
 usage()
 {
-	(void)fprintf(stderr, "%s\n\t%s\n",
-	    "usage: db_checkpoint [-1Vv]",
+	(void)fprintf(stderr, "usage: %s [-1Vv]\n\t%s\n", progname,
 	    "[-h home] [-k kbytes] [-L file] [-P password] [-p min]");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_deadlock/db_deadlock.c b/storage/bdb/db_deadlock/db_deadlock.c
index cc91db25d74..67078a6937a 100644
--- a/storage/bdb/db_deadlock/db_deadlock.c
+++ b/storage/bdb/db_deadlock/db_deadlock.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_deadlock.c,v 11.45 2004/03/24 15:13:12 bostic Exp $
+ * $Id: db_deadlock.c,v 12.4 2005/10/03 16:00:16 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -39,7 +39,9 @@ static const char copyright[] =
 
 int main __P((int, char *[]));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -48,23 +50,27 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_deadlock";
 	DB_ENV  *dbenv;
 	u_int32_t atype;
 	time_t now;
 	u_long secs, usecs;
 	int ch, exitval, ret, verbose;
-	char *home, *logfile, *str;
+	char *home, *logfile, *passwd, *str;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
 	atype = DB_LOCK_DEFAULT;
-	home = logfile = NULL;
+	home = logfile = passwd = NULL;
 	secs = usecs = 0;
 	exitval = verbose = 0;
-	while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+	while ((ch = getopt(argc, argv, "a:h:L:P:t:Vvw")) != EOF)
 		switch (ch) {
 		case 'a':
 			switch (optarg[0]) {
@@ -102,6 +108,14 @@ main(argc, argv)
 		case 'L':
 			logfile = optarg;
 			break;
+		case 'P':
+			passwd = strdup(optarg);
+			memset(optarg, 0, strlen(optarg));
+			if (passwd == NULL) {
+				fprintf(stderr, "%s: strdup: %s\n",
+				    progname, strerror(errno));
+				return (EXIT_FAILURE);
+			}
 		case 't':
 			if ((str = strchr(optarg, '.')) != NULL) {
 				*str++ = '\0';
@@ -158,14 +172,19 @@ main(argc, argv)
 	dbenv->set_errfile(dbenv, stderr);
 	dbenv->set_errpfx(dbenv, progname);
 
+	if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+	    passwd, DB_ENCRYPT_AES)) != 0) {
+		dbenv->err(dbenv, ret, "set_passwd");
+		goto shutdown;
+	}
+
 	if (verbose) {
 		(void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
 		(void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
 	}
 
 	/* An environment is required. */
-	if ((ret =
-	    dbenv->open(dbenv, home, DB_INIT_LOCK | DB_USE_ENVIRON, 0)) != 0) {
+	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0) {
 		dbenv->err(dbenv, ret, "open");
 		goto shutdown;
 	}
@@ -202,6 +221,9 @@ shutdown:	exitval = 1;
 		    "%s: dbenv->close: %s\n", progname, db_strerror(ret));
 	}
 
+	if (passwd != NULL)
+		free(passwd);
+
 	/* Resend any caught signal. */
 	__db_util_sigresend();
 
@@ -211,15 +233,14 @@ shutdown:	exitval = 1;
 int
 usage()
 {
-	(void)fprintf(stderr, "%s\n\t%s\n",
-	    "usage: db_deadlock [-Vv]",
-	    "[-a e | m | n | o | W | w | y] [-h home] [-L file] [-t sec.usec]");
+	(void)fprintf(stderr,
+	    "usage: %s [-Vv] [-a e | m | n | o | W | w | y]\n\t%s\n", progname,
+	    "[-h home] [-L file] [-P password] [-t sec.usec]");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_dump/db_dump.c b/storage/bdb/db_dump/db_dump.c
index 732a4c62fd0..fbae7373004 100644
--- a/storage/bdb/db_dump/db_dump.c
+++ b/storage/bdb/db_dump/db_dump.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_dump.c,v 11.99 2004/10/11 18:53:13 bostic Exp $
+ * $Id: db_dump.c,v 12.4 2005/09/09 12:38:30 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -33,7 +33,9 @@ int	 is_sub __P((DB *, int *));
 int	 main __P((int, char *[]));
 int	 show_subs __P((DB *));
 int	 usage __P((void));
-int	 version_check __P((const char *));
+int	 version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -42,7 +44,6 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_dump";
 	DB_ENV	*dbenv;
 	DB *dbp;
 	u_int32_t cache;
@@ -51,7 +52,12 @@ main(argc, argv)
 	int ret, Rflag, rflag, resize, subs;
 	char *dopt, *home, *passwd, *subname;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
@@ -291,7 +297,7 @@ db_init(dbenv, home, is_salvage, cache, is_privatep)
 	 * We wish to use the buffer pool so our information is as up-to-date
 	 * as possible, even if the mpool cache hasn't been flushed.
 	 *
-	 * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+	 * If we are not doing a salvage, we want to join the environment;
 	 * if a locking system is present, this will let us use it and be
 	 * safe to run concurrently with other threads of control.  (We never
 	 * need to use transactions explicitly, as we're read-only.)  Note
@@ -305,8 +311,8 @@ db_init(dbenv, home, is_salvage, cache, is_privatep)
 	 * before we create our own.
 	 */
 	*is_privatep = 0;
-	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON |
-	    (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0)) == 0)
+	if ((ret = dbenv->open(dbenv, home,
+	    DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : 0), 0)) == 0)
 		return (0);
 	if (ret == DB_VERSION_MISMATCH)
 		goto err;
@@ -490,15 +496,14 @@ show_subs(dbp)
 int
 usage()
 {
-	(void)fprintf(stderr, "%s\n\t%s\n",
-	    "usage: db_dump [-klNprRV]",
+	(void)fprintf(stderr, "usage: %s [-klNprRV]\n\t%s\n",
+	    progname,
     "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_dump185/db_dump185.c b/storage/bdb/db_dump185/db_dump185.c
index 2fb3cc5ab58..0e39c913dd6 100644
--- a/storage/bdb/db_dump185/db_dump185.c
+++ b/storage/bdb/db_dump185/db_dump185.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_dump185.c,v 11.19 2004/01/28 03:36:00 bostic Exp $
+ * $Id: db_dump185.c,v 12.1 2005/06/16 20:21:22 bostic Exp $
  */
 
 #ifndef lint
diff --git a/storage/bdb/db_hotbackup/db_hotbackup.c b/storage/bdb/db_hotbackup/db_hotbackup.c
new file mode 100644
index 00000000000..b96de7a4165
--- /dev/null
+++ b/storage/bdb/db_hotbackup/db_hotbackup.c
@@ -0,0 +1,708 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: db_hotbackup.c,v 1.16 2005/10/27 01:25:54 mjc Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+
+enum which_open { OPEN_ORIGINAL, OPEN_HOT_BACKUP };
+
+int backup_dir_clean __P((DB_ENV *, char *, int *, int, int));
+int data_copy __P((DB_ENV *, char *, char *, char *, int));
+int env_init __P((DB_ENV **, char *, char *, char *, enum which_open));
+int main __P((int, char *[]));
+int read_data_dir __P((DB_ENV *, char *, char *, int));
+int read_log_dir __P((DB_ENV *, char *, char *, int *, int, int));
+int usage __P((void));
+int version_check __P((void));
+
+const char *progname;
+
+int
+main(argc, argv)
+	int argc;
+	char *argv[];
+{
+	extern char *optarg;
+	extern int optind;
+	time_t now;
+	DB_ENV *dbenv;
+	u_int data_cnt, data_next;
+	int ch, checkpoint, copy_min, exitval, remove_max, ret, update, verbose;
+	char *backup_dir, **data_dir, **dir, *home, *log_dir, *passwd;
+
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
+		return (ret);
+
+	checkpoint = data_cnt = data_next = exitval = update = verbose = 0;
+	data_dir = NULL;
+	backup_dir = home = log_dir = passwd = NULL;
+	copy_min = remove_max = 0;
+	while ((ch = getopt(argc, argv, "b:cd:h:l:P:uVv")) != EOF)
+		switch (ch) {
+		case 'b':
+			backup_dir = optarg;
+			break;
+		case 'c':
+			checkpoint = 1;
+			break;
+		case 'd':
+			/*
+			 * User can specify a list of directories -- keep an
+			 * array, leaving room for the trailing NULL.
+			 */
+			if (data_dir == NULL || data_next >= data_cnt - 2) {
+				data_cnt = data_cnt == 0 ? 20 : data_cnt * 2;
+				if ((data_dir = realloc(data_dir,
+				    data_cnt * sizeof(*data_dir))) == NULL) {
+					fprintf(stderr, "%s: %s\n",
+					    progname, strerror(errno));
+					return (EXIT_FAILURE);
+				}
+			}
+			data_dir[data_next++] = optarg;
+			break;
+		case 'h':
+			home = optarg;
+			break;
+		case 'l':
+			log_dir = optarg;
+			break;
+		case 'P':
+			passwd = strdup(optarg);
+			memset(optarg, 0, strlen(optarg));
+			if (passwd == NULL) {
+				fprintf(stderr, "%s: strdup: %s\n",
+				    progname, strerror(errno));
+				return (EXIT_FAILURE);
+			}
+			break;
+		case 'u':
+			update = 1;
+			break;
+		case 'V':
+			printf("%s\n", db_version(NULL, NULL, NULL));
+			return (EXIT_SUCCESS);
+		case 'v':
+			verbose = 1;
+			break;
+		case '?':
+		default:
+			return (usage());
+		}
+	argc -= optind;
+	argv += optind;
+
+	if (argc != 0)
+		return (usage());
+
+	/* Handle possible interruptions. */
+	__db_util_siginit();
+
+	/*
+	 * The home directory defaults to the environment variable DB_HOME.
+	 * The log directory defaults to the home directory.
+	 *
+	 * We require a source database environment directory and a target
+	 * backup directory.
+	 */
+	if (home == NULL)
+		home = getenv("DB_HOME");
+	if (home == NULL) {
+		fprintf(stderr,
+		    "%s: no source database environment specified\n", progname);
+		return (usage());
+	}
+	if (log_dir == NULL)
+		log_dir = home;
+	if (backup_dir == NULL) {
+		fprintf(stderr,
+		    "%s: no target backup directory specified\n", progname);
+		return (usage());
+	}
+
+	/* NULL-terminate any list of data directories. */
+	if (data_dir != NULL)
+		data_dir[data_next] = NULL;
+
+	if (verbose) {
+		(void)time(&now);
+		printf("%s: hot backup started at %s", progname, ctime(&now));
+	}
+
+	/* Open the source environment. */
+	if ((ret = env_init(&dbenv, home, log_dir, passwd, OPEN_ORIGINAL)) != 0)
+		goto shutdown;
+
+	/*
+	 * If the -c option is specified, checkpoint the source home
+	 * database environment, and remove any unnecessary log files.
+	 */
+	if (checkpoint) {
+		if (verbose)
+			printf("%s: %s: force checkpoint\n", progname, home);
+		if ((ret =
+		    dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0) {
+			dbenv->err(dbenv, ret, "DB_ENV->txn_checkpoint");
+			goto shutdown;
+		}
+		if (!update) {
+			if (verbose)
+				printf("%s: %s: remove unnecessary log files\n",
+				    progname, home);
+			if ((ret = dbenv->log_archive(dbenv,
+			     NULL, DB_ARCH_REMOVE)) != 0) {
+				dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+				goto shutdown;
+			}
+		}
+	}
+
+	/*
+	 * If the target directory for the backup does not exist, create it
+	 * with mode read-write-execute for the owner.  Ignore errors here,
+	 * it's simpler and more portable to just always try the create.  If
+	 * there's a problem, we'll fail with reasonable errors later.
+	 */
+	(void)__os_mkdir(NULL, backup_dir, __db_omode("rwx------"));
+
+	/*
+	 * If the target directory for the backup does exist and the -u option
+	 * was specified, all log files in the target directory are removed;
+	 * if the -u option was not specified, all files in the target directory
+	 * are removed.
+	 */
+	if ((ret = backup_dir_clean(
+	    dbenv, backup_dir, &remove_max, update, verbose)) != 0)
+		goto shutdown;
+
+	/*
+	 * If the -u option was not specified, copy all database files found in
+	 * the database environment home directory, or any directory specified
+	 * using the -d option, into the target directory for the backup.
+	 */
+	if (!update) {
+		if (read_data_dir(dbenv, backup_dir, home, verbose) != 0)
+			goto shutdown;
+		if (data_dir != NULL)
+			for (dir = &data_dir[0]; *dir != NULL; ++dir)
+				if (read_data_dir(
+				    dbenv, backup_dir, *dir, verbose) != 0)
+					goto shutdown;
+	}
+
+	/*
+	 * Copy all log files found in the directory specified by the -l option
+	 * (or in the database environment home directory, if no -l option was
+	 * specified), into the target directory for the backup.
+	 *
+	 * The log directory defaults to the home directory.
+	 */
+	if (read_log_dir(dbenv,
+	     backup_dir, log_dir, &copy_min, update, verbose) != 0)
+		goto shutdown;
+
+	/*
+	 * If we're updating a snapshot, the lowest-numbered log file copied
+	 * into the backup directory should be less than, or equal to, the
+	 * highest-numbered log file removed from the backup directory during
+	 * cleanup.
+	 */
+	if (update && remove_max < copy_min &&
+	     !(remove_max == 0 && copy_min == 1)) {
+		fprintf(stderr,
+		    "%s: the largest log file removed (%d) must be greater\n",
+		    progname, remove_max);
+		fprintf(stderr,
+		    "%s: than or equal the smallest log file copied (%d)\n",
+		    progname, copy_min);
+		goto shutdown;
+	}
+
+	/* Close the source environment. */
+	if ((ret = dbenv->close(dbenv, 0)) != 0) {
+		fprintf(stderr,
+		    "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+		dbenv = NULL;
+		goto shutdown;
+	}
+	/* Perform catastrophic recovery on the hot backup. */
+	if (verbose)
+		printf("%s: %s: run catastrophic recovery\n",
+		    progname, backup_dir);
+	if ((ret = env_init(
+	    &dbenv, backup_dir, NULL, passwd, OPEN_HOT_BACKUP)) != 0)
+		goto shutdown;
+
+	/*
+	 * Remove any unnecessary log files from the hot backup.
+	 */
+	if (verbose)
+		printf("%s: %s: remove unnecessary log files\n",
+		    progname, backup_dir);
+	if ((ret =
+	    dbenv->log_archive(dbenv, NULL, DB_ARCH_REMOVE)) != 0) {
+		dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+		goto shutdown;
+	}
+
+	if (0) {
+shutdown:	exitval = 1;
+	}
+	if (dbenv != NULL && (ret = dbenv->close(dbenv, 0)) != 0) {
+		exitval = 1;
+		fprintf(stderr,
+		    "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+	}
+
+	if (data_dir != NULL)
+		free(data_dir);
+	if (passwd != NULL)
+		free(passwd);
+
+	if (exitval == 0) {
+		if (verbose) {
+			(void)time(&now);
+			printf("%s: hot backup completed at %s",
+			    progname, ctime(&now));
+		}
+	} else {
+		fprintf(stderr, "%s: HOT BACKUP FAILED!\n", progname);
+	}
+
+	/* Resend any caught signal. */
+	__db_util_sigresend();
+
+	return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+
+}
+
+/*
+ * env_init --
+ *	Open a database environment.
+ */
+int
+env_init(dbenvp, home, log_dir, passwd, which)
+	DB_ENV **dbenvp;
+	char *home, *log_dir, *passwd;
+	enum which_open which;
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	*dbenvp = NULL;
+
+	/*
+	 * Create an environment object and initialize it for error reporting.
+	 */
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr,
+		    "%s: db_env_create: %s\n", progname, db_strerror(ret));
+		return (1);
+	}
+
+	dbenv->set_errfile(dbenv, stderr);
+	setbuf(stderr, NULL);
+	dbenv->set_errpfx(dbenv, progname);
+	setvbuf(stdout, NULL, _IOLBF, 0);
+
+	/*
+	 * If a log directory has been specified, and it's not the same as the
+	 * home directory, set it for the environment.
+	 */
+	if (log_dir != NULL && log_dir != home &&
+	    (ret = dbenv->set_lg_dir(dbenv, log_dir)) != 0) {
+		dbenv->err(dbenv, ret, "DB_ENV->set_lg_dir: %s", log_dir);
+		return (1);
+	}
+
+	/* Optionally set the password. */
+	if (passwd != NULL &&
+	    (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+		dbenv->err(dbenv, ret, "DB_ENV->set_encrypt");
+		return (1);
+	}
+
+	switch (which) {
+	case OPEN_ORIGINAL:
+		/*
+		 * Opening the database environment we're trying to back up.
+		 * We try to attach to a pre-existing environment; if that
+		 * fails, we create a private environment and try again.
+		 */
+		if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 &&
+		    (ret == DB_VERSION_MISMATCH ||
+		    (ret = dbenv->open(dbenv, home, DB_CREATE |
+		    DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON,
+		    0)) != 0)) {
+			dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+			return (1);
+		}
+		break;
+	case OPEN_HOT_BACKUP:
+		/*
+		 * Opening the backup copy of the database environment.  We
+		 * better be the only user, we're running recovery.
+		 */
+		if ((ret = dbenv->open(dbenv, home, DB_CREATE |
+		    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE |
+		    DB_RECOVER_FATAL | DB_USE_ENVIRON, 0)) != 0) {
+			dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+			return (1);
+		}
+		break;
+	}
+
+	*dbenvp = dbenv;
+	return (0);
+}
+
+/*
+ * backup_dir_clean --
+ *	Clean out the backup directory.
+ */
+int
+backup_dir_clean(dbenv, backup_dir, remove_maxp, update, verbose)
+	DB_ENV *dbenv;
+	char *backup_dir;
+	int *remove_maxp, update, verbose;
+{
+	int cnt, fcnt, ret, v;
+	char **names;
+	char buf[2048];			/* MAXPATHLEN is too hard to find. */
+
+	/* Get a list of file names. */
+	if ((ret = __os_dirlist(dbenv, backup_dir, &names, &fcnt)) != 0) {
+		dbenv->err(dbenv, ret, "%s: directory read", backup_dir);
+		return (1);
+	}
+	for (cnt = fcnt; --cnt >= 0;) {
+		/*
+		 * Skip ".", ".." and log files (if update wasn't specified).
+		 */
+		if (!strcmp(names[cnt], ".") || !strcmp(names[cnt], ".."))
+			continue;
+		if (strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1)) {
+			if (update)
+				continue;
+		} else {
+			/* Track the highest-numbered log file removed. */
+			v = atoi(names[cnt] + sizeof(LFPREFIX) - 1);
+			if (*remove_maxp < v)
+				*remove_maxp = v;
+		}
+		if ((size_t)snprintf(buf, sizeof(buf),
+		    "%s/%s", backup_dir, names[cnt]) == sizeof(buf)) {
+			dbenv->err(dbenv, ret,
+			    "%s/%s: path too long", backup_dir, names[cnt]);
+			return (1);
+		}
+		if (verbose)
+			printf("%s: removing %s\n", progname, buf);
+		if ((ret = remove(buf)) != 0) {
+			dbenv->err(dbenv, ret, "%s: remove", buf);
+			return (1);
+		}
+	}
+
+	__os_dirfree(dbenv, names, fcnt);
+
+	if (verbose && *remove_maxp != 0)
+		printf("%s: highest numbered log file removed: %d\n",
+		    progname, *remove_maxp);
+
+	return (0);
+}
+
+/*
+ * read_data_dir --
+ *	Read a directory looking for databases to copy.
+ */
+int
+read_data_dir(dbenv, backup_dir, dir, verbose)
+	DB_ENV *dbenv;
+	char *backup_dir, *dir;
+	int verbose;
+{
+	int cnt, fcnt, ret;
+	char **names;
+	char buf[2048];			/* MAXPATHLEN is too hard to find. */
+
+	/* Get a list of file names. */
+	if ((ret = __os_dirlist(dbenv, dir, &names, &fcnt)) != 0) {
+		dbenv->err(dbenv, ret, "%s: directory read", dir);
+		return (1);
+	}
+	for (cnt = fcnt; --cnt >= 0;) {
+		/*
+		 * Skip ".", ".." and files in DB's name space (but not Queue
+		 * extent files, we need them).
+		 */
+		if (!strcmp(names[cnt], ".") || !strcmp(names[cnt], ".."))
+			continue;
+		if (!strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1))
+			continue;
+		if (!strncmp(names[cnt],
+		    DB_REGION_PREFIX, sizeof(DB_REGION_PREFIX) - 1))
+			continue;
+
+		/* Build a path name to the source. */
+		if ((size_t)snprintf(buf, sizeof(buf),
+		    "%s/%s", dir, names[cnt]) == sizeof(buf)) {
+			dbenv->errx(dbenv,
+			    "%s/%s: path too long", dir, names[cnt]);
+			return (1);
+		}
+
+		/* Copy the file. */
+		if ((ret = data_copy(
+		    dbenv, buf, backup_dir, names[cnt], verbose)) != 0)
+			return (1);
+	}
+
+	__os_dirfree(dbenv, names, fcnt);
+
+	return (0);
+}
+
+/*
+ * read_log_dir --
+ *	Read a directory looking for log files to copy.
+ */
+int
+read_log_dir(dbenv, backup_dir, log_dir, copy_minp, update, verbose)
+	DB_ENV *dbenv;
+	char *backup_dir, *log_dir;
+	int *copy_minp, update, verbose;
+{
+	int aflag, ret, v;
+	char **begin, **names;
+	char from[2048], to[2048];	/* MAXPATHLEN is too hard to find. */
+
+again:	aflag = DB_ARCH_LOG;
+
+	/*
+	 * If this is an update and we are deleting files, first process
+	 * those files that can be removed, then repeat with the rest.
+	 */
+	if (update)
+		aflag = 0;
+	/* Get a list of file names to be copied. */
+	if ((ret = dbenv->log_archive(dbenv, &names, aflag)) != 0) {
+		dbenv->err(dbenv, ret, "%s: log_archive", log_dir);
+		return (1);
+	}
+	if (names == NULL)
+		goto done;
+	begin = names;
+	for (; *names != NULL; names++) {
+		/* Track the lowest-numbered log file copied. */
+		v = atoi(*names + sizeof(LFPREFIX) - 1);
+		if (*copy_minp == 0 || *copy_minp > v)
+			*copy_minp = v;
+
+		/* Build a path name to the source. */
+		if ((size_t)snprintf(from, sizeof(from),
+		    "%s/%s", log_dir, *names) == sizeof(from)) {
+			dbenv->errx(dbenv,
+			    "%s/%s: path too long", log_dir, *names);
+			return (1);
+		}
+
+		/*
+		 * If we're going to remove the file, attempt to rename it
+		 * instead of copying and then removing.  The likely failure
+		 * is EXDEV (source and destination are on different volumes).
+		 * Fall back to a copy, regardless of the error.  We don't
+		 * worry about partial contents, the copy truncates the file
+		 * on open.
+		 */
+		if (update) {
+			if ((size_t)snprintf(to, sizeof(to),
+			    "%s/%s", backup_dir, *names) == sizeof(to)) {
+				dbenv->errx(dbenv,
+				    "%s/%s: path too long", backup_dir, *names);
+				return (1);
+			}
+			if (rename(from, to) == 0) {
+				if (verbose)
+					printf("%s: moving %s to %s\n",
+					   progname, from, to);
+				continue;
+			}
+		}
+
+		/* Copy the file. */
+		if ((ret = data_copy(dbenv,
+		    from, backup_dir, *names, verbose)) != 0)
+			return (1);
+
+		if (update) {
+			if (verbose)
+				printf("%s: removing %s\n", progname, from);
+			if ((ret = __os_unlink(dbenv, from)) != 0) {
+				dbenv->err(dbenv, ret,
+				     "unlink of %s failed", from);
+				return (1);
+			}
+		}
+
+	}
+
+	free(begin);
+done:	if (update) {
+		update = 0;
+		goto again;
+	}
+
+	if (verbose && *copy_minp != 0)
+		printf("%s: lowest numbered log file copied: %d\n",
+		    progname, *copy_minp);
+
+	return (0);
+}
+
+/*
+ * data_copy --
+ *	Copy a file into the backup directory.
+ */
+int
+data_copy(dbenv, from, to_dir, to_file, verbose)
+	DB_ENV *dbenv;
+	char *from, *to_dir, *to_file;
+	int verbose;
+{
+	ssize_t nr, nw;
+	size_t offset;
+	int ret, rfd, wfd;
+	char *buf, *taddr;
+
+	ret = 0;
+	rfd = wfd = -1;
+
+	if (verbose)
+		printf("%s: copying %s to %s/%s\n",
+		    progname, from, to_dir, to_file);
+
+	/*
+	 * We MUST copy multiples of the page size, atomically, to ensure a
+	 * database page is not updated by another thread of control during
+	 * the copy.
+	 *
+	 * !!!
+	 * The current maximum page size for Berkeley DB is 64KB; we will have
+	 * to increase this value if the maximum page size is ever more than a
+	 * megabyte
+	 */
+	if ((buf = malloc(MEGABYTE)) == NULL) {
+		dbenv->err(dbenv,
+		    errno, "%lu buffer allocation", (u_long)MEGABYTE);
+		return (1);
+	}
+
+	/* Open the input file. */
+	if ((rfd = open(from, O_RDONLY, 0)) == -1) {
+		dbenv->err(dbenv, errno, "%s", from);
+		goto err;
+	}
+
+	/* Open the output file. */
+	if ((u_int32_t)snprintf(
+	    buf, MEGABYTE, "%s/%s", to_dir, to_file) == MEGABYTE) {
+		dbenv->errx(dbenv, "%s/%s: path too long", to_dir, to_file);
+		goto err;
+	}
+	if ((wfd = open(
+	    buf, O_CREAT | O_TRUNC | O_WRONLY, __db_omode(OWNER_RW))) == -1)
+		goto err;
+
+	/* Copy the data. */
+	while ((nr = read(rfd, buf, MEGABYTE)) > 0)
+		for (taddr = buf, offset = 0;
+		    offset < (size_t)nr; taddr += nw, offset += (size_t)nw) {
+			RETRY_CHK(((nw = write(wfd,
+			    taddr, (u_int)(nr - offset))) < 0 ? 1 : 0), ret);
+			if (ret != 0)
+				break;
+		}
+	if (nr == -1) {
+		dbenv->err(dbenv, errno, "%s: read", from);
+		goto err;
+	}
+
+	if (ret != 0) {
+		dbenv->err(dbenv, errno, "%s: write %s/%s", to_dir, to_file);
+		goto err;
+	}
+
+	if (0) {
+err:		ret = 1;
+	}
+	if (buf != NULL)
+		free(buf);
+
+	if (rfd != -1)
+		(void)close(rfd);
+
+	/* We may be running on a remote filesystem; force the flush. */
+	if (wfd != -1 && (fsync(wfd) != 0 || close(wfd) != 0)) {
+		dbenv->err(dbenv,
+		    errno, "%s: fsync %s/%s", to_dir, to_file);
+		ret = 1;
+	}
+	return (ret);
+}
+
+int
+usage()
+{
+	(void)fprintf(stderr, "usage: %s [-cuVv]\n\t%s\n", progname,
+    "[-d data_dir ...] [-h home] [-l log_dir] [-P password] -b backup_dir");
+	return (EXIT_FAILURE);
+}
+
+int
+version_check()
+{
+	int v_major, v_minor, v_patch;
+
+	/* Make sure we're loaded with the right version of the DB library. */
+	(void)db_version(&v_major, &v_minor, &v_patch);
+	if (v_major != DB_VERSION_MAJOR || v_minor != DB_VERSION_MINOR) {
+		fprintf(stderr,
+	"%s: version %d.%d doesn't match library version %d.%d\n",
+		    progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+		    v_major, v_minor);
+		return (EXIT_FAILURE);
+	}
+	return (0);
+}
diff --git a/storage/bdb/db_load/db_load.c b/storage/bdb/db_load/db_load.c
index db0caf42d58..c47bd585452 100644
--- a/storage/bdb/db_load/db_load.c
+++ b/storage/bdb/db_load/db_load.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_load.c,v 11.100 2004/10/29 17:29:02 bostic Exp $
+ * $Id: db_load.c,v 12.8 2005/06/16 20:21:23 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -57,7 +57,9 @@ int	load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
 int	main __P((int, char *[]));
 int	rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
 int	usage __P((void));
-int	version_check __P((const char *));
+int	version_check __P((void));
+
+const char *progname;
 
 #define	G(f)	((LDG *)dbenv->app_private)->f
 
@@ -81,7 +83,15 @@ main(argc, argv)
 	int ch, existed, exitval, ret;
 	char **clist, **clp;
 
-	ldg.progname = "db_load";
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
+		return (ret);
+
+	ldg.progname = progname;
 	ldg.lineno = 0;
 	ldg.endodata = ldg.endofile = 0;
 	ldg.version = 1;
@@ -90,9 +100,6 @@ main(argc, argv)
 	ldg.home = NULL;
 	ldg.passwd = NULL;
 
-	if ((ret = version_check(ldg.progname)) != 0)
-		return (ret);
-
 	mode = NOTSET;
 	ldf = 0;
 	exitval = existed = 0;
@@ -218,11 +225,11 @@ main(argc, argv)
 	switch (mode) {
 	case FILEID_RESET:
 		exitval = dbenv->fileid_reset(
-		    dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0);
+		    dbenv, argv[0], ldf & LDF_PASSWORD ? DB_ENCRYPT : 0);
 		break;
 	case LSN_RESET:
 		exitval = dbenv->lsn_reset(
-		    dbenv, argv[0], ldf & LDF_PASSWORD ? 1 : 0);
+		    dbenv, argv[0], ldf & LDF_PASSWORD ? DB_ENCRYPT : 0);
 		break;
 	case NOTSET:
 	case STANDARD_LOAD:
@@ -401,7 +408,7 @@ retry_db:
 	/* Open the DB file. */
 	if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
 	    DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
-	    __db_omode("rwrwrw"))) != 0) {
+	    __db_omode("rw-rw-rw-"))) != 0) {
 		dbp->err(dbp, ret, "DB->open: %s", name);
 		goto err;
 	}
@@ -652,23 +659,22 @@ err:	dbenv->err(dbenv, ret, "DB_ENV->open");
 			if ((ret = dbp->set_flags(dbp, flag)) != 0) {	\
 				dbp->err(dbp, ret, "%s: set_flags: %s",	\
 				    G(progname), name);			\
-				return (1);				\
+				goto err;				\
 			}						\
 			break;						\
 		case '0':						\
 			break;						\
 		default:						\
 			badnum(dbenv);					\
-			return (1);					\
+			goto err;					\
 		}							\
 		continue;						\
 	}
 #define	NUMBER(name, value, keyword, func, t)				\
 	if (strcmp(name, keyword) == 0) {				\
-		if (__db_getlong(dbenv,					\
-		    NULL, value, 1, LONG_MAX, &val) != 0)		\
-			return (1);					\
-		if ((ret = dbp->func(dbp, (t)val)) != 0)		\
+		if ((ret = __db_getlong(dbenv,				\
+		    NULL, value, 0, LONG_MAX, &val)) != 0 ||		\
+		    (ret = dbp->func(dbp, (t)val)) != 0)		\
 			goto nameerr;					\
 		continue;						\
 	}
@@ -725,9 +731,6 @@ configure(dbenv, dbp, clp, subdbp, keysp)
 			continue;
 		}
 
-#ifdef notyet
-		NUMBER(name, value, "bt_maxkey", set_bt_maxkey, u_int32_t);
-#endif
 		NUMBER(name, value, "bt_minkey", set_bt_minkey, u_int32_t);
 		NUMBER(name, value, "db_lorder", set_lorder, int);
 		NUMBER(name, value, "db_pagesize", set_pagesize, u_int32_t);
@@ -749,7 +752,7 @@ configure(dbenv, dbp, clp, subdbp, keysp)
 
 nameerr:
 	dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
-	return (1);
+err:	return (1);
 }
 
 /*
@@ -771,7 +774,7 @@ rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
 
 	*dbtypep = DB_UNKNOWN;
 	*checkprintp = 0;
-	name = p = NULL;
+	name = NULL;
 
 	/*
 	 * We start with a smallish buffer;  most headers are small.
@@ -780,10 +783,8 @@ rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
 	buflen = 4096;
 	if (G(hdrbuf) == NULL) {
 		hdr = 0;
-		if ((buf = malloc(buflen)) == NULL) {
-memerr:			dbp->errx(dbp, "could not allocate buffer %d", buflen);
-			return (1);
-		}
+		if ((buf = malloc(buflen)) == NULL)
+			goto memerr;
 		G(hdrbuf) = buf;
 		G(origline) = G(lineno);
 	} else {
@@ -832,7 +833,6 @@ memerr:			dbp->errx(dbp, "could not allocate buffer %d", buflen);
 		start += linelen;
 
 		if (name != NULL) {
-			*p = '=';
 			free(name);
 			name = NULL;
 		}
@@ -939,19 +939,21 @@ memerr:			dbp->errx(dbp, "could not allocate buffer %d", buflen);
 		goto err;
 	}
 	ret = 0;
+
 	if (0) {
 nameerr:	dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
-err:		ret = 1;
+		ret = 1;
 	}
 	if (0) {
 badfmt:		dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
 		ret = 1;
 	}
-	if (name != NULL) {
-		if (p != NULL)
-			*p = '=';
-		free(name);
+	if (0) {
+memerr:		dbp->errx(dbp, "unable to allocate memory");
+err:		ret = 1;
 	}
+	if (name != NULL)
+		free(name);
 	return (ret);
 }
 
@@ -1293,17 +1295,16 @@ badend(dbenv)
 int
 usage()
 {
-	(void)fprintf(stderr, "%s\n\t%s\n",
-	    "usage: db_load [-nTV] [-c name=value] [-f file]",
+	(void)fprintf(stderr, "usage: %s %s\n\t%s\n", progname,
+	    "[-nTV] [-c name=value] [-f file]",
     "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
-	(void)fprintf(stderr, "%s\n",
-	    "usage: db_load -r lsn | fileid [-h home] [-P password] db_file");
+	(void)fprintf(stderr, "usage: %s %s\n",
+	    progname, "-r lsn | fileid [-h home] [-P password] db_file");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_printlog/README b/storage/bdb/db_printlog/README
index d625964296c..eca5383cb58 100644
--- a/storage/bdb/db_printlog/README
+++ b/storage/bdb/db_printlog/README
@@ -1,4 +1,4 @@
-# $Id: README,v 10.7 2004/09/24 00:43:16 bostic Exp $
+# $Id: README,v 12.0 2004/11/17 03:43:23 bostic Exp $
 
 Berkeley DB log dump utility.  This utility dumps out a DB log in human
 readable form, a record at a time, to assist in recovery and transaction
diff --git a/storage/bdb/db_printlog/commit.awk b/storage/bdb/db_printlog/commit.awk
index 66391d3fb63..4f03fd2ce50 100644
--- a/storage/bdb/db_printlog/commit.awk
+++ b/storage/bdb/db_printlog/commit.awk
@@ -1,4 +1,4 @@
-# $Id: commit.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+# $Id: commit.awk,v 12.0 2004/11/17 03:43:24 bostic Exp $
 #
 # Output tid of committed transactions.
 
diff --git a/storage/bdb/db_printlog/count.awk b/storage/bdb/db_printlog/count.awk
index 1d5a291950f..6a80cbe1b60 100644
--- a/storage/bdb/db_printlog/count.awk
+++ b/storage/bdb/db_printlog/count.awk
@@ -1,4 +1,4 @@
-# $Id: count.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+# $Id: count.awk,v 12.0 2004/11/17 03:43:24 bostic Exp $
 #
 # Print out the number of log records for transactions that we
 # encountered.
diff --git a/storage/bdb/db_printlog/db_printlog.c b/storage/bdb/db_printlog/db_printlog.c
index d06477160a8..4a96efd9073 100644
--- a/storage/bdb/db_printlog/db_printlog.c
+++ b/storage/bdb/db_printlog/db_printlog.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_printlog.c,v 11.64 2004/06/17 17:35:17 bostic Exp $
+ * $Id: db_printlog.c,v 12.5 2005/09/09 12:38:33 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -33,12 +33,14 @@ static const char copyright[] =
 #include "dbinc/qam.h"
 #include "dbinc/txn.h"
 
-int lsn_arg __P((const char *, char *, DB_LSN *));
+int lsn_arg __P((char *, DB_LSN *));
 int main __P((int, char *[]));
 int open_rep_db __P((DB_ENV *, DB **, DBC **));
 int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -47,7 +49,6 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_printlog";
 	DB *dbp;
 	DBC *dbc;
 	DBT data, keydbt;
@@ -60,7 +61,12 @@ main(argc, argv)
 	int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 	char *home, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbp = NULL;
@@ -77,11 +83,11 @@ main(argc, argv)
 	while ((ch = getopt(argc, argv, "b:e:h:NP:rRV")) != EOF)
 		switch (ch) {
 		case 'b':
-			if (lsn_arg(progname, optarg, &start))
+			if (lsn_arg(optarg, &start))
 				return (usage());
 			break;
 		case 'e':
-			if (lsn_arg(progname, optarg, &stop))
+			if (lsn_arg(optarg, &stop))
 				return (usage());
 			break;
 		case 'h':
@@ -177,8 +183,7 @@ main(argc, argv)
 			dbenv->err(dbenv, ret, "DB_ENV->open");
 			goto shutdown;
 		}
-	} else if ((ret = dbenv->open(dbenv, home,
-	    DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+	} else if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 &&
 	    (ret == DB_VERSION_MISMATCH ||
 	    (ret = dbenv->open(dbenv, home,
 	    DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)) {
@@ -297,14 +302,13 @@ shutdown:	exitval = 1;
 int
 usage()
 {
-	fprintf(stderr, "usage: db_printlog %s\n",
+	fprintf(stderr, "usage: %s %s\n", progname,
 	    "[-NrV] [-b file/offset] [-e file/offset] [-h home] [-P password]");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
@@ -400,9 +404,8 @@ err:	if (*dbpp != NULL)
  *	Parse a LSN argument.
  */
 int
-lsn_arg(progname, optarg, lsnp)
-	const char *progname;
-	char *optarg;
+lsn_arg(arg, lsnp)
+	char *arg;
 	DB_LSN *lsnp;
 {
 	char *p;
@@ -413,11 +416,11 @@ lsn_arg(progname, optarg, lsnp)
 	 *
 	 * Don't use getsubopt(3), some systems don't have it.
 	 */
-	if ((p = strchr(optarg, '/')) == NULL)
+	if ((p = strchr(arg, '/')) == NULL)
 		return (1);
 	*p = '\0';
 
-	if (__db_getulong(NULL, progname, optarg, 0, 0, &uval))
+	if (__db_getulong(NULL, progname, arg, 0, 0, &uval))
 		return (1);
 	if (uval > UINT32_MAX)
 		return (1);
diff --git a/storage/bdb/db_printlog/dbname.awk b/storage/bdb/db_printlog/dbname.awk
index 41ef97a162d..a864c95dd53 100644
--- a/storage/bdb/db_printlog/dbname.awk
+++ b/storage/bdb/db_printlog/dbname.awk
@@ -1,4 +1,4 @@
-# $Id: dbname.awk,v 1.7 2003/11/21 20:00:03 ubell Exp $
+# $Id: dbname.awk,v 12.1 2005/03/23 04:56:51 ubell Exp $
 #
 # Take a comma-separated list of database names and spit out all the
 # log records that affect those databases.
@@ -58,6 +58,7 @@ NR == 1 {
 	} else if ($2 <= nreg && files[$2] == 1) {
 		printme = 1
 	}
+	myfile = -1;
 }
 
 /^\[/{
diff --git a/storage/bdb/db_printlog/fileid.awk b/storage/bdb/db_printlog/fileid.awk
index 020644039ab..853ba866c99 100644
--- a/storage/bdb/db_printlog/fileid.awk
+++ b/storage/bdb/db_printlog/fileid.awk
@@ -1,4 +1,4 @@
-# $Id: fileid.awk,v 10.4 2000/07/17 22:07:17 ubell Exp $
+# $Id: fileid.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Take a comma-separated list of file numbers and spit out all the
 # log records that affect those file numbers.
diff --git a/storage/bdb/db_printlog/logstat.awk b/storage/bdb/db_printlog/logstat.awk
index 1009343eba4..83386465375 100644
--- a/storage/bdb/db_printlog/logstat.awk
+++ b/storage/bdb/db_printlog/logstat.awk
@@ -1,4 +1,4 @@
-# $Id: logstat.awk,v 1.1 2002/05/10 15:19:13 bostic Exp $
+# $Id: logstat.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Output accumulated log record count/size statistics.
 BEGIN {
diff --git a/storage/bdb/db_printlog/pgno.awk b/storage/bdb/db_printlog/pgno.awk
index 289fa853bc4..f58713523f1 100644
--- a/storage/bdb/db_printlog/pgno.awk
+++ b/storage/bdb/db_printlog/pgno.awk
@@ -1,4 +1,4 @@
-# $Id: pgno.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+# $Id: pgno.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Take a comma-separated list of page numbers and spit out all the
 # log records that affect those page numbers.
diff --git a/storage/bdb/db_printlog/range.awk b/storage/bdb/db_printlog/range.awk
index 7abb410b40f..045c7fb2070 100644
--- a/storage/bdb/db_printlog/range.awk
+++ b/storage/bdb/db_printlog/range.awk
@@ -1,4 +1,4 @@
-# $Id: range.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+# $Id: range.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Print out a range of the log
 
diff --git a/storage/bdb/db_printlog/rectype.awk b/storage/bdb/db_printlog/rectype.awk
index f30124cac43..25b28008561 100644
--- a/storage/bdb/db_printlog/rectype.awk
+++ b/storage/bdb/db_printlog/rectype.awk
@@ -1,4 +1,4 @@
-# $Id: rectype.awk,v 11.4 2004/04/19 09:36:58 bostic Exp $
+# $Id: rectype.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Print out a range of the log.
 # Command line should set RECTYPE to a comma separated list
diff --git a/storage/bdb/db_printlog/status.awk b/storage/bdb/db_printlog/status.awk
index a0c381ed9e2..0433312debf 100644
--- a/storage/bdb/db_printlog/status.awk
+++ b/storage/bdb/db_printlog/status.awk
@@ -1,4 +1,4 @@
-# $Id: status.awk,v 10.5 2004/09/24 00:43:17 bostic Exp $
+# $Id: status.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Read through db_printlog output and list all the transactions encountered
 # and whether they committed or aborted.
diff --git a/storage/bdb/db_printlog/txn.awk b/storage/bdb/db_printlog/txn.awk
index be8c44e1092..12f283ebf79 100644
--- a/storage/bdb/db_printlog/txn.awk
+++ b/storage/bdb/db_printlog/txn.awk
@@ -1,4 +1,4 @@
-# $Id: txn.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+# $Id: txn.awk,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Print out all the records for a comma-separated list of transaction ids.
 NR == 1 {
diff --git a/storage/bdb/db_recover/db_recover.c b/storage/bdb/db_recover/db_recover.c
index 75961f78c86..5d9b5886b81 100644
--- a/storage/bdb/db_recover/db_recover.c
+++ b/storage/bdb/db_recover/db_recover.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_recover.c,v 11.41 2004/01/28 03:36:00 bostic Exp $
+ * $Id: db_recover.c,v 12.5 2005/06/16 20:21:29 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -36,9 +36,11 @@ static const char copyright[] =
 #include "db_int.h"
 
 int main __P((int, char *[]));
-int read_timestamp __P((const char *, char *, time_t *));
+int read_timestamp __P((char *, time_t *));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -47,14 +49,18 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_recover";
 	DB_ENV	*dbenv;
 	time_t timestamp;
 	u_int32_t flags;
 	int ch, exitval, fatal_recover, ret, retain_env, verbose;
 	char *home, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	home = passwd = NULL;
@@ -81,8 +87,7 @@ main(argc, argv)
 			}
 			break;
 		case 't':
-			if ((ret =
-			    read_timestamp(progname, optarg, ×tamp)) != 0)
+			if ((ret = read_timestamp(optarg, ×tamp)) != 0)
 				return (ret);
 			break;
 		case 'V':
@@ -142,10 +147,10 @@ main(argc, argv)
 	 * certainly use DB_CONFIG files in the directory.
 	 */
 	flags = 0;
-	LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+	LF_SET(DB_CREATE | DB_INIT_LOG |
 	    DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
 	LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
-	LF_SET(retain_env ? 0 : DB_PRIVATE);
+	LF_SET(retain_env ? DB_INIT_LOCK : DB_PRIVATE);
 	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
 		dbenv->err(dbenv, ret, "DB_ENV->open");
 		goto shutdown;
@@ -204,8 +209,7 @@ shutdown:	exitval = 1;
  * SUCH DAMAGE.
  */
 int
-read_timestamp(progname, arg, timep)
-	const char *progname;
+read_timestamp(arg, timep)
 	char *arg;
 	time_t *timep;
 {
@@ -276,14 +280,13 @@ terr:		fprintf(stderr,
 int
 usage()
 {
-	(void)fprintf(stderr, "%s\n",
-"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+	(void)fprintf(stderr, "usage: %s %s\n", progname,
+	    "[-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_stat/db_stat.c b/storage/bdb/db_stat/db_stat.c
index aa191865503..9b6fff88f6c 100644
--- a/storage/bdb/db_stat/db_stat.c
+++ b/storage/bdb/db_stat/db_stat.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_stat.c,v 11.158 2004/07/15 18:26:48 ubell Exp $
+ * $Id: db_stat.c,v 12.6 2005/10/05 22:27:27 ubell Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -36,15 +36,16 @@ static const char copyright[] =
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
-#include "dbinc/txn.h"
 
 typedef enum { T_NOTSET,
-    T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
+    T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_MUTEX, T_REP, T_TXN } test_t;
 
 int	 db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
 int	 main __P((int, char *[]));
 int	 usage __P((void));
-int	 version_check __P((const char *));
+int	 version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -53,7 +54,6 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_stat";
 	DB_ENV	*dbenv;
 	DB_BTREE_STAT *sp;
 	DB *alt_dbp, *dbp;
@@ -63,7 +63,12 @@ main(argc, argv)
 	int nflag, private, resize, ret;
 	char *db, *home, *p, *passwd, *subdb;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
@@ -74,7 +79,8 @@ main(argc, argv)
 	db = home = passwd = subdb = NULL;
 	env_flags = 0;
 
-	while ((ch = getopt(argc, argv, "C:cd:Eefh:L:lM:mNP:R:rs:tVZ")) != EOF)
+	while ((ch = getopt(argc,
+	    argv, "C:cd:Eefh:L:lM:mNP:R:rs:tVxX:Z")) != EOF)
 		switch (ch) {
 		case 'C': case 'c':
 			if (ttype != T_NOTSET && ttype != T_LOCK)
@@ -201,6 +207,20 @@ argcombo:			fprintf(stderr,
 		case 'V':
 			printf("%s\n", db_version(NULL, NULL, NULL));
 			return (EXIT_SUCCESS);
+		case 'X': case 'x':
+			if (ttype != T_NOTSET && ttype != T_MUTEX)
+				goto argcombo;
+			ttype = T_MUTEX;
+			if (ch != 'x')
+				for (p = optarg; *p; ++p)
+					switch (*p) {
+						case 'A':
+							LF_SET(DB_STAT_ALL);
+							break;
+						default:
+							return (usage());
+					}
+			break;
 		case 'Z':
 			LF_SET(DB_STAT_CLEAR);
 			break;
@@ -225,6 +245,7 @@ argcombo:			fprintf(stderr,
 	case T_MPOOL:
 	case T_REP:
 	case T_TXN:
+	case T_MUTEX:
 		if (fast != 0)
 			return (usage());
 		break;
@@ -351,6 +372,10 @@ retry:	if ((ret = db_env_create(&dbenv, env_flags)) != 0) {
 		if (dbenv->memp_stat_print(dbenv, flags))
 			goto err;
 		break;
+	case T_MUTEX:
+		if (dbenv->mutex_stat_print(dbenv, flags))
+			goto err;
+		break;
 	case T_REP:
 		if (dbenv->rep_stat_print(dbenv, flags))
 			goto err;
@@ -412,8 +437,7 @@ db_init(dbenv, home, ttype, cache, is_private)
 	 * error, I think.
 	 */
 	*is_private = 0;
-	if ((ret =
-	    dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) == 0)
 		return (0);
 	if (ret == DB_VERSION_MISMATCH)
 		goto err;
@@ -456,17 +480,16 @@ err:	dbenv->err(dbenv, ret, "DB_ENV->open");
 int
 usage()
 {
-	fprintf(stderr, "usage: db_stat %s\n",
+	fprintf(stderr, "usage: %s %s\n", progname,
 	    "-d file [-fN] [-h home] [-P password] [-s database]");
-	fprintf(stderr, "usage: db_stat %s\n\t%s\n",
-	    "[-cEelmNrtVZ] [-C Aclop]",
-	    "[-h home] [-L A] [-M A] [-P password] [-R A]");
+	fprintf(stderr, "usage: %s %s\n\t%s\n", progname,
+	    "[-cEelmNrtVxZ] [-C Aclop]",
+	    "[-h home] [-L A] [-M A] [-P password] [-R A] [-X A]");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_stat/dd.sh b/storage/bdb/db_stat/dd.sh
index d8bb7033f98..4e00c289a5a 100644
--- a/storage/bdb/db_stat/dd.sh
+++ b/storage/bdb/db_stat/dd.sh
@@ -1,5 +1,5 @@
 #! /bin/sh
-#	$Id: dd.sh,v 1.3 2004/05/04 15:51:45 bostic Exp $
+#	$Id: dd.sh,v 12.0 2004/11/17 03:43:25 bostic Exp $
 #
 # Display environment's deadlocks based on "db_stat -Co" output.
 
diff --git a/storage/bdb/db_upgrade/db_upgrade.c b/storage/bdb/db_upgrade/db_upgrade.c
index 0f43be6ba77..724034dc73c 100644
--- a/storage/bdb/db_upgrade/db_upgrade.c
+++ b/storage/bdb/db_upgrade/db_upgrade.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_upgrade.c,v 1.37 2004/06/10 01:00:09 bostic Exp $
+ * $Id: db_upgrade.c,v 12.5 2005/09/09 12:38:36 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -27,7 +27,9 @@ static const char copyright[] =
 
 int main __P((int, char *[]));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -36,21 +38,25 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_upgrade";
 	DB *dbp;
 	DB_ENV *dbenv;
 	u_int32_t flags;
-	int ch, exitval, nflag, ret, t_ret;
+	int ch, exitval, nflag, ret, t_ret, verbose;
 	char *home, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
-	flags = nflag = 0;
+	flags = nflag = verbose = 0;
 	exitval = 0;
 	home = passwd = NULL;
-	while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
+	while ((ch = getopt(argc, argv, "h:NP:sVv")) != EOF)
 		switch (ch) {
 		case 'h':
 			home = optarg;
@@ -73,6 +79,9 @@ main(argc, argv)
 		case 'V':
 			printf("%s\n", db_version(NULL, NULL, NULL));
 			return (EXIT_SUCCESS);
+		case 'v':
+			verbose = 1;
+			break;
 		case '?':
 		default:
 			return (usage());
@@ -120,8 +129,7 @@ main(argc, argv)
 	 * If attaching to a pre-existing environment fails, create a
 	 * private one and try again.
 	 */
-	if ((ret = dbenv->open(dbenv,
-	    home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+	if ((ret = dbenv->open(dbenv, home, DB_USE_ENVIRON, 0)) != 0 &&
 	    (ret == DB_VERSION_MISMATCH ||
 	    (ret = dbenv->open(dbenv, home,
 	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON,
@@ -146,6 +154,13 @@ main(argc, argv)
 		}
 		if (ret != 0)
 			goto shutdown;
+		/*
+		 * People get concerned if they don't see a success message.
+		 * If verbose is set, give them one.
+		 */
+		if (verbose)
+			printf("%s: %s upgraded successfully\n",
+			    progname, argv[0]);
 	}
 
 	if (0) {
@@ -169,14 +184,13 @@ shutdown:	exitval = 1;
 int
 usage()
 {
-	fprintf(stderr, "%s\n",
-	    "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+	fprintf(stderr, "usage: %s %s\n", progname,
+	    "[-NsVv] [-h home] [-P password] db_file ...");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/db_verify/db_verify.c b/storage/bdb/db_verify/db_verify.c
index 148ce1f8e43..d2763429239 100644
--- a/storage/bdb/db_verify/db_verify.c
+++ b/storage/bdb/db_verify/db_verify.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_verify.c,v 1.49 2004/08/01 00:21:58 bostic Exp $
+ * $Id: db_verify.c,v 12.3 2005/06/16 20:21:37 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -27,7 +27,9 @@ static const char copyright[] =
 
 int main __P((int, char *[]));
 int usage __P((void));
-int version_check __P((const char *));
+int version_check __P((void));
+
+const char *progname;
 
 int
 main(argc, argv)
@@ -36,7 +38,6 @@ main(argc, argv)
 {
 	extern char *optarg;
 	extern int optind;
-	const char *progname = "db_verify";
 	DB *dbp, *dbp1;
 	DB_ENV *dbenv;
 	u_int32_t flags, cache;
@@ -44,7 +45,12 @@ main(argc, argv)
 	int quiet, resize, ret;
 	char *home, *passwd;
 
-	if ((ret = version_check(progname)) != 0)
+	if ((progname = strrchr(argv[0], '/')) == NULL)
+		progname = argv[0];
+	else
+		++progname;
+
+	if ((ret = version_check()) != 0)
 		return (ret);
 
 	dbenv = NULL;
@@ -230,14 +236,13 @@ shutdown:	exitval = 1;
 int
 usage()
 {
-	fprintf(stderr, "%s\n",
-	    "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+	fprintf(stderr, "usage: %s %s\n", progname,
+	    "[-NoqV] [-h home] [-P password] db_file ...");
 	return (EXIT_FAILURE);
 }
 
 int
-version_check(progname)
-	const char *progname;
+version_check()
 {
 	int v_major, v_minor, v_patch;
 
diff --git a/storage/bdb/dbinc/btree.h b/storage/bdb/dbinc/btree.h
index d6bb2c839e1..b5fe4f2bbca 100644
--- a/storage/bdb/dbinc/btree.h
+++ b/storage/bdb/dbinc/btree.h
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: btree.h,v 11.50 2004/07/22 21:52:57 bostic Exp $
+ * $Id: btree.h,v 12.8 2005/08/08 14:52:30 bostic Exp $
  */
 #ifndef	_DB_BTREE_H_
 #define	_DB_BTREE_H_
@@ -74,6 +74,7 @@ struct __recno;		typedef struct __recno RECNO;
 /* Flags for __bam_stkrel(). */
 #define	STK_CLRDBC	0x01		/* Clear dbc->page reference. */
 #define	STK_NOLOCK	0x02		/* Don't retain locks. */
+#define	STK_PGONLY	0x04
 
 /* Flags for __ram_ca(). These get logged, so make the values explicit. */
 typedef enum {
@@ -113,6 +114,11 @@ typedef enum {
 					 * return an entry one past end-of-page.
 					 */
 #define	S_STK_ONLY	0x04000		/* Just return info in the stack */
+#define	S_MAX		0x08000		/* Get the right most key */
+#define	S_MIN		0x10000		/* Get the left most key */
+#define	S_NEXT		0x20000		/* Get the page after this key */
+#define	S_DEL		0x40000		/* Get the tree to delete this key. */
+#define	S_START		0x80000		/* Level to start stack. */
 
 #define	S_DELETE	(S_WRITE | S_DUPFIRST | S_DELNO | S_EXACT | S_STACK)
 #define	S_FIND		(S_READ | S_DUPFIRST | S_DELNO)
@@ -153,7 +159,7 @@ struct __epg {
 	if ((ret = ((c)->csp == (c)->esp ?				\
 	    __bam_stkgrow(dbenv, c) : 0)) == 0) {			\
 		(c)->csp->page = pagep;					\
-		(c)->csp->indx = page_indx;				\
+		(c)->csp->indx = (page_indx);				\
 		(c)->csp->entries = NUM_ENT(pagep);			\
 		(c)->csp->lock = l;					\
 		(c)->csp->lock_mode = mode;				\
@@ -166,10 +172,10 @@ struct __epg {
 } while (0)
 
 #define	BT_STK_NUM(dbenv, c, pagep, page_indx, ret) do {		\
-	if ((ret =							\
-	    (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) {	\
+	if ((ret = ((c)->csp ==						\
+	    (c)->esp ? __bam_stkgrow(dbenv, c) : 0)) == 0) {		\
 		(c)->csp->page = NULL;					\
-		(c)->csp->indx = page_indx;				\
+		(c)->csp->indx = (page_indx);				\
 		(c)->csp->entries = NUM_ENT(pagep);			\
 		LOCK_INIT((c)->csp->lock);				\
 		(c)->csp->lock_mode = DB_LOCK_NG;			\
@@ -259,7 +265,6 @@ struct __btree {			/* Btree access method. */
 	db_pgno_t bt_meta;		/* Database meta-data page. */
 	db_pgno_t bt_root;		/* Database root page. */
 
-	u_int32_t bt_maxkey;		/* Maximum keys per page. */
 	u_int32_t bt_minkey;		/* Minimum keys per page. */
 
 					/* Btree comparison function. */
@@ -301,6 +306,7 @@ struct __btree {			/* Btree access method. */
 	FILE		*re_fp;		/* Source file handle. */
 	int		 re_eof;	/* Backing source file EOF reached. */
 	db_recno_t	 re_last;	/* Last record number read. */
+
 };
 
 /*
@@ -315,6 +321,12 @@ typedef enum {
 	DB_CA_SPLIT	= 4
 } db_ca_mode;
 
+/*
+ * Flags for __bam_pinsert.
+ */
+#define	BPI_SPACEONLY	0x01		/* Only check for space to update. */
+#define	BPI_NORECNUM	0x02		/* Not update the recnum on the left. */
+
 #include "dbinc_auto/btree_auto.h"
 #include "dbinc_auto/btree_ext.h"
 #include "dbinc/db_am.h"
diff --git a/storage/bdb/dbinc/crypto.h b/storage/bdb/dbinc/crypto.h
index 8eeebf81b28..419c16ffe2c 100644
--- a/storage/bdb/dbinc/crypto.h
+++ b/storage/bdb/dbinc/crypto.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: crypto.h,v 1.12 2004/01/28 03:36:00 bostic Exp $
+ * $Id: crypto.h,v 12.2 2005/07/20 16:51:03 bostic Exp $
  */
 
 #ifndef	_DB_CRYPTO_H_
@@ -56,7 +56,7 @@ struct __db_cipher {
 
 /*
  * Shared ciphering structure
- * No DB_MUTEX needed because all information is read-only after creation.
+ * No mutex needed because all information is read-only after creation.
  */
 typedef struct __cipher {
 	roff_t		passwd;		/* Offset to shared passwd */
diff --git a/storage/bdb/dbinc/cxx_int.h b/storage/bdb/dbinc/cxx_int.h
index d71ca25d034..7686058e85f 100644
--- a/storage/bdb/dbinc/cxx_int.h
+++ b/storage/bdb/dbinc/cxx_int.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: cxx_int.h,v 11.25 2004/09/22 22:20:31 mjc Exp $
+ * $Id: cxx_int.h,v 12.1 2005/06/16 20:21:43 bostic Exp $
  */
 
 #ifndef _CXX_INT_H_
diff --git a/storage/bdb/dbinc/db.in b/storage/bdb/dbinc/db.in
index 18dca52b96e..ab6108dd4e6 100644
--- a/storage/bdb/dbinc/db.in
+++ b/storage/bdb/dbinc/db.in
@@ -1,10 +1,10 @@
 /*
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db.in,v 11.463 2004/10/11 18:47:50 bostic Exp $
+ * $Id: db.in,v 12.67 2005/11/10 21:10:24 bostic Exp $
  *
  * db.h include file layout:
  *	General.
@@ -27,6 +27,8 @@
 @stdint_h_decl@
 @stddef_h_decl@
 #include 
+@unistd_h_decl@
+@thread_h_decl@
 #endif
 
 #if defined(__cplusplus)
@@ -82,9 +84,7 @@ extern "C" {
  * (and we don't have to worry about systems that store floats in other than
  * power-of-2 numbers of bytes).  Additionally this fixes compilers that rewrite
  * structure assignments and ANSI C memcpy calls to be in-line instructions
- * that happen to require alignment.  Note: this alignment isn't sufficient for
- * mutexes, which depend on things like cache line alignment.  Mutex alignment
- * is handled separately, in mutex.h.
+ * that happen to require alignment.
  *
  * uintptr_t --
  * Unsigned type that's the same size as a pointer.  There are places where
@@ -101,6 +101,10 @@ extern "C" {
  */
 @db_seq_decl@
 
+/* Thread and process identification. */
+@db_threadid_t_decl@
+@pid_t_decl@
+
 /* Basic types that are exported or quasi-exported. */
 typedef	u_int32_t	db_pgno_t;	/* Page number type. */
 typedef	u_int16_t	db_indx_t;	/* Page offset type. */
@@ -127,6 +131,7 @@ typedef	uintptr_t	roff_t;
 struct __db;		typedef struct __db DB;
 struct __db_bt_stat;	typedef struct __db_bt_stat DB_BTREE_STAT;
 struct __db_cipher;	typedef struct __db_cipher DB_CIPHER;
+struct __db_compact;	typedef struct __db_compact DB_COMPACT;
 struct __db_dbt;	typedef struct __db_dbt DBT;
 struct __db_env;	typedef struct __db_env DB_ENV;
 struct __db_h_stat;	typedef struct __db_h_stat DB_HASH_STAT;
@@ -141,13 +146,14 @@ struct __db_mpool;	typedef struct __db_mpool DB_MPOOL;
 struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
 struct __db_mpool_stat;	typedef struct __db_mpool_stat DB_MPOOL_STAT;
 struct __db_mpoolfile;	typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_mutex_stat;	typedef struct __db_mutex_stat DB_MUTEX_STAT;
 struct __db_preplist;	typedef struct __db_preplist DB_PREPLIST;
 struct __db_qam_stat;	typedef struct __db_qam_stat DB_QUEUE_STAT;
 struct __db_rep;	typedef struct __db_rep DB_REP;
 struct __db_rep_stat;	typedef struct __db_rep_stat DB_REP_STAT;
-struct __db_sequence;	typedef struct __db_sequence DB_SEQUENCE;
 struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD;
 struct __db_seq_stat;	typedef struct __db_seq_stat DB_SEQUENCE_STAT;
+struct __db_sequence;	typedef struct __db_sequence DB_SEQUENCE;
 struct __db_txn;	typedef struct __db_txn DB_TXN;
 struct __db_txn_active;	typedef struct __db_txn_active DB_TXN_ACTIVE;
 struct __db_txn_stat;	typedef struct __db_txn_stat DB_TXN_STAT;
@@ -158,13 +164,9 @@ struct __fh_t;		typedef struct __fh_t DB_FH;
 struct __fname;		typedef struct __fname FNAME;
 struct __key_range;	typedef struct __key_range DB_KEY_RANGE;
 struct __mpoolfile;	typedef struct __mpoolfile MPOOLFILE;
-struct __mutex_t;	typedef struct __mutex_t DB_MUTEX;
 
 /* Key/data structure -- a Data-Base Thang. */
 struct __db_dbt {
-	/*
-	 * data/size must be fields 1 and 2 for DB 1.85 compatibility.
-	 */
 	void	 *data;			/* Key/data */
 	u_int32_t size;			/* key/data length */
 
@@ -172,8 +174,6 @@ struct __db_dbt {
 	u_int32_t dlen;			/* RO: get/put record length. */
 	u_int32_t doff;			/* RO: get/put record offset. */
 
-	void	*app_private;		/* Application-private handle. */
-
 #define	DB_DBT_APPMALLOC	0x001	/* Callback allocated memory. */
 #define	DB_DBT_ISSET		0x002	/* Lower level calls set value. */
 #define	DB_DBT_MALLOC		0x004	/* Return in malloc'd memory. */
@@ -190,7 +190,7 @@ struct __db_dbt {
  *	interface specific flags in this range.
  */
 #define	DB_CREATE	      0x0000001	/* Create file as necessary. */
-#define	DB_CXX_NO_EXCEPTIONS  0x0000002	/* C++: return error values. */
+#define	DB_DURABLE_UNKNOWN    0x0000002 /* Durability on open (internal). */
 #define	DB_FORCE	      0x0000004	/* Force (anything). */
 #define	DB_NOMMAP	      0x0000008	/* Don't mmap underlying file. */
 #define	DB_RDONLY	      0x0000010	/* Read-only (O_RDONLY). */
@@ -199,8 +199,9 @@ struct __db_dbt {
 #define	DB_TRUNCATE	      0x0000080	/* Discard existing DB (O_TRUNC). */
 #define	DB_TXN_NOSYNC	      0x0000100	/* Do not sync log on commit. */
 #define	DB_TXN_NOT_DURABLE    0x0000200	/* Do not log changes. */
-#define	DB_USE_ENVIRON	      0x0000400	/* Use the environment. */
-#define	DB_USE_ENVIRON_ROOT   0x0000800	/* Use the environment if root. */
+#define	DB_TXN_WRITE_NOSYNC   0x0000400	/* Write the log but don't sync. */
+#define	DB_USE_ENVIRON	      0x0000800	/* Use the environment. */
+#define	DB_USE_ENVIRON_ROOT   0x0001000	/* Use the environment if root. */
 
 /*
  * Common flags --
@@ -208,66 +209,80 @@ struct __db_dbt {
  *	interface specific flags in this range.
  *
  * DB_AUTO_COMMIT:
- *	DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
- *	DB->remove, DB->rename, DB->truncate
- * DB_DEGREE_2:
+ *	DB_ENV->set_flags, DB->open
+ *      (Note: until the 4.3 release, legal to DB->associate, DB->del,
+ *	DB->put, DB->remove, DB->rename and DB->truncate, and others.)
+ * DB_READ_COMMITTED:
  *	DB->cursor, DB->get, DB->join, DBcursor->c_get, DB_ENV->txn_begin
- * DB_DIRTY_READ:
+ * DB_READ_UNCOMMITTED:
  *	DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
  *	DB_ENV->txn_begin
- * DB_NOAUTO_COMMIT
- *	DB->associate, DB->del, DB->put, DB->open,
- *	DB->remove, DB->rename, DB->truncate
  *
  * !!!
- * The DB_DIRTY_READ and DB_DEGREE_2 bit masks can't be changed without
- * also changing the masks for the flags that can be OR'd into DB
+ * The DB_READ_COMMITTED and DB_READ_UNCOMMITTED bit masks can't be changed
+ * without also changing the masks for the flags that can be OR'd into DB
  * access method and cursor operation values.
  */
 #define	DB_AUTO_COMMIT	      0x01000000/* Implied transaction. */
-#define	DB_DEGREE_2	      0x02000000/* Degree 2. */
-#define	DB_DIRTY_READ	      0x04000000/* Dirty Read. */
-#define	DB_NO_AUTO_COMMIT     0x08000000/* Override env-wide AUTOCOMMIT. */
+
+#define	DB_READ_COMMITTED     0x02000000/* Degree 2 isolation. */
+#define	DB_DEGREE_2	      0x02000000/*	Historic name. */
+
+#define	DB_READ_UNCOMMITTED   0x04000000/* Degree 1 isolation. */
+#define	DB_DIRTY_READ	      0x04000000/*	Historic name. */
+
+/*
+ * Flags common to db_env_create and db_create.
+ */
+#define	DB_CXX_NO_EXCEPTIONS  0x0000001	/* C++: return error values. */
 
 /*
  * Flags private to db_env_create.
- */
-#define	DB_RPCCLIENT	      0x0000001	/* An RPC client environment. */
+ *	   Shared flags up to 0x0000001 */
+#define	DB_RPCCLIENT	      0x0000002	/* An RPC client environment. */
 
 /*
  * Flags private to db_create.
- */
-#define	DB_REP_CREATE	      0x0000001	/* Open of an internal rep database. */
+ *	   Shared flags up to 0x0000001 */
 #define	DB_XA_CREATE	      0x0000002	/* Open in an XA environment. */
 
 /*
  * Flags private to DB_ENV->open.
- *	   Shared flags up to 0x0000800 */
-#define	DB_INIT_CDB	      0x0001000	/* Concurrent Access Methods. */
-#define	DB_INIT_LOCK	      0x0002000	/* Initialize locking. */
-#define	DB_INIT_LOG	      0x0004000	/* Initialize logging. */
-#define	DB_INIT_MPOOL	      0x0008000	/* Initialize mpool. */
-#define	DB_INIT_REP	      0x0010000	/* Initialize replication. */
-#define	DB_INIT_TXN	      0x0020000	/* Initialize transactions. */
-#define	DB_JOINENV	      0x0040000	/* Initialize all subsystems present. */
+ *	   Shared flags up to 0x0001000 */
+#define	DB_INIT_CDB	      0x0002000	/* Concurrent Access Methods. */
+#define	DB_INIT_LOCK	      0x0004000	/* Initialize locking. */
+#define	DB_INIT_LOG	      0x0008000	/* Initialize logging. */
+#define	DB_INIT_MPOOL	      0x0010000	/* Initialize mpool. */
+#define	DB_INIT_REP	      0x0020000	/* Initialize replication. */
+#define	DB_INIT_TXN	      0x0040000	/* Initialize transactions. */
 #define	DB_LOCKDOWN	      0x0080000	/* Lock memory into physical core. */
 #define	DB_PRIVATE	      0x0100000	/* DB_ENV is process local. */
 #define	DB_RECOVER_FATAL      0x0200000	/* Run catastrophic recovery. */
-#define	DB_SYSTEM_MEM	      0x0400000	/* Use system-backed memory. */
+#define	DB_REGISTER	      0x0400000	/* Multi-process registry. */
+#define	DB_SYSTEM_MEM	      0x0800000	/* Use system-backed memory. */
+
+#define	DB_JOINENV	      0x0	/* Compatibility. */
 
 /*
  * Flags private to DB->open.
- *	   Shared flags up to 0x0000800 */
-#define	DB_EXCL		      0x0001000	/* Exclusive open (O_EXCL). */
-#define	DB_FCNTL_LOCKING      0x0002000	/* UNDOC: fcntl(2) locking. */
-#define	DB_RDWRMASTER	      0x0004000	/* UNDOC: allow subdb master open R/W */
-#define	DB_WRITEOPEN	      0x0008000	/* UNDOC: open with write lock. */
+ *	   Shared flags up to 0x0001000 */
+#define	DB_EXCL		      0x0002000	/* Exclusive open (O_EXCL). */
+#define	DB_FCNTL_LOCKING      0x0004000	/* UNDOC: fcntl(2) locking. */
+#define	DB_NO_AUTO_COMMIT     0x0008000	/* Override env-wide AUTOCOMMIT. */
+#define	DB_RDWRMASTER	      0x0010000	/* UNDOC: allow subdb master open R/W */
+#define	DB_WRITEOPEN	      0x0020000	/* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB->associate.
+ *	   Shared flags up to 0x0001000 */
+#define	DB_IMMUTABLE_KEY      0x0002000	/* Secondary key is immutable. */
+/*	      Shared flags at 0x1000000 */
 
 /*
  * Flags private to DB_ENV->txn_begin.
- *	   Shared flags up to 0x0000800 */
-#define	DB_TXN_NOWAIT	      0x0001000	/* Do not wait for locks in this TXN. */
-#define	DB_TXN_SYNC	      0x0002000	/* Always sync log on commit. */
+ *	   Shared flags up to 0x0001000 */
+#define	DB_TXN_NOWAIT	      0x0002000	/* Do not wait for locks in this TXN. */
+#define	DB_TXN_SYNC	      0x0004000	/* Always sync log on commit. */
 
 /*
  * Flags private to DB_ENV->set_encrypt.
@@ -276,24 +291,23 @@ struct __db_dbt {
 
 /*
  * Flags private to DB_ENV->set_flags.
- *	   Shared flags up to 0x00000800 */
-#define	DB_CDB_ALLDB	      0x00001000/* Set CDB locking per environment. */
-#define	DB_DIRECT_DB	      0x00002000/* Don't buffer databases in the OS. */
-#define	DB_DIRECT_LOG	      0x00004000/* Don't buffer log files in the OS. */
-#define	DB_DSYNC_LOG	      0x00008000/* Set O_DSYNC on the log. */
-#define	DB_LOG_AUTOREMOVE     0x00010000/* Automatically remove log files. */
-#define	DB_LOG_INMEMORY       0x00020000/* Store logs in buffers in memory. */
-#define	DB_NOLOCKING	      0x00040000/* Set locking/mutex behavior. */
-#define	DB_NOPANIC	      0x00080000/* Set panic state per DB_ENV. */
-#define	DB_OVERWRITE	      0x00100000/* Overwrite unlinked region files. */
-#define	DB_PANIC_ENVIRONMENT  0x00200000/* Set panic state per environment. */
-#define	DB_REGION_INIT	      0x00400000/* Page-fault regions on open. */
-#define	DB_TIME_NOTGRANTED    0x00800000/* Return NOTGRANTED on timeout. */
+ *	   Shared flags up to 0x00001000 */
+#define	DB_CDB_ALLDB	      0x00002000/* Set CDB locking per environment. */
+#define	DB_DIRECT_DB	      0x00004000/* Don't buffer databases in the OS. */
+#define	DB_DIRECT_LOG	      0x00008000/* Don't buffer log files in the OS. */
+#define	DB_DSYNC_DB	      0x00010000/* Set O_DSYNC on the databases. */
+#define	DB_DSYNC_LOG	      0x00020000/* Set O_DSYNC on the log. */
+#define	DB_LOG_AUTOREMOVE     0x00040000/* Automatically remove log files. */
+#define	DB_LOG_INMEMORY       0x00080000/* Store logs in buffers in memory. */
+#define	DB_NOLOCKING	      0x00100000/* Set locking/mutex behavior. */
+#define	DB_NOPANIC	      0x00200000/* Set panic state per DB_ENV. */
+#define	DB_OVERWRITE	      0x00400000/* Overwrite unlinked region files. */
+#define	DB_PANIC_ENVIRONMENT  0x00800000/* Set panic state per environment. */
 /*	      Shared flags at 0x01000000 */
 /*	      Shared flags at 0x02000000 */
 /*	      Shared flags at 0x04000000 */
-/*	      Shared flags at 0x08000000 */
-#define	DB_TXN_WRITE_NOSYNC   0x10000000/* Write, don't sync, on txn commit. */
+#define	DB_REGION_INIT	      0x08000000/* Page-fault regions on open. */
+#define	DB_TIME_NOTGRANTED    0x10000000/* Return NOTGRANTED on timeout. */
 #define	DB_YIELDCPU	      0x20000000/* Yield the CPU (a lot). */
 
 /*
@@ -302,26 +316,34 @@ struct __db_dbt {
 #define	DB_UPGRADE	      0x0000001	/* Upgrading. */
 #define	DB_VERIFY	      0x0000002	/* Verifying. */
 
+/*
+ * Flags private to DB->compact.
+ *	   Shared flags up to 0x00001000
+ */
+#define	DB_FREELIST_ONLY      0x00002000 /* Just sort and truncate. */
+#define	DB_FREE_SPACE         0x00004000 /* Free space . */
+#define	DB_COMPACT_FLAGS      \
+      (DB_FREELIST_ONLY | DB_FREE_SPACE)
+
 /*
  * Flags private to DB_MPOOLFILE->open.
- *	   Shared flags up to 0x0000800 */
-#define	DB_DIRECT	      0x0001000	/* Don't buffer the file in the OS. */
-#define	DB_DURABLE_UNKNOWN    0x0002000 /* internal: durability on open. */
+ *	   Shared flags up to 0x0001000 */
+#define	DB_DIRECT	      0x0002000	/* Don't buffer the file in the OS. */
 #define	DB_EXTENT	      0x0004000	/* internal: dealing with an extent. */
 #define	DB_ODDFILESIZE	      0x0008000	/* Truncate file to N * pgsize. */
 
 /*
  * Flags private to DB->set_flags.
- */
-#define	DB_CHKSUM	      0x0000001	/* Do checksumming */
-#define	DB_DUP		      0x0000002	/* Btree, Hash: duplicate keys. */
-#define	DB_DUPSORT	      0x0000004	/* Btree, Hash: duplicate keys. */
-#define	DB_ENCRYPT	      0x0000008	/* Btree, Hash: duplicate keys. */
-#define	DB_INORDER	      0x0000010	/* Queue: strict ordering on consume. */
-#define	DB_RECNUM	      0x0000020	/* Btree: record numbers. */
-#define	DB_RENUMBER	      0x0000040	/* Recno: renumber on insert/delete. */
-#define	DB_REVSPLITOFF	      0x0000080	/* Btree: turn off reverse splits. */
-#define	DB_SNAPSHOT	      0x0000100	/* Recno: snapshot the input. */
+ *	   Shared flags up to 0x00001000 */
+#define	DB_CHKSUM	      0x00002000 /* Do checksumming */
+#define	DB_DUP		      0x00004000 /* Btree, Hash: duplicate keys. */
+#define	DB_DUPSORT	      0x00008000 /* Btree, Hash: duplicate keys. */
+#define	DB_ENCRYPT	      0x00010000 /* Btree, Hash: duplicate keys. */
+#define	DB_INORDER	      0x00020000 /* Queue: strict ordering on consume */
+#define	DB_RECNUM	      0x00040000 /* Btree: record numbers. */
+#define	DB_RENUMBER	      0x00080000 /* Recno: renumber on insert/delete. */
+#define	DB_REVSPLITOFF	      0x00100000 /* Btree: turn off reverse splits. */
+#define	DB_SNAPSHOT	      0x00200000 /* Recno: snapshot the input. */
 
 /*
  * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods.
@@ -333,7 +355,7 @@ struct __db_dbt {
 #define	DB_STAT_LOCK_OBJECTS  0x0000010	/* Print: Lock objects. */
 #define	DB_STAT_LOCK_PARAMS   0x0000020	/* Print: Lock parameters. */
 #define	DB_STAT_MEMP_HASH     0x0000040	/* Print: Mpool hash buckets. */
-#define	DB_STAT_SUBSYSTEM     0x0000080	/* Print: Subsystems too. */
+#define	DB_STAT_SUBSYSTEM     0x0000080 /* Print: Subsystems too. */
 
 /*
  * Flags private to DB->join.
@@ -360,8 +382,42 @@ struct __db_dbt {
 /*
  * Flags private to DB->set_rep_transport's send callback.
  */
-#define	DB_REP_NOBUFFER	      0x0000001	/* Do not buffer this message. */
-#define	DB_REP_PERMANENT      0x0000002	/* Important--app. may want to flush. */
+#define	DB_REP_ANYWHERE	      0x0000001	/* Message can be serviced anywhere. */
+#define	DB_REP_NOBUFFER	      0x0000002	/* Do not buffer this message. */
+#define	DB_REP_PERMANENT      0x0000004	/* Important--app. may want to flush. */
+#define	DB_REP_REREQUEST      0x0000008	/* This msg already been requested. */
+
+/*******************************************************
+ * Mutexes.
+ *******************************************************/
+typedef u_int32_t	db_mutex_t;
+
+/*
+ * Flag arguments for DbEnv.mutex_alloc and for the DB_MUTEX structure.
+ */
+#define	DB_MUTEX_ALLOCATED	0x01	/* Mutex currently allocated. */
+#define	DB_MUTEX_LOCKED		0x02	/* Mutex currently locked. */
+#define	DB_MUTEX_LOGICAL_LOCK	0x04	/* Mutex backs a database lock. */
+#define	DB_MUTEX_SELF_BLOCK	0x08	/* Must be able to block self. */
+#define	DB_MUTEX_THREAD		0x10	/* Thread-only mutex. */
+
+struct __db_mutex_stat {
+	/* The following fields are maintained in the region's copy. */
+	u_int32_t st_mutex_align;	/* Mutex alignment */
+	u_int32_t st_mutex_tas_spins;	/* Mutex test-and-set spins */
+	u_int32_t st_mutex_cnt;		/* Mutex count */
+	u_int32_t st_mutex_free;	/* Available mutexes */
+	u_int32_t st_mutex_inuse;	/* Mutexes in use */
+	u_int32_t st_mutex_inuse_max;	/* Maximum mutexes ever in use */
+
+	/* The following fields are filled-in from other places. */
+	u_int32_t st_region_wait;	/* Region lock granted after wait. */
+	u_int32_t st_region_nowait;	/* Region lock granted without wait. */
+	roff_t	  st_regsize;		/* Region size. */
+};
+
+/* This is the length of the buffer passed to DB_ENV->thread_id_string() */
+#define	DB_THREADID_STRLEN	128
 
 /*******************************************************
  * Locking.
@@ -389,10 +445,9 @@ struct __db_dbt {
 #define	DB_LOCK_ABORT		0x001	/* Internal: Lock during abort. */
 #define	DB_LOCK_NOWAIT		0x002	/* Don't wait on unavailable lock. */
 #define	DB_LOCK_RECORD		0x004	/* Internal: record lock. */
-#define	DB_LOCK_REMOVE		0x008	/* Internal: flag object removed. */
-#define	DB_LOCK_SET_TIMEOUT	0x010	/* Internal: set lock timeout. */
-#define	DB_LOCK_SWITCH		0x020	/* Internal: switch existing lock. */
-#define	DB_LOCK_UPGRADE		0x040	/* Internal: upgrade existing lock. */
+#define	DB_LOCK_SET_TIMEOUT	0x008	/* Internal: set lock timeout. */
+#define	DB_LOCK_SWITCH		0x010	/* Internal: switch existing lock. */
+#define	DB_LOCK_UPGRADE		0x020	/* Internal: upgrade existing lock. */
 
 /*
  * Simple R/W lock modes and for multi-granularity intention locking.
@@ -410,7 +465,7 @@ typedef enum {
 	DB_LOCK_IWRITE=4,		/* Intent exclusive/write. */
 	DB_LOCK_IREAD=5,		/* Intent to share/read. */
 	DB_LOCK_IWR=6,			/* Intent to read and write. */
-	DB_LOCK_DIRTY=7,		/* Dirty Read. */
+	DB_LOCK_READ_UNCOMMITTED=7,	/* Degree 1 isolation. */
 	DB_LOCK_WWRITE=8		/* Was Written. */
 } db_lockmode_t;
 
@@ -439,12 +494,10 @@ typedef enum  {
 	DB_LSTAT_EXPIRED=2,		/* Lock has expired. */
 	DB_LSTAT_FREE=3,		/* Lock is unallocated. */
 	DB_LSTAT_HELD=4,		/* Lock is currently held. */
-	DB_LSTAT_NOTEXIST=5,		/* Object on which lock was waiting
-					 * was removed */
-	DB_LSTAT_PENDING=6,		/* Lock was waiting and has been
+	DB_LSTAT_PENDING=5,		/* Lock was waiting and has been
 					 * promoted; waiting for the owner
 					 * to run and upgrade it to held. */
-	DB_LSTAT_WAITING=7		/* Lock is on the wait queue. */
+	DB_LSTAT_WAITING=6		/* Lock is on the wait queue. */
 }db_status_t;
 
 /* Lock statistics structure. */
@@ -461,11 +514,12 @@ struct __db_lock_stat {
 	u_int32_t st_maxnlockers;	/* Maximum number of lockers so far. */
 	u_int32_t st_nobjects;		/* Current number of objects. */
 	u_int32_t st_maxnobjects;	/* Maximum number of objects so far. */
-	u_int32_t st_nconflicts;	/* Number of lock conflicts. */
 	u_int32_t st_nrequests;		/* Number of lock gets. */
 	u_int32_t st_nreleases;		/* Number of lock puts. */
-	u_int32_t st_nnowaits;		/* Number of requests that would have
-					   waited, but NOWAIT was set. */
+	u_int32_t st_nupgrade;		/* Number of lock upgrades. */
+	u_int32_t st_ndowngrade;	/* Number of lock downgrades. */
+	u_int32_t st_lock_wait;		/* Lock conflicts w/ subsequent wait */
+	u_int32_t st_lock_nowait;	/* Lock conflicts w/o subsequent wait */
 	u_int32_t st_ndeadlocks;	/* Number of lock deadlocks. */
 	db_timeout_t st_locktimeout;	/* Lock timeout. */
 	u_int32_t st_nlocktimeouts;	/* Number of lock timeouts. */
@@ -514,8 +568,8 @@ struct __db_lockreq {
 /*******************************************************
  * Logging.
  *******************************************************/
-#define	DB_LOGVERSION	10		/* Current log version. */
-#define	DB_LOGOLDVER	10		/* Oldest log version supported. */
+#define	DB_LOGVERSION	11		/* Current log version. */
+#define	DB_LOGOLDVER	11		/* Oldest log version supported. */
 #define	DB_LOGMAGIC	0x040988
 
 /* Flag values for DB_ENV->log_archive(). */
@@ -582,9 +636,10 @@ struct __db_log_cursor {
 
 	u_int32_t bp_maxrec;		/* Max record length in the log file. */
 
-					/* Methods. */
+	/* DB_LOGC PUBLIC HANDLE LIST BEGIN */
 	int (*close) __P((DB_LOGC *, u_int32_t));
 	int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+	/* DB_LOGC PUBLIC HANDLE LIST END */
 
 #define	DB_LOG_DISK		0x01	/* Log record came from disk. */
 #define	DB_LOG_LOCKED		0x02	/* Log region already locked */
@@ -596,15 +651,17 @@ struct __db_log_cursor {
 struct __db_log_stat {
 	u_int32_t st_magic;		/* Log file magic number. */
 	u_int32_t st_version;		/* Log file version number. */
-	int	  st_mode;			/* Log file mode. */
+	int	  st_mode;		/* Log file permissions mode. */
 	u_int32_t st_lg_bsize;		/* Log buffer size. */
 	u_int32_t st_lg_size;		/* Log file size. */
+	u_int32_t st_record;		/* Records entered into the log. */
 	u_int32_t st_w_bytes;		/* Bytes to log. */
 	u_int32_t st_w_mbytes;		/* Megabytes to log. */
 	u_int32_t st_wc_bytes;		/* Bytes to log since checkpoint. */
 	u_int32_t st_wc_mbytes;		/* Megabytes to log since checkpoint. */
-	u_int32_t st_wcount;		/* Total writes to the log. */
+	u_int32_t st_wcount;		/* Total I/O writes to the log. */
 	u_int32_t st_wcount_fill;	/* Overflow writes to the log. */
+	u_int32_t st_rcount;		/* Total I/O reads from the log. */
 	u_int32_t st_scount;		/* Total syncs to the log. */
 	u_int32_t st_region_wait;	/* Region lock granted after wait. */
 	u_int32_t st_region_nowait;	/* Region lock granted without wait. */
@@ -618,12 +675,14 @@ struct __db_log_stat {
 };
 
 /*
- * We need to record the first log record of a transaction.
- * For user defined logging this macro returns the place to
- * put that information, if it is need in rlsnp, otherwise it
- * leaves it unchanged.
+ * We need to record the first log record of a transaction.  For user
+ * defined logging this macro returns the place to put that information,
+ * if it is need in rlsnp, otherwise it leaves it unchanged.  We also
+ * need to track the last record of the transaction, this returns the
+ * place to put that info.
  */
-#define	DB_SET_BEGIN_LSNP(txn, rlsnp)	((txn)->set_begin_lsnp(txn, rlsnp))
+#define	DB_SET_TXN_LSNP(txn, blsnp, llsnp)		\
+	((txn)->set_txn_lsnp(txn, blsnp, llsnp))
 
 /*******************************************************
  * Shared buffer cache (mpool).
@@ -632,12 +691,12 @@ struct __db_log_stat {
 #define	DB_MPOOL_CREATE		0x001	/* Create a page. */
 #define	DB_MPOOL_LAST		0x002	/* Return the last page. */
 #define	DB_MPOOL_NEW		0x004	/* Create a new page. */
+#define	DB_MPOOL_FREE		0x008	/* Free page if present. */
 
 /* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
 #define	DB_MPOOL_CLEAN		0x001	/* Page is not modified. */
 #define	DB_MPOOL_DIRTY		0x002	/* Page is modified. */
 #define	DB_MPOOL_DISCARD	0x004	/* Don't cache the page. */
-#define	DB_MPOOL_FREE		0x008	/* Free page if present. */
 
 /* Flags values for DB_MPOOLFILE->set_flags. */
 #define	DB_MPOOL_NOFILE		0x001	/* Never open a backing file. */
@@ -694,15 +753,14 @@ struct __db_mpoolfile {
 	int32_t		lsn_offset;	/* LSN offset in page. */
 	u_int32_t	gbytes, bytes;	/* Maximum file size. */
 	DBT	       *pgcookie;	/* Byte-string passed to pgin/pgout. */
-	DB_CACHE_PRIORITY		/* Cache priority. */
-			priority;
+	int32_t		priority;	/* Cache priority. */
 
 	void	       *addr;		/* Address of mmap'd region. */
 	size_t		len;		/* Length of mmap'd region. */
 
 	u_int32_t	config_flags;	/* Flags to DB_MPOOLFILE->set_flags. */
 
-					/* Methods. */
+	/* DB_MPOOLFILE PUBLIC HANDLE LIST BEGIN */
 	int (*close) __P((DB_MPOOLFILE *, u_int32_t));
 	int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
 	int (*open) __P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
@@ -725,6 +783,7 @@ struct __db_mpoolfile {
 	int (*get_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *));
 	int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
 	int (*sync) __P((DB_MPOOLFILE *));
+	/* DB_MPOOLFILE PUBLIC HANDLE LIST END */
 
 	/*
 	 * MP_FILEID_SET, MP_OPEN_CALLED and MP_READONLY do not need to be
@@ -820,12 +879,14 @@ typedef enum {
 struct __db_txn {
 	DB_TXNMGR	*mgrp;		/* Pointer to transaction manager. */
 	DB_TXN		*parent;	/* Pointer to transaction's parent. */
-	DB_LSN		last_lsn;	/* Lsn of last log write. */
+
 	u_int32_t	txnid;		/* Unique transaction id. */
-	u_int32_t	tid;		/* Thread id for use in MT XA. */
-	roff_t		off;		/* Detail structure within region. */
+	char		*name;		/* Transaction name */
+
+	db_threadid_t	tid;		/* Thread id for use in MT XA. */
+	void		*td;		/* Detail structure within region. */
 	db_timeout_t	lock_timeout;	/* Timeout for locks for this txn. */
-	db_timeout_t	expire;		/* Time this txn expires. */
+	db_timeout_t	expire;		/* Time transaction expires. */
 	void		*txn_list;	/* Undo information for parent. */
 
 	/*
@@ -843,6 +904,16 @@ struct __db_txn {
 		struct __db_txn **tqe_prev;
 	} xalinks;			/* Links active XA transactions. */
 
+	/*
+	 * !!!
+	 * Explicit representations of structures from queue.h.
+	 * TAILQ_HEAD(__kids, __db_txn) kids;
+	 */
+	struct __kids {
+		struct __db_txn *tqh_first;
+		struct __db_txn **tqh_last;
+	} kids;
+
 	/*
 	 * !!!
 	 * Explicit representations of structures from queue.h.
@@ -863,16 +934,6 @@ struct __db_txn {
 		struct __txn_logrec **stqh_last;
 	} logs;				/* Links deferred events. */
 
-	/*
-	 * !!!
-	 * Explicit representations of structures from queue.h.
-	 * TAILQ_HEAD(__kids, __db_txn) kids;
-	 */
-	struct __kids {
-		struct __db_txn *tqh_first;
-		struct __db_txn **tqh_last;
-	} kids;
-
 	/*
 	 * !!!
 	 * Explicit representations of structures from queue.h.
@@ -888,32 +949,41 @@ struct __db_txn {
 
 	u_int32_t	cursors;	/* Number of cursors open for txn */
 
-					/* Methods. */
+	/* DB_TXN PUBLIC HANDLE LIST BEGIN */
 	int	  (*abort) __P((DB_TXN *));
 	int	  (*commit) __P((DB_TXN *, u_int32_t));
 	int	  (*discard) __P((DB_TXN *, u_int32_t));
+	int	  (*get_name) __P((DB_TXN *, const char **));
 	u_int32_t (*id) __P((DB_TXN *));
 	int	  (*prepare) __P((DB_TXN *, u_int8_t *));
-	void	  (*set_begin_lsnp) __P((DB_TXN *txn, DB_LSN **));
+	int	  (*set_name) __P((DB_TXN *, const char *));
 	int	  (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+	/* DB_TXN PUBLIC HANDLE LIST END */
 
-#define	TXN_CHILDCOMMIT	0x001		/* Transaction that has committed. */
-#define	TXN_COMPENSATE	0x002		/* Compensating transaction. */
-#define	TXN_DEADLOCK	0x004		/* Transaction has deadlocked. */
-#define	TXN_DEGREE_2	0x008		/* Has degree 2 isolation. */
-#define	TXN_DIRTY_READ	0x010		/* Transaction does dirty reads. */
-#define	TXN_LOCKTIMEOUT	0x020		/* Transaction has a lock timeout. */
-#define	TXN_MALLOC	0x040		/* Structure allocated by TXN system. */
-#define	TXN_NOSYNC	0x080		/* Do not sync on prepare and commit. */
-#define	TXN_NOWAIT	0x100		/* Do not wait on locks. */
-#define	TXN_RESTORED	0x200		/* Transaction has been restored. */
-#define	TXN_SYNC	0x400		/* Sync on prepare and commit. */
+	/* DB_TXN PRIVATE HANDLE LIST BEGIN */
+	void	  (*set_txn_lsnp) __P((DB_TXN *txn, DB_LSN **, DB_LSN **));
+	/* DB_TXN PRIVATE HANDLE LIST END */
+
+#define	TXN_CHILDCOMMIT		0x001	/* Txn has committed. */
+#define	TXN_COMPENSATE		0x002	/* Compensating transaction. */
+#define	TXN_DEADLOCK		0x004	/* Txn has deadlocked. */
+#define	TXN_LOCKTIMEOUT		0x008	/* Txn has a lock timeout. */
+#define	TXN_MALLOC		0x010	/* Structure allocated by TXN system. */
+#define	TXN_NOSYNC		0x020	/* Do not sync on prepare and commit. */
+#define	TXN_NOWAIT		0x040	/* Do not wait on locks. */
+#define	TXN_READ_COMMITTED	0x080	/* Txn has degree 2 isolation. */
+#define	TXN_READ_UNCOMMITTED	0x100	/* Txn has degree 1 isolation. */
+#define	TXN_RESTORED		0x200	/* Txn has been restored. */
+#define	TXN_SYNC		0x400	/* Write and sync on prepare/commit. */
+#define	TXN_WRITE_NOSYNC	0x800	/* Write only on prepare/commit. */
 	u_int32_t	flags;
 };
 
+#define	TXN_SYNC_FLAGS (TXN_SYNC | TXN_NOSYNC | TXN_WRITE_NOSYNC)
+
 /*
  * Structure used for two phase commit interface.  Berkeley DB support for two
- * phase commit is compatible with the X/open XA interface.
+ * phase commit is compatible with the X/Open XA interface.
  *
  * The XA #define XIDDATASIZE defines the size of a global transaction ID.  We
  * have our own version here (for name space reasons) which must have the same
@@ -929,9 +999,12 @@ struct __db_preplist {
 struct __db_txn_active {
 	u_int32_t txnid;		/* Transaction ID */
 	u_int32_t parentid;		/* Transaction ID of parent */
+	pid_t     pid;			/* Process owning txn ID */
+	db_threadid_t tid;		/* Thread owning txn ID */
 	DB_LSN	  lsn;			/* LSN when transaction began */
 	u_int32_t xa_status;		/* XA status */
 	u_int8_t  xid[DB_XIDDATASIZE];	/* XA global transaction ID */
+	char	  name[51];		/* 50 bytes of name, nul termination */
 };
 
 struct __db_txn_stat {
@@ -959,7 +1032,13 @@ struct __db_txn_stat {
 #define	DB_EID_BROADCAST	-1
 #define	DB_EID_INVALID		-2
 
-/* rep_start flags values */
+/* rep_config flag values. */
+#define	DB_REP_CONF_BULK	0x0001	/* Bulk transfer. */
+#define	DB_REP_CONF_DELAYCLIENT	0x0002	/* Delay client synchronization. */
+#define	DB_REP_CONF_NOAUTOINIT	0x0004	/* No automatic client init. */
+#define	DB_REP_CONF_NOWAIT	0x0008	/* Don't wait, return error. */
+
+/* rep_start flags values. */
 #define	DB_REP_CLIENT		0x001
 #define	DB_REP_MASTER		0x002
 
@@ -984,6 +1063,15 @@ struct __db_rep_stat {
 					   condition was detected.+ */
 	int st_env_id;			/* Current environment ID. */
 	int st_env_priority;		/* Current environment priority. */
+	u_int32_t st_bulk_fills;	/* Bulk buffer fills. */
+	u_int32_t st_bulk_overflows;	/* Bulk buffer overflows. */
+	u_int32_t st_bulk_records;	/* Bulk records stored. */
+	u_int32_t st_bulk_transfers;	/* Transfers of bulk buffers. */
+	u_int32_t st_client_rerequests;	/* Number of forced rerequests. */
+	u_int32_t st_client_svc_req;	/* Number of client service requests
+					   received by this client. */
+	u_int32_t st_client_svc_miss;	/* Number of client service requests
+					   missing on this client. */
 	u_int32_t st_gen;		/* Current generation number. */
 	u_int32_t st_egen;		/* Current election gen number. */
 	u_int32_t st_log_duplicated;	/* Log records received multiply.+ */
@@ -1026,7 +1114,13 @@ struct __db_rep_stat {
 	int st_election_status;		/* Current election status. */
 	u_int32_t st_election_tiebreaker;/* Election tiebreaker value. */
 	int st_election_votes;		/* Votes received in this round. */
+	u_int32_t st_election_sec;	/* Last election time seconds. */
+	u_int32_t st_election_usec;	/* Last election time useconds. */
 };
+
+/*******************************************************
+ * Sequences.
+ *******************************************************/
 /*
  * The storage record for a sequence.
  */
@@ -1036,6 +1130,7 @@ struct __db_seq_record {
 #define	DB_SEQ_INC		0x00000002	/* Increment sequence. */
 #define	DB_SEQ_RANGE_SET	0x00000004	/* Range set (internal). */
 #define	DB_SEQ_WRAP		0x00000008	/* Wrap sequence at min/max. */
+#define	DB_SEQ_WRAPPED		0x00000010	/* Just wrapped (internal). */
 	u_int32_t	flags;		/* Flags. */
 	db_seq_t	seq_value;	/* Current value. */
 	db_seq_t	seq_max;	/* Max permitted. */
@@ -1047,7 +1142,7 @@ struct __db_seq_record {
  */
 struct __db_sequence {
 	DB		*seq_dbp;	/* DB handle for this sequence. */
-	DB_MUTEX	*seq_mutexp;	/* Mutex if sequence is threaded. */
+	db_mutex_t	mtx_seq;	/* Mutex if sequence is threaded. */
 	DB_SEQ_RECORD	*seq_rp;	/* Pointer to current data. */
 	DB_SEQ_RECORD	seq_record;	/* Data from DB_SEQUENCE. */
 	int32_t		seq_cache_size; /* Number of values cached. */
@@ -1058,6 +1153,7 @@ struct __db_sequence {
 	/* API-private structure: used by C++ and Java. */
 	void		*api_internal;
 
+	/* DB_SEQUENCE PUBLIC HANDLE LIST BEGIN */
 	int		(*close) __P((DB_SEQUENCE *, u_int32_t));
 	int		(*get) __P((DB_SEQUENCE *,
 			      DB_TXN *, int32_t, db_seq_t *, u_int32_t));
@@ -1077,18 +1173,19 @@ struct __db_sequence {
 	int		(*stat) __P((DB_SEQUENCE *,
 			    DB_SEQUENCE_STAT **, u_int32_t));
 	int		(*stat_print) __P((DB_SEQUENCE *, u_int32_t));
+	/* DB_SEQUENCE PUBLIC HANDLE LIST END */
 };
 
 struct __db_seq_stat {
-	u_int32_t st_wait;	  /* Sequence lock granted without wait. */
-	u_int32_t st_nowait;	  /* Sequence lock granted after wait. */
-	db_seq_t  st_current;	  /* Current value in db. */
-	db_seq_t  st_value;	  /* Current cached value. */
-	db_seq_t  st_last_value;  /* Last cached value. */
-	db_seq_t  st_min;	  /* Minimum value. */
-	db_seq_t  st_max;	  /* Maximum value. */
-	int32_t   st_cache_size;  /* Cache size. */
-	u_int32_t st_flags;	  /* Flag value. */
+	u_int32_t st_wait;		/* Sequence lock granted w/o wait. */
+	u_int32_t st_nowait;		/* Sequence lock granted after wait. */
+	db_seq_t  st_current;		/* Current value in db. */
+	db_seq_t  st_value;		/* Current cached value. */
+	db_seq_t  st_last_value;	/* Last cached value. */
+	db_seq_t  st_min;		/* Minimum value. */
+	db_seq_t  st_max;		/* Maximum value. */
+	int32_t   st_cache_size;	/* Cache size. */
+	u_int32_t st_flags;		/* Flag value. */
 };
 
 /*******************************************************
@@ -1117,7 +1214,7 @@ typedef enum {
 #define	DB_QAMMAGIC	0x042253
 
 #define	DB_SEQUENCE_VERSION 2		/* Current sequence version. */
-#define	DB_SEQUENCE_OLDVER  1		/* Oldest queue version supported. */
+#define	DB_SEQUENCE_OLDVER  1		/* Oldest sequence version supported. */
 
 /*
  * DB access method and cursor operation values.  Each value is an operation
@@ -1165,12 +1262,15 @@ typedef enum {
 
 /*
  * Masks for flags that can be OR'd into DB access method and cursor
- * operation values.
+ * operation values.  Three top bits have already been taken:
  *
- *	DB_DIRTY_READ	0x04000000	   Dirty Read. */
+ * DB_AUTO_COMMIT	0x01000000
+ * DB_READ_COMMITTED	0x02000000
+ * DB_READ_UNCOMMITTED	0x04000000
+ */
 #define	DB_MULTIPLE	0x08000000	/* Return multiple data values. */
 #define	DB_MULTIPLE_KEY	0x10000000	/* Return multiple data/key pairs. */
-#define	DB_RMW		0x20000000	/* Acquire write flag immediately. */
+#define	DB_RMW		0x20000000	/* Acquire write lock immediately. */
 
 /*
  * DB (user visible) error return codes.
@@ -1203,22 +1303,25 @@ typedef enum {
 #define	DB_REP_DUPMASTER	(-30986)/* There are two masters. */
 #define	DB_REP_HANDLE_DEAD	(-30985)/* Rolled back a commit. */
 #define	DB_REP_HOLDELECTION	(-30984)/* Time to hold an election. */
-#define	DB_REP_ISPERM		(-30983)/* Cached not written perm written.*/
-#define	DB_REP_NEWMASTER	(-30982)/* We have learned of a new master. */
-#define	DB_REP_NEWSITE		(-30981)/* New site entered system. */
-#define	DB_REP_NOTPERM		(-30980)/* Permanent log record not written. */
-#define	DB_REP_STARTUPDONE	(-30979)/* Client startup complete. */
-#define	DB_REP_UNAVAIL		(-30978)/* Site cannot currently be reached. */
-#define	DB_RUNRECOVERY		(-30977)/* Panic return. */
-#define	DB_SECONDARY_BAD	(-30976)/* Secondary index corrupt. */
-#define	DB_VERIFY_BAD		(-30975)/* Verify failed; bad format. */
-#define	DB_VERSION_MISMATCH	(-30974)/* Environment version mismatch. */
+#define	DB_REP_IGNORE		(-30983)/* This msg should be ignored.*/
+#define	DB_REP_ISPERM		(-30982)/* Cached not written perm written.*/
+#define	DB_REP_JOIN_FAILURE	(-30981)/* Unable to join replication group. */
+#define	DB_REP_LOCKOUT		(-30980)/* API/Replication lockout now. */
+#define	DB_REP_NEWMASTER	(-30979)/* We have learned of a new master. */
+#define	DB_REP_NEWSITE		(-30978)/* New site entered system. */
+#define	DB_REP_NOTPERM		(-30977)/* Permanent log record not written. */
+#define	DB_REP_STARTUPDONE	(-30976)/* Client startup complete. */
+#define	DB_REP_UNAVAIL		(-30975)/* Site cannot currently be reached. */
+#define	DB_RUNRECOVERY		(-30974)/* Panic return. */
+#define	DB_SECONDARY_BAD	(-30973)/* Secondary index corrupt. */
+#define	DB_VERIFY_BAD		(-30972)/* Verify failed; bad format. */
+#define	DB_VERSION_MISMATCH	(-30971)/* Environment version mismatch. */
 
 /* DB (private) error return codes. */
 #define	DB_ALREADY_ABORTED	(-30899)
 #define	DB_DELETED		(-30898)/* Recovery file marked deleted. */
-#define	DB_LOCK_NOTEXIST	(-30897)/* Object to lock is gone. */
-#define	DB_NEEDSPLIT		(-30896)/* Page needs to be split. */
+#define	DB_NEEDSPLIT		(-30897)/* Page needs to be split. */
+#define	DB_REP_BULKOVF		(-30896)/* Rep bulk buffer overflow. */
 #define	DB_REP_EGENCHG		(-30895)/* Egen changed while in election. */
 #define	DB_REP_LOGREADY		(-30894)/* Rep log ready for recovery. */
 #define	DB_REP_PAGEDONE		(-30893)/* This page was already done. */
@@ -1252,7 +1355,7 @@ struct __db {
 
 	DB_MPOOLFILE *mpf;		/* Backing buffer pool. */
 
-	DB_MUTEX *mutexp;		/* Synchronization for free threading */
+	db_mutex_t mutex;		/* Synchronization for free threading */
 
 	char *fname, *dname;		/* File/database passed to DB->open. */
 	u_int32_t open_flags;		/* Flags passed to DB->open. */
@@ -1268,11 +1371,12 @@ struct __db {
 	u_int32_t lid;			/* Locker id for handle locking. */
 	u_int32_t cur_lid;		/* Current handle lock holder. */
 	u_int32_t associate_lid;	/* Locker id for DB->associate call. */
-	DB_LOCK	handle_lock;		/* Lock held on this handle. */
+	DB_LOCK	 handle_lock;		/* Lock held on this handle. */
 
 	u_int	 cl_id;			/* RPC: remote client id. */
 
 	time_t	 timestamp;		/* Handle timestamp for replication. */
+	u_int32_t fid_gen;		/* Rep generation number for fids. */
 
 	/*
 	 * Returned data memory for DB->get() and friends.
@@ -1368,6 +1472,11 @@ struct __db {
 	/* Reference to primary -- set in the secondary. */
 	DB	*s_primary;
 
+#define	DB_ASSOC_IMMUTABLE_KEY    0x00000001 /* Secondary key is immutable. */
+
+	/* Flags passed to associate -- set in the secondary. */
+	u_int32_t s_assoc_flags;
+
 	/* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
 	void	*api_internal;
 
@@ -1377,19 +1486,19 @@ struct __db {
 	void	*q_internal;		/* Queue access method. */
 	void	*xa_internal;		/* XA. */
 
-					/* Methods. */
-	int  (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
-		const DBT *, DBT *), u_int32_t));
+	/* DB PUBLIC HANDLE LIST BEGIN */
+	int  (*associate) __P((DB *, DB_TXN *, DB *,
+		int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
 	int  (*close) __P((DB *, u_int32_t));
+	int  (*compact) __P((DB *,
+		DB_TXN *, DBT *, DBT *, DB_COMPACT *, u_int32_t, DBT *));
 	int  (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
 	int  (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
-	int  (*dump) __P((DB *,
-		const char *, int (*)(void *, const void *), void *, int, int));
 	void (*err) __P((DB *, int, const char *, ...));
 	void (*errx) __P((DB *, const char *, ...));
 	int  (*fd) __P((DB *, int *));
 	int  (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
-	int  (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+	int  (*get_bt_minkey) __P((DB *, u_int32_t *));
 	int  (*get_byteswapped) __P((DB *, int *));
 	int  (*get_cachesize) __P((DB *, u_int32_t *, u_int32_t *, int *));
 	int  (*get_dbname) __P((DB *, const char **, const char **));
@@ -1398,77 +1507,78 @@ struct __db {
 	void (*get_errfile) __P((DB *, FILE **));
 	void (*get_errpfx) __P((DB *, const char **));
 	int  (*get_flags) __P((DB *, u_int32_t *));
-	int  (*get_lorder) __P((DB *, int *));
-	int  (*get_open_flags) __P((DB *, u_int32_t *));
-	int  (*get_pagesize) __P((DB *, u_int32_t *));
-	int  (*get_transactional) __P((DB *));
-	int  (*get_type) __P((DB *, DBTYPE *));
-	int  (*join) __P((DB *, DBC **, DBC **, u_int32_t));
-	int  (*key_range) __P((DB *,
-		DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
-	int  (*open) __P((DB *, DB_TXN *,
-		const char *, const char *, DBTYPE, u_int32_t, int));
-	int  (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
-	int  (*remove) __P((DB *, const char *, const char *, u_int32_t));
-	int  (*rename) __P((DB *,
-	    const char *, const char *, const char *, u_int32_t));
-	int  (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
-	int  (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
-	int  (*set_alloc) __P((DB *, void *(*)(size_t),
-		void *(*)(void *, size_t), void (*)(void *)));
-	int  (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
-	int  (*set_dup_compare) __P((DB *,
-	    int (*)(DB *, const DBT *, const DBT *)));
-	int  (*set_encrypt) __P((DB *, const char *, u_int32_t));
-	void (*set_errcall) __P((DB *,
-	    void (*)(const DB_ENV *, const char *, const char *)));
-	void (*set_errfile) __P((DB *, FILE *));
-	void (*set_errpfx) __P((DB *, const char *));
-	int  (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
-	int  (*set_flags) __P((DB *, u_int32_t));
-	int  (*set_lorder) __P((DB *, int));
-	void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *)));
-	void (*get_msgfile) __P((DB *, FILE **));
-	void (*set_msgfile) __P((DB *, FILE *));
-	int  (*set_pagesize) __P((DB *, u_int32_t));
-	int  (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
-	int  (*stat) __P((DB *, DB_TXN *, void *, u_int32_t));
-	int  (*stat_print) __P((DB *, u_int32_t));
-	int  (*sync) __P((DB *, u_int32_t));
-	int  (*upgrade) __P((DB *, const char *, u_int32_t));
-	int  (*verify) __P((DB *,
-	    const char *, const char *, FILE *, u_int32_t));
-
-	int  (*get_bt_minkey) __P((DB *, u_int32_t *));
-	int  (*set_bt_compare) __P((DB *,
-	    int (*)(DB *, const DBT *, const DBT *)));
-	int  (*set_bt_maxkey) __P((DB *, u_int32_t));
-	int  (*set_bt_minkey) __P((DB *, u_int32_t));
-	int  (*set_bt_prefix) __P((DB *,
-	    size_t (*)(DB *, const DBT *, const DBT *)));
-
 	int  (*get_h_ffactor) __P((DB *, u_int32_t *));
 	int  (*get_h_nelem) __P((DB *, u_int32_t *));
-	int  (*set_h_ffactor) __P((DB *, u_int32_t));
-	int  (*set_h_hash) __P((DB *,
-	    u_int32_t (*)(DB *, const void *, u_int32_t)));
-	int  (*set_h_nelem) __P((DB *, u_int32_t));
-
+	int  (*get_lorder) __P((DB *, int *));
+	DB_MPOOLFILE *(*get_mpf) __P((DB *));
+	void (*get_msgfile) __P((DB *, FILE **));
+	int  (*get_open_flags) __P((DB *, u_int32_t *));
+	int  (*get_pagesize) __P((DB *, u_int32_t *));
+	int  (*get_q_extentsize) __P((DB *, u_int32_t *));
 	int  (*get_re_delim) __P((DB *, int *));
 	int  (*get_re_len) __P((DB *, u_int32_t *));
 	int  (*get_re_pad) __P((DB *, int *));
 	int  (*get_re_source) __P((DB *, const char **));
+	int  (*get_transactional) __P((DB *));
+	int  (*get_type) __P((DB *, DBTYPE *));
+	int  (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+	int  (*key_range)
+		__P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+	int  (*open) __P((DB *,
+		DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+	int  (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+	int  (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+	int  (*remove) __P((DB *, const char *, const char *, u_int32_t));
+	int  (*rename) __P((DB *,
+		const char *, const char *, const char *, u_int32_t));
+	int  (*set_alloc) __P((DB *, void *(*)(size_t),
+		void *(*)(void *, size_t), void (*)(void *)));
+	int  (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+	int  (*set_bt_compare)
+		__P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+	int  (*set_bt_minkey) __P((DB *, u_int32_t));
+	int  (*set_bt_prefix)
+		__P((DB *, size_t (*)(DB *, const DBT *, const DBT *)));
+	int  (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+	int  (*set_dup_compare)
+		__P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+	int  (*set_encrypt) __P((DB *, const char *, u_int32_t));
+	void (*set_errcall) __P((DB *,
+		void (*)(const DB_ENV *, const char *, const char *)));
+	void (*set_errfile) __P((DB *, FILE *));
+	void (*set_errpfx) __P((DB *, const char *));
+	int  (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+	int  (*set_flags) __P((DB *, u_int32_t));
+	int  (*set_h_ffactor) __P((DB *, u_int32_t));
+	int  (*set_h_hash)
+		__P((DB *, u_int32_t (*)(DB *, const void *, u_int32_t)));
+	int  (*set_h_nelem) __P((DB *, u_int32_t));
+	int  (*set_lorder) __P((DB *, int));
+	void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *)));
+	void (*set_msgfile) __P((DB *, FILE *));
+	int  (*set_pagesize) __P((DB *, u_int32_t));
+	int  (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+	int  (*set_q_extentsize) __P((DB *, u_int32_t));
 	int  (*set_re_delim) __P((DB *, int));
 	int  (*set_re_len) __P((DB *, u_int32_t));
 	int  (*set_re_pad) __P((DB *, int));
 	int  (*set_re_source) __P((DB *, const char *));
+	int  (*stat) __P((DB *, DB_TXN *, void *, u_int32_t));
+	int  (*stat_print) __P((DB *, u_int32_t));
+	int  (*sync) __P((DB *, u_int32_t));
+	int  (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+	int  (*upgrade) __P((DB *, const char *, u_int32_t));
+	int  (*verify)
+		__P((DB *, const char *, const char *, FILE *, u_int32_t));
+	/* DB PUBLIC HANDLE LIST END */
 
-	int  (*get_q_extentsize) __P((DB *, u_int32_t *));
-	int  (*set_q_extentsize) __P((DB *, u_int32_t));
-
+	/* DB PRIVATE HANDLE LIST BEGIN */
+	int  (*dump) __P((DB *, const char *,
+		int (*)(void *, const void *), void *, int, int));
 	int  (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *));
 	int  (*db_am_rename) __P((DB *, DB_TXN *,
 	    const char *, const char *, const char *));
+	/* DB PRIVATE HANDLE LIST END */
 
 	/*
 	 * Never called; these are a place to save function pointers
@@ -1483,39 +1593,45 @@ struct __db {
 #define	DB_OK_RECNO	0x08
 	u_int32_t	am_ok;		/* Legal AM choices. */
 
-#define	DB_AM_CHKSUM		0x00000001 /* Checksumming. */
-#define	DB_AM_CL_WRITER		0x00000002 /* Allow writes in client replica. */
-#define	DB_AM_COMPENSATE	0x00000004 /* Created by compensating txn. */
-#define	DB_AM_CREATED		0x00000008 /* Database was created upon open. */
-#define	DB_AM_CREATED_MSTR	0x00000010 /* Encompassing file was created. */
-#define	DB_AM_DBM_ERROR		0x00000020 /* Error in DBM/NDBM database. */
-#define	DB_AM_DELIMITER		0x00000040 /* Variable length delimiter set. */
-#define	DB_AM_DIRTY		0x00000080 /* Support Dirty Reads. */
-#define	DB_AM_DISCARD		0x00000100 /* Discard any cached pages. */
-#define	DB_AM_DUP		0x00000200 /* DB_DUP. */
-#define	DB_AM_DUPSORT		0x00000400 /* DB_DUPSORT. */
-#define	DB_AM_ENCRYPT		0x00000800 /* Encryption. */
-#define	DB_AM_FIXEDLEN		0x00001000 /* Fixed-length records. */
-#define	DB_AM_INMEM		0x00002000 /* In-memory; no sync on close. */
-#define	DB_AM_INORDER		0x00004000 /* DB_INORDER. */
-#define	DB_AM_IN_RENAME		0x00008000 /* File is being renamed. */
-#define	DB_AM_NOT_DURABLE	0x00010000 /* Do not log changes. */
-#define	DB_AM_OPEN_CALLED	0x00020000 /* DB->open called. */
-#define	DB_AM_PAD		0x00040000 /* Fixed-length record pad. */
-#define	DB_AM_PGDEF		0x00080000 /* Page size was defaulted. */
-#define	DB_AM_RDONLY		0x00100000 /* Database is readonly. */
-#define	DB_AM_RECNUM		0x00200000 /* DB_RECNUM. */
-#define	DB_AM_RECOVER		0x00400000 /* DB opened by recovery routine. */
-#define	DB_AM_RENUMBER		0x00800000 /* DB_RENUMBER. */
-#define	DB_AM_REPLICATION	0x01000000 /* An internal replication file. */
-#define	DB_AM_REVSPLITOFF	0x02000000 /* DB_REVSPLITOFF. */
-#define	DB_AM_SECONDARY		0x04000000 /* Database is a secondary index. */
-#define	DB_AM_SNAPSHOT		0x08000000 /* DB_SNAPSHOT. */
-#define	DB_AM_SUBDB		0x10000000 /* Subdatabases supported. */
-#define	DB_AM_SWAP		0x20000000 /* Pages need to be byte-swapped. */
-#define	DB_AM_TXN		0x40000000 /* Opened in a transaction. */
-#define	DB_AM_VERIFYING		0x80000000 /* DB handle is in the verifier. */
-	u_int32_t orig_flags;		   /* Flags at  open, for refresh. */
+	/*
+	 * This field really ought to be an AM_FLAG, but we have
+	 * run out of bits.  If/when we decide to split up
+	 * the flags, we can incorporate it.
+	 */
+	int	 preserve_fid;		/* Do not free fileid on close. */
+
+#define	DB_AM_CHKSUM		0x00000001 /* Checksumming */
+#define	DB_AM_CL_WRITER		0x00000002 /* Allow writes in client replica */
+#define	DB_AM_COMPENSATE	0x00000004 /* Created by compensating txn */
+#define	DB_AM_CREATED		0x00000008 /* Database was created upon open */
+#define	DB_AM_CREATED_MSTR	0x00000010 /* Encompassing file was created */
+#define	DB_AM_DBM_ERROR		0x00000020 /* Error in DBM/NDBM database */
+#define	DB_AM_DELIMITER		0x00000040 /* Variable length delimiter set */
+#define	DB_AM_DISCARD		0x00000080 /* Discard any cached pages */
+#define	DB_AM_DUP		0x00000100 /* DB_DUP */
+#define	DB_AM_DUPSORT		0x00000200 /* DB_DUPSORT */
+#define	DB_AM_ENCRYPT		0x00000400 /* Encryption */
+#define	DB_AM_FIXEDLEN		0x00000800 /* Fixed-length records */
+#define	DB_AM_INMEM		0x00001000 /* In-memory; no sync on close */
+#define	DB_AM_INORDER		0x00002000 /* DB_INORDER */
+#define	DB_AM_IN_RENAME		0x00004000 /* File is being renamed */
+#define	DB_AM_NOT_DURABLE	0x00008000 /* Do not log changes */
+#define	DB_AM_OPEN_CALLED	0x00010000 /* DB->open called */
+#define	DB_AM_PAD		0x00020000 /* Fixed-length record pad */
+#define	DB_AM_PGDEF		0x00040000 /* Page size was defaulted */
+#define	DB_AM_RDONLY		0x00080000 /* Database is readonly */
+#define	DB_AM_READ_UNCOMMITTED	0x00100000 /* Support degree 1 isolation */
+#define	DB_AM_RECNUM		0x00200000 /* DB_RECNUM */
+#define	DB_AM_RECOVER		0x00400000 /* DB opened by recovery routine */
+#define	DB_AM_RENUMBER		0x00800000 /* DB_RENUMBER */
+#define	DB_AM_REVSPLITOFF	0x01000000 /* DB_REVSPLITOFF */
+#define	DB_AM_SECONDARY		0x02000000 /* Database is a secondary index */
+#define	DB_AM_SNAPSHOT		0x04000000 /* DB_SNAPSHOT */
+#define	DB_AM_SUBDB		0x08000000 /* Subdatabases supported */
+#define	DB_AM_SWAP		0x10000000 /* Pages need to be byte-swapped */
+#define	DB_AM_TXN		0x20000000 /* Opened in a transaction */
+#define	DB_AM_VERIFYING		0x40000000 /* DB handle is in the verifier */
+	u_int32_t orig_flags;		   /* Flags at open, for refresh */
 	u_int32_t flags;
 };
 
@@ -1617,7 +1733,7 @@ struct __dbc {
 	DBT	  my_rkey;		/* Space for returned [primary] key. */
 	DBT	  my_rdata;		/* Space for returned data. */
 
-	u_int32_t lid;			/* Default process' locker id. */
+	void	 *lref;			/* Reference to default locker. */
 	u_int32_t locker;		/* Locker for this operation. */
 	DBT	  lock_dbt;		/* DBT referencing lock. */
 	DB_LOCK_ILOCK lock;		/* Object to be locked. */
@@ -1629,15 +1745,17 @@ struct __dbc {
 
 	DBC_INTERNAL *internal;		/* Access method private. */
 
-	int (*c_close) __P((DBC *));	/* Methods: public. */
+	/* DBC PUBLIC HANDLE LIST BEGIN */
+	int (*c_close) __P((DBC *));
 	int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
 	int (*c_del) __P((DBC *, u_int32_t));
 	int (*c_dup) __P((DBC *, DBC **, u_int32_t));
 	int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
 	int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
 	int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+	/* DBC PUBLIC HANDLE LIST END */
 
-					/* Methods: private. */
+	/* DBC PRIVATE HANDLE LIST BEGIN */
 	int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
 	int (*c_am_close) __P((DBC *, db_pgno_t, int *));
 	int (*c_am_del) __P((DBC *));
@@ -1645,20 +1763,29 @@ struct __dbc {
 	int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
 	int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
 	int (*c_am_writelock) __P((DBC *));
+	/* DBC PRIVATE HANDLE LIST END */
 
-#define	DBC_ACTIVE	 0x0001		/* Cursor in use. */
-#define	DBC_COMPENSATE	 0x0002		/* Cursor compensating, don't lock. */
-#define	DBC_DEGREE_2	 0x0004		/* Cursor has degree 2 isolation. */
-#define	DBC_DIRTY_READ	 0x0008		/* Cursor supports dirty reads. */
-#define	DBC_OPD		 0x0010		/* Cursor references off-page dups. */
-#define	DBC_RECOVER	 0x0020		/* Recovery cursor; don't log/lock. */
-#define	DBC_RMW		 0x0040		/* Acquire write flag in read op. */
-#define	DBC_TRANSIENT	 0x0080		/* Cursor is transient. */
-#define	DBC_WRITECURSOR	 0x0100		/* Cursor may be used to write (CDB). */
-#define	DBC_WRITER	 0x0200		/* Cursor immediately writing (CDB). */
-#define	DBC_MULTIPLE	 0x0400		/* Return Multiple data. */
-#define	DBC_MULTIPLE_KEY 0x0800		/* Return Multiple keys and data. */
-#define	DBC_OWN_LID	 0x1000		/* Free lock id on destroy. */
+/*
+ * DBC_COMPENSATE and DBC_RECOVER are used during recovery and transaction
+ * abort.  If a transaction is being aborted or recovered then DBC_RECOVER
+ * will be set and locking and logging will be disabled on this cursor.  If
+ * we are performing a compensating transaction (e.g. free page processing)
+ * then DBC_COMPENSATE will be set to inhibit locking, but logging will still
+ * be required.
+ */
+#define	DBC_ACTIVE		0x0001	/* Cursor in use. */
+#define	DBC_COMPENSATE		0x0002	/* Cursor compensating, don't lock. */
+#define	DBC_MULTIPLE		0x0004	/* Return Multiple data. */
+#define	DBC_MULTIPLE_KEY	0x0008	/* Return Multiple keys and data. */
+#define	DBC_OPD			0x0010	/* Cursor references off-page dups. */
+#define	DBC_OWN_LID		0x0020	/* Free lock id on destroy. */
+#define	DBC_READ_COMMITTED	0x0040	/* Cursor has degree 2 isolation. */
+#define	DBC_READ_UNCOMMITTED	0x0080	/* Cursor has degree 1 isolation. */
+#define	DBC_RECOVER		0x0100	/* Recovery cursor; don't log/lock. */
+#define	DBC_RMW			0x0200	/* Acquire write flag in read op. */
+#define	DBC_TRANSIENT		0x0400	/* Cursor is transient. */
+#define	DBC_WRITECURSOR		0x0800	/* Cursor may be used to write (CDB). */
+#define	DBC_WRITER		0x1000	/* Cursor immediately writing (CDB). */
 	u_int32_t flags;
 };
 
@@ -1677,7 +1804,6 @@ struct __db_bt_stat {
 	u_int32_t bt_nkeys;		/* Number of unique keys. */
 	u_int32_t bt_ndata;		/* Number of data items. */
 	u_int32_t bt_pagesize;		/* Page size. */
-	u_int32_t bt_maxkey;		/* Maxkey value. */
 	u_int32_t bt_minkey;		/* Minkey value. */
 	u_int32_t bt_re_len;		/* Fixed-length record length. */
 	u_int32_t bt_re_pad;		/* Fixed-length record pad. */
@@ -1694,6 +1820,21 @@ struct __db_bt_stat {
 	u_int32_t bt_over_pgfree;	/* Bytes free in overflow pages. */
 };
 
+struct __db_compact {
+	/* Input Parameters. */
+	u_int32_t	compact_fillpercent;	/* Desired fillfactor: 1-100 */
+	db_timeout_t	compact_timeout;	/* Lock timeout. */
+	u_int32_t	compact_pages;		/* Max pages to process. */
+	/* Output Stats. */
+	u_int32_t	compact_pages_free;	/* Number of pages freed. */
+	u_int32_t	compact_pages_examine;	/* Number of pages examined. */
+	u_int32_t	compact_levels;		/* Number of levels removed. */
+	u_int32_t	compact_deadlock;	/* Number of deadlocks. */
+	db_pgno_t	compact_pages_truncated; /* Pages truncated to OS. */
+	/* Internal. */
+	db_pgno_t	compact_truncate;	/* Page number for truncation */
+};
+
 /* Hash statistics structure. */
 struct __db_h_stat {
 	u_int32_t hash_magic;		/* Magic number. */
@@ -1766,8 +1907,9 @@ struct __db_env {
 	 */
 #define	DB_VERB_DEADLOCK	0x0001	/* Deadlock detection information. */
 #define	DB_VERB_RECOVERY	0x0002	/* Recovery information. */
-#define	DB_VERB_REPLICATION	0x0004	/* Replication information. */
-#define	DB_VERB_WAITSFOR	0x0008	/* Dump waits-for table. */
+#define	DB_VERB_REGISTER	0x0004	/* DB_REGISTER information. */
+#define	DB_VERB_REPLICATION	0x0008	/* Replication information. */
+#define	DB_VERB_WAITSFOR	0x0010	/* Dump waits-for table. */
 	u_int32_t	 verbose;	/* Verbose output. */
 
 	void		*app_private;	/* Application-private handle. */
@@ -1775,6 +1917,19 @@ struct __db_env {
 	int (*app_dispatch)		/* User-specified recovery dispatch. */
 	    __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
 
+	/* Mutexes. */
+	u_int32_t	mutex_align;	/* Mutex alignment */
+	u_int32_t	mutex_cnt;	/* Number of mutexes to configure */
+	u_int32_t	mutex_inc;	/* Number of mutexes to add */
+	u_int32_t	mutex_tas_spins;/* Test-and-set spin count */
+
+	struct {
+		int	  alloc_id;	/* Allocation ID argument */
+		u_int32_t flags;	/* Flags argument */
+	} *mutex_iq;			/* Initial mutexes queue */
+	u_int		mutex_iq_next;	/* Count of initial mutexes */
+	u_int		mutex_iq_max;	/* Maximum initial mutexes */
+
 	/* Locking. */
 	u_int8_t	*lk_conflicts;	/* Two dimensional conflict matrix. */
 	int		 lk_modes;	/* Number of lock modes in table. */
@@ -1788,6 +1943,7 @@ struct __db_env {
 	u_int32_t	 lg_bsize;	/* Buffer size. */
 	u_int32_t	 lg_size;	/* Log file size. */
 	u_int32_t	 lg_regionmax;	/* Region size. */
+	int		 lg_filemode;	/* Log file permission mode. */
 
 	/* Memory pool. */
 	u_int32_t	 mp_gbytes;	/* Cachesize: GB. */
@@ -1810,11 +1966,19 @@ struct __db_env {
 	time_t		 tx_timestamp;	/* Recover to specific timestamp. */
 	db_timeout_t	 tx_timeout;	/* Timeout for transactions. */
 
+	/* Thread tracking. */
+	u_int32_t	thr_nbucket;	/* Number of hash buckets. */
+	u_int32_t	thr_max;	/* Max before garbage collection. */
+	void		*thr_hashtab;	/* Hash table of DB_THREAD_INFO. */
+
 	/*******************************************************
 	 * Private: owned by DB.
 	 *******************************************************/
+	pid_t		pid_cache;	/* Cached process ID. */
+
 					/* User files, paths. */
 	char		*db_home;	/* Database home. */
+	char		*db_abshome;	/* Absolute path when started. */
 	char		*db_log_dir;	/* Database log file directory. */
 	char		*db_tmp_dir;	/* Database tmp file directory. */
 
@@ -1824,12 +1988,25 @@ struct __db_env {
 
 	int		 db_mode;	/* Default open permissions. */
 	int		 dir_mode;	/* Intermediate directory perms. */
-	u_int32_t	 env_lid;	/* Locker ID in non-threaded handles. */
+	void		*env_lref;	/* Locker in non-threaded handles. */
 	u_int32_t	 open_flags;	/* Flags passed to DB_ENV->open. */
 
 	void		*reginfo;	/* REGINFO structure reference. */
 	DB_FH		*lockfhp;	/* fcntl(2) locking file handle. */
 
+	DB_FH		*registry;	/* DB_REGISTER file handle. */
+	u_int32_t	registry_off;	/*
+					 * Offset of our slot.  We can't use
+					 * off_t because its size depends on
+					 * build settings.
+					 */
+
+					/* Return ID, check if ID alive. */
+	void	       (*thread_id) __P((DB_ENV *, pid_t *, db_threadid_t *));
+	int	       (*is_alive) __P((DB_ENV *, pid_t, db_threadid_t));
+	char	       *(*thread_id_string)
+			__P((DB_ENV *, pid_t, db_threadid_t, char *));
+
 	int	      (**recover_dtab)	/* Dispatch table for recover funcs. */
 			    __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 	size_t		 recover_dtab_size;
@@ -1841,7 +2018,6 @@ struct __db_env {
 	int		 db_ref;	/* DB reference count. */
 
 	long		 shm_key;	/* shmget(2) key. */
-	u_int32_t	 tas_spins;	/* test-and-set spins. */
 
 	/*
 	 * List of open DB handles for this DB_ENV, used for cursor
@@ -1856,7 +2032,7 @@ struct __db_env {
 	 * Explicit representation of structure in queue.h.
 	 * LIST_HEAD(dblist, __db);
 	 */
-	DB_MUTEX	*dblist_mutexp;	/* Mutex. */
+	db_mutex_t mtx_dblist;		/* Mutex. */
 	struct {
 		struct __db *lh_first;
 	} dblist;
@@ -1879,161 +2055,183 @@ struct __db_env {
 	} xa_txn;
 	int		 xa_rmid;	/* XA Resource Manager ID. */
 
+	char		*passwd;	/* Cryptography support. */
+	size_t		 passwd_len;
+	void		*crypto_handle;	/* Primary handle. */
+	db_mutex_t	 mtx_mt;	/* Mersenne Twister mutex. */
+	int		 mti;		/* Mersenne Twister index. */
+	u_long		*mt;		/* Mersenne Twister state vector. */
+
 	/* API-private structure. */
 	void		*api1_internal;	/* C++, Perl API private */
 	void		*api2_internal;	/* Java API private */
 
-	char		*passwd;	/* Cryptography support. */
-	size_t		 passwd_len;
-	void		*crypto_handle;	/* Primary handle. */
-	DB_MUTEX	*mt_mutexp;	/* Mersenne Twister mutex. */
-	int		 mti;		/* Mersenne Twister index. */
-	u_long		*mt;		/* Mersenne Twister state vector. */
+	void *lg_handle;		/* Log handle. */
+	void *lk_handle;		/* Lock handle. */
+	void *mp_handle;		/* Mpool handle. */
+	void *mutex_handle;		/* Mutex handle. */
+	void *rep_handle;		/* Replication handle. */
+	void *tx_handle;		/* Txn handle. */
 
-					/* DB_ENV Methods. */
+	/* DB_ENV PUBLIC HANDLE LIST BEGIN */
 	int  (*close) __P((DB_ENV *, u_int32_t));
 	int  (*dbremove) __P((DB_ENV *,
 		DB_TXN *, const char *, const char *, u_int32_t));
-	int  (*dbrename) __P((DB_ENV *, DB_TXN *,
-		const char *, const char *, const char *, u_int32_t));
+	int  (*dbrename) __P((DB_ENV *,
+		DB_TXN *, const char *, const char *, const char *, u_int32_t));
 	void (*err) __P((const DB_ENV *, int, const char *, ...));
 	void (*errx) __P((const DB_ENV *, const char *, ...));
-	int  (*open) __P((DB_ENV *, const char *, u_int32_t, int));
-	int  (*remove) __P((DB_ENV *, const char *, u_int32_t));
-	int  (*stat_print) __P((DB_ENV *, u_int32_t));
-
-					/* House-keeping. */
-	int  (*fileid_reset) __P((DB_ENV *, char *, int));
-	int  (*is_bigendian) __P((void));
-	int  (*lsn_reset) __P((DB_ENV *, char *, int));
-	int  (*prdbt) __P((DBT *,
-		int, const char *, void *, int (*)(void *, const void *), int));
-
-					/* Setters/getters. */
-	int  (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
-		void *(*)(void *, size_t), void (*)(void *)));
-	int  (*set_app_dispatch) __P((DB_ENV *,
-		int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+	int  (*failchk) __P((DB_ENV *, u_int32_t));
+	int  (*fileid_reset) __P((DB_ENV *, const char *, u_int32_t));
+	int  (*get_cachesize) __P((DB_ENV *, u_int32_t *, u_int32_t *, int *));
 	int  (*get_data_dirs) __P((DB_ENV *, const char ***));
-	int  (*set_data_dir) __P((DB_ENV *, const char *));
 	int  (*get_encrypt_flags) __P((DB_ENV *, u_int32_t *));
-	int  (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
-	void (*set_errcall) __P((DB_ENV *,
-	    void (*)(const DB_ENV *, const char *, const char *)));
 	void (*get_errfile) __P((DB_ENV *, FILE **));
-	void (*set_errfile) __P((DB_ENV *, FILE *));
 	void (*get_errpfx) __P((DB_ENV *, const char **));
-	void (*set_errpfx) __P((DB_ENV *, const char *));
-	int  (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
 	int  (*get_flags) __P((DB_ENV *, u_int32_t *));
-	int  (*set_flags) __P((DB_ENV *, u_int32_t, int));
 	int  (*get_home) __P((DB_ENV *, const char **));
-	int  (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t));
-	int  (*get_open_flags) __P((DB_ENV *, u_int32_t *));
-	int  (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
-	int  (*set_rpc_server) __P((DB_ENV *,
-		void *, const char *, long, long, u_int32_t));
-	int  (*get_shm_key) __P((DB_ENV *, long *));
-	int  (*set_shm_key) __P((DB_ENV *, long));
-	void (*set_msgcall) __P((DB_ENV *,
-		void (*)(const DB_ENV *, const char *)));
-	void (*get_msgfile) __P((DB_ENV *, FILE **));
-	void (*set_msgfile) __P((DB_ENV *, FILE *));
-	int  (*get_tas_spins) __P((DB_ENV *, u_int32_t *));
-	int  (*set_tas_spins) __P((DB_ENV *, u_int32_t));
-	int  (*get_tmp_dir) __P((DB_ENV *, const char **));
-	int  (*set_tmp_dir) __P((DB_ENV *, const char *));
-	int  (*get_verbose) __P((DB_ENV *, u_int32_t, int *));
-	int  (*set_verbose) __P((DB_ENV *, u_int32_t, int));
-
-	void *lg_handle;		/* Log handle and methods. */
 	int  (*get_lg_bsize) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
 	int  (*get_lg_dir) __P((DB_ENV *, const char **));
-	int  (*set_lg_dir) __P((DB_ENV *, const char *));
+	int  (*get_lg_filemode) __P((DB_ENV *, int *));
 	int  (*get_lg_max) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lg_max) __P((DB_ENV *, u_int32_t));
 	int  (*get_lg_regionmax) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
-	int  (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
-	int  (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
-	int  (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
-	int  (*log_flush) __P((DB_ENV *, const DB_LSN *));
-	int  (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
-	int  (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
-	int  (*log_stat_print) __P((DB_ENV *, u_int32_t));
-
-	void *lk_handle;		/* Lock handle and methods. */
 	int  (*get_lk_conflicts) __P((DB_ENV *, const u_int8_t **, int *));
-	int  (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
 	int  (*get_lk_detect) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lk_detect) __P((DB_ENV *, u_int32_t));
-	int  (*set_lk_max) __P((DB_ENV *, u_int32_t));
-	int  (*get_lk_max_locks) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
 	int  (*get_lk_max_lockers) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+	int  (*get_lk_max_locks) __P((DB_ENV *, u_int32_t *));
 	int  (*get_lk_max_objects) __P((DB_ENV *, u_int32_t *));
-	int  (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+	int  (*get_mp_max_openfd) __P((DB_ENV *, int *));
+	int  (*get_mp_max_write) __P((DB_ENV *, int *, int *));
+	int  (*get_mp_mmapsize) __P((DB_ENV *, size_t *));
+	void (*get_msgfile) __P((DB_ENV *, FILE **));
+	int  (*get_open_flags) __P((DB_ENV *, u_int32_t *));
+	int  (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *));
+	int  (*get_shm_key) __P((DB_ENV *, long *));
+	int  (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t));
+	int  (*get_tmp_dir) __P((DB_ENV *, const char **));
+	int  (*get_tx_max) __P((DB_ENV *, u_int32_t *));
+	int  (*get_tx_timestamp) __P((DB_ENV *, time_t *));
+	int  (*get_verbose) __P((DB_ENV *, u_int32_t, int *));
+	int  (*is_bigendian) __P((void));
 	int  (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
 	int  (*lock_get) __P((DB_ENV *,
 		u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
-	int  (*lock_put) __P((DB_ENV *, DB_LOCK *));
 	int  (*lock_id) __P((DB_ENV *, u_int32_t *));
 	int  (*lock_id_free) __P((DB_ENV *, u_int32_t));
+	int  (*lock_put) __P((DB_ENV *, DB_LOCK *));
 	int  (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
 	int  (*lock_stat_print) __P((DB_ENV *, u_int32_t));
 	int  (*lock_vec) __P((DB_ENV *,
 		u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
-
-	void *mp_handle;		/* Mpool handle and methods. */
-	int  (*get_cachesize) __P((DB_ENV *, u_int32_t *, u_int32_t *, int *));
-	int  (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
-	int  (*get_mp_mmapsize) __P((DB_ENV *, size_t *));
-	int  (*set_mp_mmapsize) __P((DB_ENV *, size_t));
-	int  (*get_mp_max_openfd) __P((DB_ENV *, int *));
-	int  (*set_mp_max_openfd) __P((DB_ENV *, int));
-	int  (*get_mp_max_write) __P((DB_ENV *, int *, int *));
-	int  (*set_mp_max_write) __P((DB_ENV *, int, int));
+	int  (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+	int  (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+	int  (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+	int  (*log_flush) __P((DB_ENV *, const DB_LSN *));
+	int  (*log_printf) __P((DB_ENV *, DB_TXN *, const char *, ...));
+	int  (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+	int  (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+	int  (*log_stat_print) __P((DB_ENV *, u_int32_t));
+	int  (*lsn_reset) __P((DB_ENV *, const char *, u_int32_t));
 	int  (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
-	int  (*memp_register) __P((DB_ENV *, int,
-		int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
-		int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+	int  (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *,
+		db_pgno_t, void *, DBT *), int (*)(DB_ENV *,
+		db_pgno_t, void *, DBT *)));
 	int  (*memp_stat) __P((DB_ENV *,
 		DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
 	int  (*memp_stat_print) __P((DB_ENV *, u_int32_t));
 	int  (*memp_sync) __P((DB_ENV *, DB_LSN *));
 	int  (*memp_trickle) __P((DB_ENV *, int, int *));
-
-	void *rep_handle;		/* Replication handle and methods. */
-	int  (*rep_elect) __P((DB_ENV *, int, int, int,
-		u_int32_t, int *, u_int32_t));
+	int  (*mutex_alloc) __P((DB_ENV *, u_int32_t, db_mutex_t *));
+	int  (*mutex_free) __P((DB_ENV *, db_mutex_t));
+	int  (*mutex_get_align) __P((DB_ENV *, u_int32_t *));
+	int  (*mutex_get_increment) __P((DB_ENV *, u_int32_t *));
+	int  (*mutex_get_max) __P((DB_ENV *, u_int32_t *));
+	int  (*mutex_get_tas_spins) __P((DB_ENV *, u_int32_t *));
+	int  (*mutex_lock) __P((DB_ENV *, db_mutex_t));
+	int  (*mutex_set_align) __P((DB_ENV *, u_int32_t));
+	int  (*mutex_set_increment) __P((DB_ENV *, u_int32_t));
+	int  (*mutex_set_max) __P((DB_ENV *, u_int32_t));
+	int  (*mutex_set_tas_spins) __P((DB_ENV *, u_int32_t));
+	int  (*mutex_stat) __P((DB_ENV *, DB_MUTEX_STAT **, u_int32_t));
+	int  (*mutex_stat_print) __P((DB_ENV *, u_int32_t));
+	int  (*mutex_unlock) __P((DB_ENV *, db_mutex_t));
+	int  (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+	int  (*remove) __P((DB_ENV *, const char *, u_int32_t));
+	int  (*rep_elect)
+		__P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t));
 	int  (*rep_flush) __P((DB_ENV *));
-	int  (*rep_process_message) __P((DB_ENV *, DBT *, DBT *,
-	    int *, DB_LSN *));
+	int  (*rep_get_config) __P((DB_ENV *, u_int32_t, int *));
+	int  (*rep_process_message)
+		__P((DB_ENV *, DBT *, DBT *, int *, DB_LSN *));
+	int  (*rep_set_config) __P((DB_ENV *, u_int32_t, int));
 	int  (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
 	int  (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
 	int  (*rep_stat_print) __P((DB_ENV *, u_int32_t));
-	int  (*get_rep_limit) __P((DB_ENV *, u_int32_t *, u_int32_t *));
+	int  (*rep_sync) __P((DB_ENV *, u_int32_t));
+	int  (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+		void *(*)(void *, size_t), void (*)(void *)));
+	int  (*set_app_dispatch)
+		__P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+	int  (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+	int  (*set_data_dir) __P((DB_ENV *, const char *));
+	int  (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+	void (*set_errcall) __P((DB_ENV *,
+		void (*)(const DB_ENV *, const char *, const char *)));
+	void (*set_errfile) __P((DB_ENV *, FILE *));
+	void (*set_errpfx) __P((DB_ENV *, const char *));
+	int  (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+	int  (*set_flags) __P((DB_ENV *, u_int32_t, int));
+	int  (*set_intermediate_dir) __P((DB_ENV *, int, u_int32_t));
+	int  (*set_isalive) __P((DB_ENV *,
+		int (*)(DB_ENV *, pid_t, db_threadid_t)));
+	int  (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+	int  (*set_lg_dir) __P((DB_ENV *, const char *));
+	int  (*set_lg_filemode) __P((DB_ENV *, int));
+	int  (*set_lg_max) __P((DB_ENV *, u_int32_t));
+	int  (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+	int  (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+	int  (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+	int  (*set_lk_max) __P((DB_ENV *, u_int32_t));
+	int  (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+	int  (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+	int  (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+	int  (*set_mp_max_openfd) __P((DB_ENV *, int));
+	int  (*set_mp_max_write) __P((DB_ENV *, int, int));
+	int  (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+	void (*set_msgcall)
+		__P((DB_ENV *, void (*)(const DB_ENV *, const char *)));
+	void (*set_msgfile) __P((DB_ENV *, FILE *));
+	int  (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
 	int  (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
 	int  (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
-	int  (*set_rep_transport) __P((DB_ENV *, int,
-		int (*) (DB_ENV *, const DBT *, const DBT *, const DB_LSN *,
-		int, u_int32_t)));
-
-	void *tx_handle;		/* Txn handle and methods. */
-	int  (*get_tx_max) __P((DB_ENV *, u_int32_t *));
+	int  (*set_rep_transport) __P((DB_ENV *, int, int (*)(DB_ENV *,
+		const DBT *, const DBT *, const DB_LSN *, int, u_int32_t)));
+	int  (*set_rpc_server)
+		__P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+	int  (*set_shm_key) __P((DB_ENV *, long));
+	int  (*set_thread_count) __P((DB_ENV *, u_int32_t));
+	int  (*set_thread_id) __P((DB_ENV *,
+		void (*)(DB_ENV *, pid_t *, db_threadid_t *)));
+	int  (*set_thread_id_string) __P((DB_ENV *,
+		char *(*)(DB_ENV *, pid_t, db_threadid_t, char *)));
+	int  (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+	int  (*set_tmp_dir) __P((DB_ENV *, const char *));
 	int  (*set_tx_max) __P((DB_ENV *, u_int32_t));
-	int  (*get_tx_timestamp) __P((DB_ENV *, time_t *));
 	int  (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+	int  (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+	int  (*stat_print) __P((DB_ENV *, u_int32_t));
 	int  (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
 	int  (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
-	int  (*txn_recover) __P((DB_ENV *,
-		DB_PREPLIST *, long, long *, u_int32_t));
+	int  (*txn_recover)
+		__P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
 	int  (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
 	int  (*txn_stat_print) __P((DB_ENV *, u_int32_t));
-	int  (*get_timeout) __P((DB_ENV *, db_timeout_t *, u_int32_t));
-	int  (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+	/* DB_ENV PUBLIC HANDLE LIST END */
+
+	/* DB_ENV PRIVATE HANDLE LIST BEGIN */
+	int  (*prdbt) __P((DBT *,
+		int, const char *, void *, int (*)(void *, const void *), int));
+	/* DB_ENV PRIVATE HANDLE LIST END */
 
 #define	DB_TEST_ELECTINIT	 1	/* after __rep_elect_init */
 #define	DB_TEST_ELECTVOTE1	 2	/* after sending VOTE1 */
@@ -2056,26 +2254,27 @@ struct __db_env {
 #define	DB_ENV_DBLOCAL		0x0000010 /* DB_ENV allocated for private DB. */
 #define	DB_ENV_DIRECT_DB	0x0000020 /* DB_DIRECT_DB set. */
 #define	DB_ENV_DIRECT_LOG	0x0000040 /* DB_DIRECT_LOG set. */
-#define	DB_ENV_DSYNC_LOG	0x0000080 /* DB_DSYNC_LOG set. */
-#define	DB_ENV_FATAL		0x0000100 /* Doing fatal recovery in env. */
-#define	DB_ENV_LOCKDOWN		0x0000200 /* DB_LOCKDOWN set. */
-#define	DB_ENV_LOG_AUTOREMOVE   0x0000400 /* DB_LOG_AUTOREMOVE set. */
-#define	DB_ENV_LOG_INMEMORY     0x0000800 /* DB_LOG_INMEMORY set. */
-#define	DB_ENV_NOLOCKING	0x0001000 /* DB_NOLOCKING set. */
-#define	DB_ENV_NOMMAP		0x0002000 /* DB_NOMMAP set. */
-#define	DB_ENV_NOPANIC		0x0004000 /* Okay if panic set. */
-#define	DB_ENV_OPEN_CALLED	0x0008000 /* DB_ENV->open called. */
-#define	DB_ENV_OVERWRITE	0x0010000 /* DB_OVERWRITE set. */
-#define	DB_ENV_PRIVATE		0x0020000 /* DB_PRIVATE set. */
-#define	DB_ENV_REGION_INIT	0x0040000 /* DB_REGION_INIT set. */
-#define	DB_ENV_RPCCLIENT	0x0080000 /* DB_RPCCLIENT set. */
-#define	DB_ENV_RPCCLIENT_GIVEN	0x0100000 /* User-supplied RPC client struct */
-#define	DB_ENV_SYSTEM_MEM	0x0200000 /* DB_SYSTEM_MEM set. */
-#define	DB_ENV_THREAD		0x0400000 /* DB_THREAD set. */
-#define	DB_ENV_TIME_NOTGRANTED	0x0800000 /* DB_TIME_NOTGRANTED set. */
-#define	DB_ENV_TXN_NOSYNC	0x1000000 /* DB_TXN_NOSYNC set. */
-#define	DB_ENV_TXN_WRITE_NOSYNC	0x2000000 /* DB_TXN_WRITE_NOSYNC set. */
-#define	DB_ENV_YIELDCPU		0x4000000 /* DB_YIELDCPU set. */
+#define	DB_ENV_DSYNC_DB		0x0000080 /* DB_DSYNC_DB set. */
+#define	DB_ENV_DSYNC_LOG	0x0000100 /* DB_DSYNC_LOG set. */
+#define	DB_ENV_FATAL		0x0000200 /* Doing fatal recovery in env. */
+#define	DB_ENV_LOCKDOWN		0x0000400 /* DB_LOCKDOWN set. */
+#define	DB_ENV_LOG_AUTOREMOVE   0x0000800 /* DB_LOG_AUTOREMOVE set. */
+#define	DB_ENV_LOG_INMEMORY     0x0001000 /* DB_LOG_INMEMORY set. */
+#define	DB_ENV_NOLOCKING	0x0002000 /* DB_NOLOCKING set. */
+#define	DB_ENV_NOMMAP		0x0004000 /* DB_NOMMAP set. */
+#define	DB_ENV_NOPANIC		0x0008000 /* Okay if panic set. */
+#define	DB_ENV_OPEN_CALLED	0x0010000 /* DB_ENV->open called. */
+#define	DB_ENV_OVERWRITE	0x0020000 /* DB_OVERWRITE set. */
+#define	DB_ENV_PRIVATE		0x0040000 /* DB_PRIVATE set. */
+#define	DB_ENV_REGION_INIT	0x0080000 /* DB_REGION_INIT set. */
+#define	DB_ENV_RPCCLIENT	0x0100000 /* DB_RPCCLIENT set. */
+#define	DB_ENV_RPCCLIENT_GIVEN	0x0200000 /* User-supplied RPC client struct */
+#define	DB_ENV_SYSTEM_MEM	0x0400000 /* DB_SYSTEM_MEM set. */
+#define	DB_ENV_THREAD		0x0800000 /* DB_THREAD set. */
+#define	DB_ENV_TIME_NOTGRANTED	0x1000000 /* DB_TIME_NOTGRANTED set. */
+#define	DB_ENV_TXN_NOSYNC	0x2000000 /* DB_TXN_NOSYNC set. */
+#define	DB_ENV_TXN_WRITE_NOSYNC	0x4000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define	DB_ENV_YIELDCPU		0x8000000 /* DB_YIELDCPU set. */
 	u_int32_t flags;
 };
 
diff --git a/storage/bdb/dbinc/db_185.in b/storage/bdb/dbinc/db_185.in
index 338455a601a..56b909cd934 100644
--- a/storage/bdb/dbinc/db_185.in
+++ b/storage/bdb/dbinc/db_185.in
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -32,7 +32,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_185.in,v 11.11 2004/01/28 03:36:01 bostic Exp $
+ * $Id: db_185.in,v 12.2 2005/06/16 20:21:45 bostic Exp $
  */
 
 #ifndef _DB_185_H_
@@ -72,7 +72,7 @@
  * XXX
  * SGI/IRIX already has a pgno_t.
  */
-#ifdef	sgi
+#ifdef	__sgi
 #define	pgno_t	db_pgno_t
 #endif
 
diff --git a/storage/bdb/dbinc/db_am.h b/storage/bdb/dbinc/db_am.h
index ed1956c66f4..d9d6c51700a 100644
--- a/storage/bdb/dbinc/db_am.h
+++ b/storage/bdb/dbinc/db_am.h
@@ -1,24 +1,37 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_am.h,v 11.78 2004/09/22 21:14:56 ubell Exp $
+ * $Id: db_am.h,v 12.8 2005/09/28 17:44:24 margo Exp $
  */
 #ifndef _DB_AM_H_
 #define	_DB_AM_H_
 
 /*
- * IS_AUTO_COMMIT --
- *	Test for local auto-commit flag or global flag with no local DbTxn
- *	handle.
+ * IS_ENV_AUTO_COMMIT --
+ *	Auto-commit test for enviroment operations: DbEnv::{open,remove,rename}
  */
-#define	IS_AUTO_COMMIT(dbenv, txn, flags)				\
+#define	IS_ENV_AUTO_COMMIT(dbenv, txn, flags)				\
 	(LF_ISSET(DB_AUTO_COMMIT) ||					\
 	    ((txn) == NULL && F_ISSET((dbenv), DB_ENV_AUTO_COMMIT) &&	\
 	    !LF_ISSET(DB_NO_AUTO_COMMIT)))
 
+/*
+ * IS_DB_AUTO_COMMIT --
+ *	Auto-commit test for database operations.
+ */
+#define	IS_DB_AUTO_COMMIT(dbp, txn)					\
+	    ((txn) == NULL && F_ISSET((dbp), DB_AM_TXN))
+
+/*
+ * STRIP_AUTO_COMMIT --
+ *	Releases after 4.3 no longer requires DB operations to specify the
+ *	AUTO_COMMIT flag, but the API continues to allow it to be specified.
+ */
+#define	STRIP_AUTO_COMMIT(f)	FLD_CLR((f), DB_AUTO_COMMIT)
+
 /* DB recovery operation codes. */
 #define	DB_ADD_DUP	1
 #define	DB_REM_DUP	2
@@ -28,10 +41,10 @@
 /*
  * Standard initialization and shutdown macros for all recovery functions.
  */
-#define	REC_INTRO(func, inc_count) do {					\
+#define	REC_INTRO(func, inc_count, do_cursor) do {			\
 	argp = NULL;							\
-	dbc = NULL;							\
 	file_dbp = NULL;						\
+	COMPQUIET(dbc, NULL);						\
 	/* mpf isn't used by all of the recovery functions. */		\
 	COMPQUIET(mpf, NULL);						\
 	if ((ret = func(dbenv, dbtp->data, &argp)) != 0)		\
@@ -44,9 +57,11 @@
 		}							\
 		goto out;						\
 	}								\
-	if ((ret = __db_cursor(file_dbp, NULL, &dbc, 0)) != 0)		\
-		goto out;						\
-	F_SET(dbc, DBC_RECOVER);					\
+	if (do_cursor) {						\
+		if ((ret = __db_cursor(file_dbp, NULL, &dbc, 0)) != 0)	\
+			goto out;					\
+		F_SET(dbc, DBC_RECOVER);				\
+	}								\
 	mpf = file_dbp->mpf;						\
 } while (0)
 
@@ -124,10 +139,10 @@
  * we don't tie up the internal pages of the tree longer than necessary.
  */
 #define	__LPUT(dbc, lock)						\
-	__ENV_LPUT((dbc)->dbp->dbenv, 					\
-	     lock, F_ISSET((dbc)->dbp, DB_AM_DIRTY) ? DB_LOCK_DOWNGRADE : 0)
-#define	__ENV_LPUT(dbenv, lock, flags)					\
-	(LOCK_ISSET(lock) ? __lock_put(dbenv, &(lock), flags) : 0)
+	__ENV_LPUT((dbc)->dbp->dbenv, lock)
+
+#define	__ENV_LPUT(dbenv, lock)						\
+	(LOCK_ISSET(lock) ? __lock_put(dbenv, &(lock)) : 0)
 
 /*
  * __TLPUT -- transactional lock put
@@ -147,6 +162,34 @@ typedef struct {
 	u_int32_t count;
 } db_trunc_param;
 
+/*
+ * A database should be required to be readonly if it's been explicitly
+ * specified as such or if we're a client in a replicated environment and
+ * we don't have the special "client-writer" designation.
+ */
+#define	DB_IS_READONLY(dbp)						\
+    (F_ISSET(dbp, DB_AM_RDONLY) ||					\
+    (IS_REP_CLIENT((dbp)->dbenv) &&					\
+    !F_ISSET((dbp), DB_AM_CL_WRITER)))
+
+/*
+ * For portability, primary keys that are record numbers are stored in
+ * secondaries in the same byte order as the secondary database.  As a
+ * consequence, we need to swap the byte order of these keys before attempting
+ * to use them for lookups in the primary.  We also need to swap user-supplied
+ * primary keys that are used in secondary lookups (for example, with the
+ * DB_GET_BOTH flag on a secondary get).
+ */
+#include "dbinc/db_swap.h"
+
+#define	SWAP_IF_NEEDED(pdbp, sdbp, pkey)				\
+	do {								\
+		if (((pdbp)->type == DB_QUEUE ||			\
+		    (pdbp)->type == DB_RECNO) &&			\
+		    F_ISSET((sdbp), DB_AM_SWAP))			\
+			P_32_SWAP((pkey)->data);			\
+	} while (0)
+
 #include "dbinc/db_dispatch.h"
 #include "dbinc_auto/db_auto.h"
 #include "dbinc_auto/crdel_auto.h"
diff --git a/storage/bdb/dbinc/db_cxx.in b/storage/bdb/dbinc/db_cxx.in
index 356145765f0..b1a28d6f2bb 100644
--- a/storage/bdb/dbinc/db_cxx.in
+++ b/storage/bdb/dbinc/db_cxx.in
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_cxx.in,v 11.147 2004/10/07 21:39:48 bostic Exp $
+ * $Id: db_cxx.in,v 12.13 2005/10/18 14:17:08 mjc Exp $
  */
 
 #ifndef _DB_CXX_H_
@@ -88,6 +88,7 @@ class DbException;                               // forward
 class DbDeadlockException;                       // forward
 class DbLockNotGrantedException;                 // forward
 class DbMemoryException;                         // forward
+class DbRepHandleDeadException;                  // forward
 class DbRunRecoveryException;                    // forward
 
 ////////////////////////////////////////////////////////////////
@@ -178,8 +179,8 @@ class _exported Db
 	friend class DbEnv;
 
 public:
-	Db(DbEnv*, u_int32_t);      // create a Db object, then call open()
-	virtual ~Db();              // does *not* call close.
+	Db(DbEnv*, u_int32_t);      // Create a Db object.
+	virtual ~Db();              // Calls close() if the user hasn't.
 
 	// These methods exactly match those in the C interface.
 	//
@@ -187,6 +188,8 @@ public:
 	    int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
 	    u_int32_t flags);
 	virtual int close(u_int32_t flags);
+	virtual int compact(DbTxn *txnid, Dbt *start, Dbt *stop,
+	    DB_COMPACT *c_data, u_int32_t flags, Dbt *end);
 	virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
 	virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
 	virtual void err(int, const char *, ...);
@@ -204,17 +207,16 @@ public:
 	virtual int open(DbTxn *txnid,
 	    const char *, const char *subname, DBTYPE, u_int32_t, int);
 	virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
-		 u_int32_t flags);
+	    u_int32_t flags);
 	virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
 	virtual int remove(const char *, const char *, u_int32_t);
 	virtual int rename(const char *, const char *, const char *, u_int32_t);
 	virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
-		      db_free_fcn_type);
+	    db_free_fcn_type);
 	virtual void set_app_private(void *);
 	virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
 	virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
 	virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
-	virtual int set_bt_maxkey(u_int32_t);
 	virtual int get_bt_minkey(u_int32_t *);
 	virtual int set_bt_minkey(u_int32_t);
 	virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
@@ -226,7 +228,7 @@ public:
 	virtual int get_encrypt_flags(u_int32_t *);
 	virtual int set_encrypt(const char *, u_int32_t);
 	virtual void set_errcall(
-			void (*)(const DbEnv *, const char *, const char *));
+	    void (*)(const DbEnv *, const char *, const char *));
 	virtual void get_errfile(FILE **);
 	virtual void set_errfile(FILE *);
 	virtual void get_errpfx(const char **);
@@ -392,11 +394,15 @@ public:
 	    const char *newname, u_int32_t flags);
 	virtual void err(int, const char *, ...);
 	virtual void errx(const char *, ...);
+	virtual int failchk(u_int32_t);
+	virtual int fileid_reset(const char *, u_int32_t);
 	virtual void *get_app_private() const;
 	virtual int get_home(const char **);
 	virtual int get_open_flags(u_int32_t *);
 	virtual int open(const char *, u_int32_t, int);
 	virtual int remove(const char *, u_int32_t);
+	virtual int stat_print(u_int32_t flags);
+
 	virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
 			      db_free_fcn_type);
 	virtual void set_app_private(void *);
@@ -405,6 +411,8 @@ public:
 	virtual int get_data_dirs(const char ***);
 	virtual int set_data_dir(const char *);
 	virtual int get_encrypt_flags(u_int32_t *);
+	virtual int set_intermediate_dir(int, u_int32_t);
+	virtual int set_isalive(int (*)(DbEnv *, pid_t, db_threadid_t));
 	virtual int set_encrypt(const char *, u_int32_t);
 	virtual void set_errcall(
 			void (*)(const DbEnv *, const char *, const char *));
@@ -414,11 +422,15 @@ public:
 	virtual void set_errpfx(const char *);
 	virtual int get_flags(u_int32_t *);
 	virtual int set_flags(u_int32_t, int);
+	virtual bool is_bigendian();
+	virtual int lsn_reset(const char *, u_int32_t);
 	virtual int set_feedback(void (*)(DbEnv *, int, int));
 	virtual int get_lg_bsize(u_int32_t *);
 	virtual int set_lg_bsize(u_int32_t);
 	virtual int get_lg_dir(const char **);
 	virtual int set_lg_dir(const char *);
+	virtual int get_lg_filemode(int *);
+	virtual int set_lg_filemode(int);
 	virtual int get_lg_max(u_int32_t *);
 	virtual int set_lg_max(u_int32_t);
 	virtual int get_lg_regionmax(u_int32_t *);
@@ -436,6 +448,10 @@ public:
 	virtual int set_lk_max_objects(u_int32_t);
 	virtual int get_mp_mmapsize(size_t *);
 	virtual int set_mp_mmapsize(size_t);
+	virtual int get_mp_max_openfd(int *);
+	virtual int set_mp_max_openfd(int);
+	virtual int get_mp_max_write(int *, int *);
+	virtual int set_mp_max_write(int, int);
 	virtual void set_msgcall(void (*)(const DbEnv *, const char *));
 	virtual void get_msgfile(FILE **);
 	virtual void set_msgfile(FILE *);
@@ -447,8 +463,6 @@ public:
 	virtual int set_timeout(db_timeout_t, u_int32_t);
 	virtual int get_tmp_dir(const char **);
 	virtual int set_tmp_dir(const char *);
-	virtual int get_tas_spins(u_int32_t *);
-	virtual int set_tas_spins(u_int32_t);
 	virtual int get_tx_max(u_int32_t *);
 	virtual int set_tx_max(u_int32_t);
 	virtual int set_app_dispatch(int (*)(DbEnv *,
@@ -510,6 +524,7 @@ public:
 	virtual int log_file(DbLsn *lsn, char *namep, size_t len);
 	virtual int log_flush(const DbLsn *lsn);
 	virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+	virtual int log_printf(DbTxn *, const char *, ...);
 
 	virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
 	virtual int log_stat_print(u_int32_t flags);
@@ -526,6 +541,23 @@ public:
 	virtual int memp_sync(DbLsn *lsn);
 	virtual int memp_trickle(int pct, int *nwrotep);
 
+	// Mpool functions
+	//
+	virtual int mutex_alloc(u_int32_t, db_mutex_t *);
+	virtual int mutex_free(db_mutex_t);
+	virtual int mutex_get_align(u_int32_t *);
+	virtual int mutex_get_increment(u_int32_t *);
+	virtual int mutex_get_max(u_int32_t *);
+	virtual int mutex_get_tas_spins(u_int32_t *);
+	virtual int mutex_lock(db_mutex_t);
+	virtual int mutex_set_align(u_int32_t);
+	virtual int mutex_set_increment(u_int32_t);
+	virtual int mutex_set_max(u_int32_t);
+	virtual int mutex_set_tas_spins(u_int32_t);
+	virtual int mutex_stat(DB_MUTEX_STAT **, u_int32_t);
+	virtual int mutex_stat_print(u_int32_t);
+	virtual int mutex_unlock(db_mutex_t);
+
 	// Transaction functions
 	//
 	virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
@@ -539,6 +571,7 @@ public:
 	// Replication functions
 	//
 	virtual int rep_elect(int, int, int, u_int32_t, int *, u_int32_t);
+	virtual int rep_flush();
 	virtual int rep_process_message(Dbt *, Dbt *, int *, DbLsn *);
 	virtual int rep_start(Dbt *, u_int32_t);
 	virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
@@ -547,6 +580,13 @@ public:
 	virtual int set_rep_limit(u_int32_t, u_int32_t);
 	virtual int set_rep_transport(int, int (*)(DbEnv *,
 	    const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t));
+	virtual int set_rep_request(u_int32_t, u_int32_t);
+	virtual int set_thread_count(u_int32_t);
+	virtual int set_thread_id(void (*)(DbEnv *, pid_t *, db_threadid_t *));
+	virtual int set_thread_id_string(char *(*)(DbEnv *, pid_t, db_threadid_t, char *));
+	virtual int rep_set_config(u_int32_t which, int onoff);
+	virtual int rep_get_config(u_int32_t which, int *onoffp);
+	virtual int rep_sync(u_int32_t flags);
 
 	// Conversion functions
 	//
@@ -581,15 +621,18 @@ public:
 				       db_recops op);
 	static void _paniccall_intercept(DB_ENV *env, int errval);
 	static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
-	static int _rep_send_intercept(DB_ENV *env,
-				       const DBT *cntrl, const DBT *data,
-				       const DB_LSN *lsn, int id,
-				       u_int32_t flags);
+	static int _isalive_intercept(DB_ENV *env, pid_t pid,
+	    db_threadid_t thrid);
+	static int _rep_send_intercept(DB_ENV *env, const DBT *cntrl,
+	    const DBT *data, const DB_LSN *lsn, int id, u_int32_t flags);
 	static void _stream_error_function(const DB_ENV *env,
-					   const char *prefix,
-					   const char *message);
+	    const char *prefix, const char *message);
 	static void _stream_message_function(const DB_ENV *env,
-					     const char *message);
+	    const char *message);
+	static void _thread_id_intercept(DB_ENV *env, pid_t *pidp,
+	    db_threadid_t *thridp);
+	static char *_thread_id_string_intercept(DB_ENV *env, pid_t pid,
+	    db_threadid_t thrid, char *buf);
 
 private:
 	void cleanup();
@@ -611,16 +654,16 @@ private:
 	__DB_STD(ostream) *message_stream_;
 
 	int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+	int (*isalive_callback_)(DbEnv *, pid_t, db_threadid_t);
 	void (*error_callback_)(const DbEnv *, const char *, const char *);
 	void (*feedback_callback_)(DbEnv *, int, int);
 	void (*message_callback_)(const DbEnv *, const char *);
 	void (*paniccall_callback_)(DbEnv *, int);
-	int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
-			      void *pgaddr, Dbt *pgcookie);
-	int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
-			       void *pgaddr, Dbt *pgcookie);
-	int (*rep_send_callback_)(DbEnv *,
-	    const Dbt *, const Dbt *, const DbLsn *, int, u_int32_t);
+	int (*rep_send_callback_)(DbEnv *, const Dbt *, const Dbt *,
+	    const DbLsn *, int, u_int32_t);
+	void (*thread_id_callback_)(DbEnv *, pid_t *, db_threadid_t *);
+	char *(*thread_id_string_callback_)(DbEnv *, pid_t, db_threadid_t,
+	    char *);
 };
 
 //
@@ -768,7 +811,7 @@ public:
 	int remove(DbTxn *txnid, u_int32_t flags);
 	int stat(DB_SEQUENCE_STAT **sp, u_int32_t flags);
 	int stat_print(u_int32_t flags);
-	
+
 	int get(DbTxn *txnid, int32_t delta, db_seq_t *retp, u_int32_t flags);
 	int get_cachesize(int32_t *sizep);
 	int set_cachesize(int32_t size);
@@ -825,7 +868,9 @@ public:
 	int commit(u_int32_t flags);
 	int discard(u_int32_t flags);
 	u_int32_t id();
+	int get_name(const char **namep);
 	int prepare(u_int8_t *gid);
+	int set_name(const char *name);
 	int set_timeout(db_timeout_t timeout, u_int32_t flags);
 
 	virtual DB_TXN *get_DB_TXN()
@@ -1072,6 +1117,20 @@ private:
 	Dbt *dbt_;
 };
 
+//
+// A specific sort of exception that occurs when a change of replication
+// master requires that all handles be re-opened.
+//
+class _exported DbRepHandleDeadException : public DbException
+{
+public:
+	virtual ~DbRepHandleDeadException() throw();
+	DbRepHandleDeadException(const char *description);
+
+	DbRepHandleDeadException(const DbRepHandleDeadException &);
+	DbRepHandleDeadException &operator = (const DbRepHandleDeadException &);
+};
+
 //
 // A specific sort of exception that occurs when
 // recovery is required before continuing DB activity.
diff --git a/storage/bdb/dbinc/db_dispatch.h b/storage/bdb/dbinc/db_dispatch.h
index bbaff69dbfb..eee9c59d2a8 100644
--- a/storage/bdb/dbinc/db_dispatch.h
+++ b/storage/bdb/dbinc/db_dispatch.h
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -32,7 +32,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_dispatch.h,v 11.38 2004/07/26 19:54:08 margo Exp $
+ * $Id: db_dispatch.h,v 12.5 2005/10/19 15:10:44 bostic Exp $
  */
 
 #ifndef _DB_DISPATCH_H_
@@ -68,6 +68,7 @@ struct __db_txnhead {
 	LIST_HEAD(__db_headlink, __db_txnlist) head[1];
 };
 
+#define	DB_LSN_STACK_SIZE 4
 struct __db_txnlist {
 	db_txnlist_type type;
 	LIST_ENTRY(__db_txnlist) links;
@@ -78,9 +79,9 @@ struct __db_txnlist {
 			u_int32_t status;
 		} t;
 		struct {
-			u_int32_t ntxns;
-			u_int32_t maxn;
-			DB_LSN *lsn_array;
+			u_int32_t stack_size;
+			u_int32_t stack_indx;
+			DB_LSN *lsn_stack;
 		} l;
 		struct {
 			u_int32_t nentries;
@@ -94,16 +95,9 @@ struct __db_txnlist {
 	} u;
 };
 
-/*
- * Flag value for __db_txnlist_lsnadd. Distinguish whether we are replacing
- * an entry in the transaction list or adding a new one.
- */
-#define	TXNLIST_NEW	0x1
-
 /*
  * States for limbo list processing.
  */
-
 typedef enum {
 	LIMBO_NORMAL,		/* Normal processing. */
 	LIMBO_PREPARE,		/* We are preparing a transaction. */
diff --git a/storage/bdb/dbinc/db_int.in b/storage/bdb/dbinc/db_int.in
index 522f2f7280b..55be4366326 100644
--- a/storage/bdb/dbinc/db_int.in
+++ b/storage/bdb/dbinc/db_int.in
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_int.in,v 11.155 2004/10/28 16:07:38 ubell Exp $
+ * $Id: db_int.in,v 12.15 2005/11/03 17:46:08 bostic Exp $
  */
 
 #ifndef _DB_INTERNAL_H_
@@ -46,7 +46,7 @@ extern "C" {
 #endif
 #endif
 
-#if defined(HAVE_LONG_LONG) && defined(HAVE_UNSIGNED_LONG_LONG)
+#if defined(HAVE_64BIT_TYPES)
 #undef	INT64_MAX
 #undef	INT64_MIN
 #undef	UINT64_MAX
@@ -68,8 +68,8 @@ extern "C" {
 #define	INT64_MIN	(-INT64_MAX-1)
 #define	UINT64_MAX	18446744073709551615ULL
 
-#define	INT64_FMT	"%lld"
-#define	UINT64_FMT	"%llu"
+@INT64_FMT@
+@UINT64_FMT@
 #endif	/* DB_WIN32 */
 #endif	/* HAVE_LONG_LONG && HAVE_UNSIGNED_LONG_LONG */
 
@@ -102,17 +102,17 @@ extern "C" {
 /* Align an integer to a specific boundary. */
 #undef	DB_ALIGN
 #define	DB_ALIGN(v, bound)						\
-	(((v) + (bound) - 1) & ~(((uintmax_t)bound) - 1))
+	(((v) + (bound) - 1) & ~(((uintmax_t)(bound)) - 1))
 
 /* Increment a pointer to a specific boundary. */
 #undef	ALIGNP_INC
 #define	ALIGNP_INC(p, bound)						\
-	(void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)bound) - 1))
+	(void *)(((uintptr_t)(p) + (bound) - 1) & ~(((uintptr_t)(bound)) - 1))
 
 /* Decrement a pointer to a specific boundary. */
 #undef	ALIGNP_DEC
 #define	ALIGNP_DEC(p, bound)						\
-	(void *)((uintptr_t)(p) & ~(((uintptr_t)bound) - 1))
+	(void *)((uintptr_t)(p) & ~(((uintptr_t)(bound)) - 1))
 
 /*
  * Print an address as a u_long (a u_long is the largest type we can print
@@ -180,7 +180,7 @@ typedef struct __fn {
 	((int)((total) == 0 ? 0 : ((double)(v) * 100) / (total)))
 #define	DB_PCT_PG(v, total, pgsize)					\
 	((int)((total) == 0 ? 0 :					\
-	    100 - ((double)(v) * 100) / ((total) * (pgsize))))
+	    100 - ((double)(v) * 100) / (((double)total) * (pgsize))))
 
 /*
  * Structure used for callback message aggregation.
@@ -220,6 +220,8 @@ typedef struct __db_msgbuf {
 #define	STAT_LSN(msg, lsnp)						\
 	__db_msg(dbenv, "%lu/%lu\t%s",					\
 	    (u_long)(lsnp)->file, (u_long)(lsnp)->offset, msg)
+#define	STAT_POINTER(msg, v)						\
+	__db_msg(dbenv, "%#lx\t%s", P_TO_ULONG(v), msg)
 #define	STAT_STRING(msg, p) do {					\
 	const char *__p = p;	/* p may be a function call. */		\
 	__db_msg(dbenv, "%s\t%s", __p == NULL ? "!Set" : __p, msg);	\
@@ -248,6 +250,7 @@ typedef struct __db_msgbuf {
 #define	DB_RETOK_LGGET(ret)	((ret) == 0 || (ret) == DB_NOTFOUND)
 #define	DB_RETOK_MPGET(ret)	((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
 #define	DB_RETOK_REPPMSG(ret)	((ret) == 0 || \
+				    (ret) == DB_REP_IGNORE || \
 				    (ret) == DB_REP_ISPERM || \
 				    (ret) == DB_REP_NEWMASTER || \
 				    (ret) == DB_REP_NEWSITE || \
@@ -292,20 +295,24 @@ typedef enum {
 } APPNAME;
 
 /*
+ * ALIVE_ON	The is_alive function is configured.
  * CDB_LOCKING	CDB product locking.
  * CRYPTO_ON	Security has been configured.
  * LOCKING_ON	Locking has been configured.
  * LOGGING_ON	Logging has been configured.
+ * MUTEX_ON	Mutexes have been configured.
  * MPOOL_ON	Memory pool has been configured.
  * REP_ON	Replication has been configured.
  * RPC_ON	RPC has been configured.
  * TXN_ON	Transactions have been configured.
  */
+#define	ALIVE_ON(dbenv)		((dbenv)->is_alive != NULL)
 #define	CDB_LOCKING(dbenv)	F_ISSET(dbenv, DB_ENV_CDB)
 #define	CRYPTO_ON(dbenv)	((dbenv)->crypto_handle != NULL)
 #define	LOCKING_ON(dbenv)	((dbenv)->lk_handle != NULL)
 #define	LOGGING_ON(dbenv)	((dbenv)->lg_handle != NULL)
 #define	MPOOL_ON(dbenv)		((dbenv)->mp_handle != NULL)
+#define	MUTEX_ON(dbenv)		((dbenv)->mutex_handle != NULL)
 #define	REP_ON(dbenv)		((dbenv)->rep_handle != NULL)
 #define	RPC_ON(dbenv)		((dbenv)->cl_handle != NULL)
 #define	TXN_ON(dbenv)		((dbenv)->tx_handle != NULL)
@@ -342,6 +349,74 @@ typedef enum {
 	if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED))			\
 		ENV_REQUIRES_CONFIG(dbenv, handle, i, flags)
 
+#define	ENV_ENTER(dbenv, ip) do {					\
+	int __ret;							\
+	if ((dbenv)->thr_hashtab == NULL)				\
+		ip = NULL;						\
+	else {								\
+		if ((__ret =						\
+		    __env_set_state(dbenv, &(ip), THREAD_ACTIVE)) != 0)	\
+			return (__ret);					\
+	}								\
+} while (0)
+
+#ifdef DIAGNOSTIC
+#define	ENV_LEAVE(dbenv, ip) do {					\
+	if ((ip) != NULL) {						\
+		DB_ASSERT(ip->dbth_state == THREAD_ACTIVE);		\
+		(ip)->dbth_state = THREAD_OUT;				\
+	}								\
+} while (0)
+#else
+#define	ENV_LEAVE(dbenv, ip) do {					\
+	if ((ip) != NULL)						\
+		(ip)->dbth_state = THREAD_OUT;				\
+} while (0)
+#endif
+#ifdef DIAGNOSTIC
+#define	CHECK_THREAD(dbenv) do {					\
+	DB_THREAD_INFO *__ip;						\
+	if ((dbenv)->thr_hashtab != NULL) {				\
+		(void)__env_set_state(dbenv, &__ip, THREAD_DIAGNOSTIC);	\
+		DB_ASSERT(__ip != NULL &&				\
+		     __ip->dbth_state != THREAD_OUT);			\
+	}								\
+} while (0)
+#define	CHECK_MTX_THREAD(dbenv, mtx) do {				\
+	if (mtx->alloc_id != MTX_MUTEX_REGION &&			\
+	    mtx->alloc_id != MTX_ENV_REGION &&				\
+	    mtx->alloc_id != MTX_APPLICATION)				\
+		CHECK_THREAD(dbenv);					\
+} while (0)
+#else
+#define	CHECK_THREAD(dbenv)
+#define	CHECK_MTX_THREAD(dbenv, mtx)
+#endif
+
+typedef enum {
+	THREAD_SLOT_NOT_IN_USE=0,
+	THREAD_OUT,
+	THREAD_ACTIVE,
+	THREAD_BLOCKED
+#ifdef DIAGNOSTIC
+	, THREAD_DIAGNOSTIC
+#endif
+} DB_THREAD_STATE;
+
+typedef struct __db_thread_info {
+	pid_t		dbth_pid;
+	db_threadid_t	dbth_tid;
+	DB_THREAD_STATE	dbth_state;
+	SH_TAILQ_ENTRY	dbth_links;
+} DB_THREAD_INFO;
+
+typedef struct __env_thread_info {
+	u_int32_t	thr_count;
+	u_int32_t	thr_max;
+	u_int32_t	thr_nbucket;
+	roff_t		thr_hashoff;
+} THREAD_INFO;
+
 /*******************************************************
  * Database Access Methods.
  *******************************************************/
@@ -350,7 +425,7 @@ typedef enum {
  *	The database handle is free-threaded (was opened with DB_THREAD).
  */
 #define	DB_IS_THREADED(dbp)						\
-	((dbp)->mutexp != NULL)
+	((dbp)->mutex != MUTEX_INVALID)
 
 /* Initialization methods are often illegal before/after open is called. */
 #define	DB_ILLEGAL_AFTER_OPEN(dbp, name)				\
@@ -437,8 +512,10 @@ typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
 /*
  * File types for DB access methods.  Negative numbers are reserved to DB.
  */
-#define	DB_FTYPE_SET		-1	/* Call pgin/pgout functions. */
-#define	DB_FTYPE_NOTSET		 0	/* Don't call... */
+#define	DB_FTYPE_SET		-1		/* Call pgin/pgout functions. */
+#define	DB_FTYPE_NOTSET		 0		/* Don't call... */
+#define	DB_LSN_OFF_NOTSET	-1		/* Not yet set. */
+#define	DB_CLEARLEN_NOTSET	UINT32_MAX	/* Not yet set. */
 
 /* Structure used as the DB pgin/pgout pgcookie. */
 typedef struct __dbpginfo {
@@ -526,7 +603,7 @@ typedef struct __dbpginfo {
 struct __db_reginfo_t;	typedef struct __db_reginfo_t REGINFO;
 struct __db_txnhead;	typedef struct __db_txnhead DB_TXNHEAD;
 struct __db_txnlist;	typedef struct __db_txnlist DB_TXNLIST;
-struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_childinfo;typedef struct __vrfy_childinfo VRFY_CHILDINFO;
 struct __vrfy_dbinfo;   typedef struct __vrfy_dbinfo VRFY_DBINFO;
 struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
 
@@ -541,10 +618,9 @@ struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
 
 #include "dbinc/globals.h"
 #include "dbinc/debug.h"
-#include "dbinc/mutex.h"
 #include "dbinc/region.h"
-#include "dbinc_auto/mutex_ext.h"	/* XXX: Include after region.h. */
 #include "dbinc_auto/env_ext.h"
+#include "dbinc/mutex.h"
 #include "dbinc/os.h"
 #include "dbinc/rep.h"
 #include "dbinc_auto/clib_ext.h"
@@ -577,11 +653,14 @@ struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
  * We explicitly use LOGGING_ON/IS_REP_CLIENT here because we don't want to pull
  * in the log headers, which IS_RECOVERING (and thus DBENV_LOGGING) rely on, and
  * because DBC_RECOVER should be set anytime IS_RECOVERING would be true.
+ *
+ * If we're not in recovery (master - doing an abort a client applying
+ * a txn), then a client's only path through here is on an internal
+ * operation, and a master's only path through here is a transactional
+ * operation.  Detect if either is not the case.
  */
 #if defined(DIAGNOSTIC) || defined(DEBUG_ROP)  || defined(DEBUG_WOP)
-#define	DBC_LOGGING(dbc)						\
-	(LOGGING_ON((dbc)->dbp->dbenv) &&				\
-	    !F_ISSET((dbc), DBC_RECOVER) && !IS_REP_CLIENT((dbc)->dbp->dbenv))
+#define	DBC_LOGGING(dbc)	__dbc_logging(dbc)
 #else
 #define	DBC_LOGGING(dbc)						\
 	((dbc)->txn != NULL && LOGGING_ON((dbc)->dbp->dbenv) &&		\
diff --git a/storage/bdb/dbinc/db_join.h b/storage/bdb/dbinc/db_join.h
index 3fea2ad2f12..ff43216479c 100644
--- a/storage/bdb/dbinc/db_join.h
+++ b/storage/bdb/dbinc/db_join.h
@@ -1,15 +1,14 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- *	@(#)db_join.h	11.1 (Sleepycat) 7/25/99
+ * $Id: db_join.h,v 12.2 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef _DB_JOIN_H_
 #define	_DB_JOIN_H_
-
 /*
  * Joins use a join cursor that is similar to a regular DB cursor except
  * that it only supports c_get and c_close functionality.  Also, it does
diff --git a/storage/bdb/dbinc/db_page.h b/storage/bdb/dbinc/db_page.h
index 59a1292ffe3..883b7d450fe 100644
--- a/storage/bdb/dbinc/db_page.h
+++ b/storage/bdb/dbinc/db_page.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_page.h,v 11.63 2004/09/17 22:00:27 mjc Exp $
+ * $Id: db_page.h,v 12.6 2005/08/08 14:52:30 bostic Exp $
  */
 
 #ifndef _DB_PAGE_H_
@@ -48,6 +48,8 @@ extern "C" {
 #define	P_QAMDATA	11	/* Queue data page. */
 #define	P_LDUP		12	/* Off-page duplicate leaf. */
 #define	P_PAGETYPE_MAX	13
+/* Flag to __db_new */
+#define	P_DONTEXTEND	0x8000	/* Don't allocate if there are no free pages. */
 
 /*
  * When we create pages in mpool, we ask mpool to clear some number of bytes
@@ -100,12 +102,12 @@ typedef struct _btmeta33 {
 #define	BTM_MASK	0x07f
 	DBMETA	dbmeta;		/* 00-71: Generic meta-data header. */
 
-	u_int32_t maxkey;	/* 72-75: Btree: Maxkey. */
+	u_int32_t unused1;	/* 72-75: Unused space. */
 	u_int32_t minkey;	/* 76-79: Btree: Minkey. */
 	u_int32_t re_len;	/* 80-83: Recno: fixed-length record length. */
 	u_int32_t re_pad;	/* 84-87: Recno: fixed-length record pad. */
 	u_int32_t root;		/* 88-91: Root page. */
-	u_int32_t unused[92];	/* 92-459: Unused space */
+	u_int32_t unused2[92];	/* 92-459: Unused space. */
 	u_int32_t crypto_magic;		/* 460-463: Crypto magic number */
 	u_int32_t trash[3];		/* 464-475: Trash space - Do not use */
 	u_int8_t iv[DB_IV_BYTES];	/* 476-495: Crypto IV */
@@ -268,7 +270,7 @@ typedef struct _db_page {
 	(F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) +		\
 	SIZEOF_PAGE + SSZA(PG_CRYPTO, chksum)) :			\
 	(F_ISSET((dbp), DB_AM_CHKSUM) ? ((u_int8_t *)(pg) +		\
-	SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum))			\
+	SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum))				\
 	: NULL))
 
 /* PAGE element macros. */
@@ -588,6 +590,14 @@ typedef struct _boverflow {
 #define	BOVERFLOW_PSIZE							\
 	(BOVERFLOW_SIZE + sizeof(db_indx_t))
 
+#define	BITEM_SIZE(bk)							\
+	(B_TYPE((bk)->type) != B_KEYDATA ? BOVERFLOW_SIZE :		\
+	BKEYDATA_SIZE((bk)->len))
+
+#define	BITEM_PSIZE(bk)							\
+	(B_TYPE((bk)->type) != B_KEYDATA ? BOVERFLOW_PSIZE :		\
+	BKEYDATA_PSIZE((bk)->len))
+
 /*
  * Btree leaf and hash page layouts group indices in sets of two, one for the
  * key and one for the data.  Everything else does it in sets of one to save
@@ -650,6 +660,11 @@ typedef struct _rinternal {
 #define	RINTERNAL_PSIZE							\
 	(RINTERNAL_SIZE + sizeof(db_indx_t))
 
+struct pglist {
+	db_pgno_t pgno;
+	DB_LSN lsn;
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/storage/bdb/dbinc/db_server_int.h b/storage/bdb/dbinc/db_server_int.h
index eba36efcb5b..aee9ad194c7 100644
--- a/storage/bdb/dbinc/db_server_int.h
+++ b/storage/bdb/dbinc/db_server_int.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2000-2004
+ * Copyright (c) 2000-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_server_int.h,v 1.25 2004/01/28 03:36:02 bostic Exp $
+ * $Id: db_server_int.h,v 12.4 2005/08/08 14:52:30 bostic Exp $
  */
 
 #ifndef _DB_SERVER_INT_H_
@@ -58,7 +58,7 @@ struct home_entry {
 DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |		\
 DB_INIT_TXN | DB_JOINENV)
 
-#define	DB_SERVER_DBFLAGS	 (DB_DIRTY_READ | DB_NOMMAP | DB_RDONLY)
+#define	DB_SERVER_DBFLAGS	 (DB_NOMMAP | DB_RDONLY | DB_READ_UNCOMMITTED)
 #define	DB_SERVER_DBNOSHARE	 (DB_EXCL | DB_TRUNCATE)
 
 typedef struct ct_envdata ct_envdata;
@@ -145,4 +145,9 @@ extern int __dbsrv_verbose;
 	__dbsrv_active(ctp);			\
 }
 
+#define	FREE_IF_CHANGED(dbenv, p, orig) do {	\
+	if ((p) != NULL && (p) != (orig))	\
+		__os_ufree((dbenv), (p));	\
+} while (0)
+
 #endif	/* !_DB_SERVER_INT_H_ */
diff --git a/storage/bdb/dbinc/db_shash.h b/storage/bdb/dbinc/db_shash.h
index 51277e5e0b3..89c544fcc91 100644
--- a/storage/bdb/dbinc/db_shash.h
+++ b/storage/bdb/dbinc/db_shash.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_shash.h,v 11.13 2004/01/28 03:36:02 bostic Exp $
+ * $Id: db_shash.h,v 12.1 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef	_DB_SHASH_H_
diff --git a/storage/bdb/dbinc/db_swap.h b/storage/bdb/dbinc/db_swap.h
index 25391ce5957..6350ae6a1b2 100644
--- a/storage/bdb/dbinc/db_swap.h
+++ b/storage/bdb/dbinc/db_swap.h
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -32,7 +32,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: db_swap.h,v 11.11 2004/01/28 03:36:02 bostic Exp $
+ * $Id: db_swap.h,v 12.3 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef _DB_SWAP_H_
@@ -47,7 +47,7 @@
 #undef	M_64_SWAP
 #define	M_64_SWAP(a) {							\
 	u_int64_t _tmp;							\
-	_tmp = a;							\
+	_tmp = (u_int64_t)a;						\
 	((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[7];			\
 	((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[6];			\
 	((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[5];			\
@@ -91,7 +91,7 @@
 #undef	M_32_SWAP
 #define	M_32_SWAP(a) {							\
 	u_int32_t _tmp;							\
-	_tmp = a;							\
+	_tmp = (u_int32_t)a;						\
 	((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[3];			\
 	((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[2];			\
 	((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1];			\
diff --git a/storage/bdb/dbinc/db_upgrade.h b/storage/bdb/dbinc/db_upgrade.h
index e7ac0bc96e7..e4081e9b6ef 100644
--- a/storage/bdb/dbinc/db_upgrade.h
+++ b/storage/bdb/dbinc/db_upgrade.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_upgrade.h,v 1.12 2004/01/28 03:36:02 bostic Exp $
+ * $Id: db_upgrade.h,v 12.1 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef _DB_UPGRADE_H_
diff --git a/storage/bdb/dbinc/db_verify.h b/storage/bdb/dbinc/db_verify.h
index 528ba8f04cb..43bbff0c27b 100644
--- a/storage/bdb/dbinc/db_verify.h
+++ b/storage/bdb/dbinc/db_verify.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_verify.h,v 1.34 2004/05/20 14:34:12 bostic Exp $
+ * $Id: db_verify.h,v 12.4 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef _DB_VERIFY_H_
@@ -124,7 +124,8 @@ struct __vrfy_dbinfo {
 	u_int8_t	leaf_type;
 
 	/* Queue needs these to verify data pages in the first pass. */
-	u_int32_t	re_len;
+	u_int32_t	re_pad;		/* Record pad character. */
+	u_int32_t	re_len;		/* Record length. */
 	u_int32_t	rec_page;
 	u_int32_t	page_ext;
 	u_int32_t       first_recno;
@@ -165,9 +166,9 @@ struct __vrfy_pageinfo {
 	db_indx_t	entries;	/* Actual number of entries. */
 	u_int16_t	unused;
 	db_recno_t	rec_cnt;	/* Record count. */
+	u_int32_t	re_pad;		/* Record pad character. */
 	u_int32_t	re_len;		/* Record length. */
 	u_int32_t	bt_minkey;
-	u_int32_t	bt_maxkey;
 	u_int32_t	h_ffactor;
 	u_int32_t	h_nelem;
 
@@ -180,16 +181,17 @@ struct __vrfy_pageinfo {
 	u_int32_t	olen;
 
 #define	VRFY_DUPS_UNSORTED	0x0001	/* Have to flag the negative! */
-#define	VRFY_HAS_DUPS		0x0002
-#define	VRFY_HAS_DUPSORT	0x0004	/* Has the flag set. */
-#define	VRFY_HAS_SUBDBS		0x0008
+#define	VRFY_HAS_CHKSUM		0x0002
+#define	VRFY_HAS_DUPS		0x0004
+#define	VRFY_HAS_DUPSORT	0x0008	/* Has the flag set. */
 #define	VRFY_HAS_RECNUMS	0x0010
-#define	VRFY_INCOMPLETE		0x0020	/* Meta or item order checks incomp. */
-#define	VRFY_IS_ALLZEROES	0x0040	/* Hash page we haven't touched? */
-#define	VRFY_IS_FIXEDLEN	0x0080
-#define	VRFY_IS_RECNO		0x0100
-#define	VRFY_IS_RRECNO		0x0200
-#define	VRFY_OVFL_LEAFSEEN	0x0400
+#define	VRFY_HAS_SUBDBS		0x0020
+#define	VRFY_INCOMPLETE		0x0040	/* Meta or item order checks incomp. */
+#define	VRFY_IS_ALLZEROES	0x0080	/* Hash page we haven't touched? */
+#define	VRFY_IS_FIXEDLEN	0x0100
+#define	VRFY_IS_RECNO		0x0200
+#define	VRFY_IS_RRECNO		0x0400
+#define	VRFY_OVFL_LEAFSEEN	0x0800
 	u_int32_t	flags;
 
 	LIST_ENTRY(__vrfy_pageinfo) links;
diff --git a/storage/bdb/dbinc/debug.h b/storage/bdb/dbinc/debug.h
index 068c8af2b0d..642920eb2f2 100644
--- a/storage/bdb/dbinc/debug.h
+++ b/storage/bdb/dbinc/debug.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: debug.h,v 11.44 2004/09/24 00:43:18 bostic Exp $
+ * $Id: debug.h,v 12.2 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef _DB_DEBUG_H_
@@ -247,15 +247,15 @@ extern "C" {
 
 #define	DB_TEST_RECOVERY_LABEL	db_tr_err:
 
-#define	DB_TEST_CHECKPOINT(env, val)					\
-	if ((val) != 0)							\
+#define	DB_TEST_WAIT(env, val)					\
+	if ((val) != 0)						\
 		__os_sleep((env), (u_long)(val), 0)
 #else
 #define	DB_TEST_SUBLOCKS(env, flags)
 #define	DB_ENV_TEST_RECOVERY(env, val, ret, name)
 #define	DB_TEST_RECOVERY(dbp, val, ret, name)
 #define	DB_TEST_RECOVERY_LABEL
-#define	DB_TEST_CHECKPOINT(env, val)
+#define	DB_TEST_WAIT(env, val)
 #endif
 
 #if defined(__cplusplus)
diff --git a/storage/bdb/dbinc/fop.h b/storage/bdb/dbinc/fop.h
index ef87ff6e25a..98f7c59b362 100644
--- a/storage/bdb/dbinc/fop.h
+++ b/storage/bdb/dbinc/fop.h
@@ -1,15 +1,20 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: fop.h,v 11.5 2004/01/28 03:36:02 bostic Exp $
+ * $Id: fop.h,v 12.3 2005/10/04 18:22:22 bostic Exp $
  */
 
 #ifndef	_FOP_H_
 #define	_FOP_H_
 
+#define	MAKE_INMEM(D) do {					\
+	F_SET((D), DB_AM_INMEM);				\
+	(void)__memp_set_flags((D)->mpf, DB_MPOOL_NOFILE, 1);	\
+} while (0)
+
 #include "dbinc_auto/fileops_auto.h"
 #include "dbinc_auto/fileops_ext.h"
 
diff --git a/storage/bdb/dbinc/globals.h b/storage/bdb/dbinc/globals.h
index 95d96533a74..aaef6309fb1 100644
--- a/storage/bdb/dbinc/globals.h
+++ b/storage/bdb/dbinc/globals.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: globals.h,v 11.9 2004/09/17 22:00:27 mjc Exp $
+ * $Id: globals.h,v 12.1 2005/06/16 20:21:47 bostic Exp $
  */
 
 /*******************************************************
diff --git a/storage/bdb/dbinc/hash.h b/storage/bdb/dbinc/hash.h
index 10059a5e027..feb124cbab2 100644
--- a/storage/bdb/dbinc/hash.h
+++ b/storage/bdb/dbinc/hash.h
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash.h,v 11.28 2004/01/28 03:36:02 bostic Exp $
+ * $Id: hash.h,v 12.1 2005/06/16 20:21:47 bostic Exp $
  */
 
 #ifndef	_DB_HASH_H_
diff --git a/storage/bdb/dbinc/hmac.h b/storage/bdb/dbinc/hmac.h
index 439537927ea..a30756febcf 100644
--- a/storage/bdb/dbinc/hmac.h
+++ b/storage/bdb/dbinc/hmac.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hmac.h,v 1.5 2004/01/28 03:36:02 bostic Exp $
+ * $Id: hmac.h,v 12.1 2005/06/16 20:21:48 bostic Exp $
  */
 
 #ifndef	_DB_HMAC_H_
diff --git a/storage/bdb/dbinc/lock.h b/storage/bdb/dbinc/lock.h
index e59abbff84c..b3b7186bf6c 100644
--- a/storage/bdb/dbinc/lock.h
+++ b/storage/bdb/dbinc/lock.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock.h,v 11.53 2004/09/22 21:14:56 ubell Exp $
+ * $Id: lock.h,v 12.7 2005/10/07 20:21:23 ubell Exp $
  */
 
 #ifndef	_DB_LOCK_H_
@@ -54,11 +54,21 @@ typedef struct {
 	((t1)->tv_sec > (t2)->tv_sec ||					\
 	((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec > (t2)->tv_usec))
 
+/* Macros to lock/unlock the lock region as a whole. */
+#define	LOCK_SYSTEM_LOCK(dbenv)						\
+	MUTEX_LOCK(dbenv, ((DB_LOCKREGION *)((DB_LOCKTAB *)		\
+	    (dbenv)->lk_handle)->reginfo.primary)->mtx_region)
+#define	LOCK_SYSTEM_UNLOCK(dbenv)					\
+	MUTEX_UNLOCK(dbenv, ((DB_LOCKREGION *)((DB_LOCKTAB *)		\
+	    (dbenv)->lk_handle)->reginfo.primary)->mtx_region)
+
 /*
  * DB_LOCKREGION --
  *	The lock shared region.
  */
 typedef struct __db_lockregion {
+	db_mutex_t	mtx_region;	/* Region mutex. */
+
 	u_int32_t	need_dd;	/* flag for deadlock detector */
 	u_int32_t	detect;		/* run dd on every conflict */
 	db_timeval_t	next_timeout;	/* next time to expire a lock */
@@ -79,15 +89,9 @@ typedef struct __db_lockregion {
 
 	roff_t		conf_off;	/* offset of conflicts array */
 	roff_t		obj_off;	/* offset of object hash table */
-	roff_t		osynch_off;	/* offset of the object mutex table */
 	roff_t		locker_off;	/* offset of locker hash table */
-	roff_t		lsynch_off;	/* offset of the locker mutex table */
 
 	DB_LOCK_STAT	stat;		/* stats about locking. */
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	roff_t		maint_off;	/* offset of region maintenance info */
-#endif
 } DB_LOCKREGION;
 
 /*
@@ -122,9 +126,15 @@ typedef struct __db_lockobj {
  */
 typedef struct __db_locker {
 	u_int32_t id;			/* Locker id. */
+
+	pid_t pid;			/* Process owning locker ID */
+	db_threadid_t tid;		/* Thread owning locker ID */
+
 	u_int32_t dd_id;		/* Deadlock detector id. */
+
 	u_int32_t nlocks;		/* Number of locks held. */
 	u_int32_t nwrites;		/* Number of write locks held. */
+
 	roff_t  master_locker;		/* Locker of master transaction. */
 	roff_t  parent_locker;		/* Parent of this child. */
 	SH_LIST_HEAD(_child) child_locker;	/* List of descendant txns;
@@ -175,7 +185,7 @@ struct __db_lock {
 	 * Wait on mutex to wait on lock.  You reference your own mutex with
 	 * ID 0 and others reference your mutex with ID 1.
 	 */
-	DB_MUTEX	mutex;
+	db_mutex_t	mtx_lock;
 
 	u_int32_t	holder;		/* Who holds this lock. */
 	u_int32_t	gen;		/* Generation count. */
@@ -195,10 +205,9 @@ struct __db_lock {
  *		      (used by __lock_put_internal).
  * DB_LOCK_UNLINK:    Remove from the locker links (used in checklocker).
  * Make sure that these do not conflict with the interface flags because
- * we pass some of those around (i.e., DB_LOCK_REMOVE).
+ * we pass some of those around.
  */
 #define	DB_LOCK_DOALL		0x010000
-#define	DB_LOCK_DOWNGRADE	0x020000
 #define	DB_LOCK_FREE		0x040000
 #define	DB_LOCK_NOPROMOTE	0x080000
 #define	DB_LOCK_UNLINK		0x100000
@@ -212,11 +221,16 @@ struct __db_lock {
 	ndx = __lock_ohash(obj) % (reg)->object_t_size
 #define	SHOBJECT_LOCK(lt, reg, shobj, ndx)				\
 	ndx = __lock_lhash(shobj) % (reg)->object_t_size
+
+/*
+ * __lock_locker_hash --
+ *	Hash function for entering lockers into the locker hash table.
+ *	Since these are simply 32-bit unsigned integers at the moment,
+ *	just return the locker value.
+ */
+#define	__lock_locker_hash(locker)	(locker)
 #define	LOCKER_LOCK(lt, reg, locker, ndx)				\
 	ndx = __lock_locker_hash(locker) % (reg)->locker_t_size;
 
-#define	LOCKREGION(dbenv, lt)  R_LOCK((dbenv), &((DB_LOCKTAB *)lt)->reginfo)
-#define	UNLOCKREGION(dbenv, lt)  R_UNLOCK((dbenv), &((DB_LOCKTAB *)lt)->reginfo)
-
 #include "dbinc_auto/lock_ext.h"
 #endif /* !_DB_LOCK_H_ */
diff --git a/storage/bdb/dbinc/log.h b/storage/bdb/dbinc/log.h
index 9a8690138b7..6f97526441f 100644
--- a/storage/bdb/dbinc/log.h
+++ b/storage/bdb/dbinc/log.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log.h,v 11.90 2004/10/15 16:59:39 bostic Exp $
+ * $Id: log.h,v 12.12 2005/10/20 18:57:05 bostic Exp $
  */
 
 #ifndef _LOG_H_
@@ -42,14 +42,18 @@ struct __fname {
 					 * Txn ID of the DB create, stored so
 					 * we can log it at register time.
 					 */
-	int	  is_durable;		/* Is this file durable or not. */
+#define	DB_FNAME_NOTLOGGED	0x01	/* Log of close failed. */
+#define	DB_FNAME_DURABLE	0x02	/* File is durable. */
+	u_int32_t flags;
 };
 
 /* File open/close register log record opcodes. */
 #define	DBREG_CHKPNT	1		/* Checkpoint: file name/id dump. */
 #define	DBREG_CLOSE	2		/* File close. */
 #define	DBREG_OPEN	3		/* File open. */
-#define	DBREG_RCLOSE	4		/* File close after recovery. */
+#define	DBREG_PREOPEN	4		/* Open in mpool only. */
+#define	DBREG_RCLOSE	5		/* File close after recovery. */
+#define	DBREG_REOPEN	6		/* Open for in-memory database. */
 
 /*******************************************************
  * LOG:
@@ -75,30 +79,25 @@ struct __log_persist;	typedef struct __log_persist LOGP;
  *	Per-process log structure.
  */
 struct __db_log {
-/*
- * These fields need to be protected for multi-threaded support.
- *
- * !!!
- * As this structure is allocated in per-process memory, the mutex may need
- * to be stored elsewhere on architectures unable to support mutexes in heap
- * memory, e.g., HP/UX 9.
- */
-	DB_MUTEX  *mutexp;		/* Mutex for thread protection. */
+	/*
+	 * These fields need to be protected for multi-threaded support.
+	 */
+	db_mutex_t mtx_dbreg;		/* Mutex for thread protection. */
 
 	DB_ENTRY *dbentry;		/* Recovery file-id mapping. */
 #define	DB_GROW_SIZE	64
-	int32_t dbentry_cnt;		/* Entries.  Grows by DB_GROW_SIZE. */
+	int32_t	dbentry_cnt;		/* Entries.  Grows by DB_GROW_SIZE. */
 
-/*
- * These fields are always accessed while the region lock is held, so they do
- * not have to be protected by the thread lock as well.
- */
+	/*
+	 * These fields are only accessed when the region lock is held, so
+	 * they do not have to be protected by the thread lock as well.
+	 */
 	u_int32_t lfname;		/* Log file "name". */
 	DB_FH	 *lfhp;			/* Log file handle. */
 
 	u_int8_t *bufp;			/* Region buffer. */
 
-/* These fields are not protected. */
+	/* These fields are not thread protected. */
 	DB_ENV	 *dbenv;		/* Reference to error information. */
 	REGINFO	  reginfo;		/* Region information. */
 
@@ -138,24 +137,28 @@ struct __log_persist {
 	u_int32_t version;		/* DB_LOGVERSION */
 
 	u_int32_t log_size;		/* Log file size. */
-	u_int32_t mode;			/* Log file mode. */
+	u_int32_t notused;		/* Historically the log file mode. */
 };
 
+/* Macros to lock/unlock the log region as a whole. */
+#define	LOG_SYSTEM_LOCK(dbenv)						\
+	MUTEX_LOCK(dbenv, ((LOG *)((DB_LOG *)				\
+	    (dbenv)->lg_handle)->reginfo.primary)->mtx_region)
+#define	LOG_SYSTEM_UNLOCK(dbenv)					\
+	MUTEX_UNLOCK(dbenv, ((LOG *)((DB_LOG *)				\
+	    (dbenv)->lg_handle)->reginfo.primary)->mtx_region)
+
 /*
  * LOG --
  *	Shared log region.  One of these is allocated in shared memory,
  *	and describes the log.
  */
 struct __log {
-	/*
-	 * Due to alignment constraints on some architectures (e.g. HP-UX),
-	 * DB_MUTEXes must be the first element of shalloced structures,
-	 * and as a corollary there can be only one per structure.  Thus,
-	 * flush_mutex_off points to a mutex in a separately-allocated chunk.
-	 */
-	DB_MUTEX fq_mutex;		/* Mutex guarding file name list. */
+	db_mutex_t mtx_region;		/* Region mutex. */
 
-	LOGP	 persist;		/* Persistent information. */
+	db_mutex_t mtx_filelist;	/* Mutex guarding file name list. */
+
+	LOGP	persist;		/* Persistent information. */
 
 	SH_TAILQ_HEAD(__fq1) fq;	/* List of file names. */
 	int32_t	fid_max;		/* Max fid allocated. */
@@ -185,28 +188,23 @@ struct __log {
 					   file. */
 
 	/*
-	 * Due to alignment constraints on some architectures (e.g. HP-UX),
-	 * DB_MUTEXes must be the first element of shalloced structures,
-	 * and as a corollary there can be only one per structure.  Thus,
-	 * flush_mutex_off points to a mutex in a separately-allocated chunk.
-	 *
 	 * The s_lsn LSN is the last LSN that we know is on disk, not just
 	 * written, but synced.  This field is protected by the flush mutex
 	 * rather than by the region mutex.
 	 */
-	int	  in_flush;		/* Log flush in progress. */
-	roff_t	  flush_mutex_off;	/* Mutex guarding flushing. */
-	DB_LSN	  s_lsn;		/* LSN of the last sync. */
+	db_mutex_t mtx_flush;		/* Mutex guarding flushing. */
+	int	   in_flush;		/* Log flush in progress. */
+	DB_LSN	   s_lsn;		/* LSN of the last sync. */
 
 	DB_LOG_STAT stat;		/* Log statistics. */
 
 	/*
-	 * !!! - NOTE that the next 7 fields, waiting_lsn, verify_lsn,
-	 * max_wait_lsn, maxperm_lsn, wait_recs, rcvd_recs,
-	 * and ready_lsn are NOT protected
-	 * by the log region lock.  They are protected by db_rep->db_mutexp.
-	 * If you need access to both, you must acquire db_rep->db_mutexp
-	 * before acquiring the log region lock.
+	 * !!!
+	 * NOTE: the next 11 fields, waiting_lsn, verify_lsn, max_wait_lsn,
+	 * maxperm_lsn, wait_recs, rcvd_recs, ready_lsn and bulk_* are NOT
+	 * protected by the log region lock.  They are protected by
+	 * REP->mtx_clientdb.  If you need access to both, you must acquire
+	 * REP->mtx_clientdb before acquiring the log region lock.
 	 *
 	 * The waiting_lsn is used by the replication system.  It is the
 	 * first LSN that we are holding without putting in the log, because
@@ -235,6 +233,16 @@ struct __log {
 	 * header), rather than to 0.
 	 */
 	DB_LSN	  ready_lsn;
+	/*
+	 * The bulk_buf is used by replication for bulk transfer.  While this
+	 * is protected by REP->mtx_clientdb, this doesn't contend with the
+	 * above fields because the above are used by clients and the bulk
+	 * fields below are used by a master.
+	 */
+	roff_t	  bulk_buf;		/* Bulk transfer buffer in region. */
+	uintptr_t bulk_off;		/* Current offset into bulk buffer. */
+	u_int32_t bulk_len;		/* Length of buffer. */
+	u_int32_t bulk_flags;		/* Bulk buffer flags. */
 
 	/*
 	 * During initialization, the log system walks forward through the
@@ -253,6 +261,8 @@ struct __log {
 	u_int32_t log_size;		/* Log file's size. */
 	u_int32_t log_nsize;		/* Next log file's size. */
 
+	int	  filemode;		/* Log file permissions mode. */
+
 	/*
 	 * DB_LOG_AUTOREMOVE and DB_LOG_INMEMORY: not protected by a mutex,
 	 * all we care about is if they're zero or non-zero.
@@ -272,12 +282,6 @@ struct __log {
 	 */
 	SH_TAILQ_HEAD(__logfile) logfiles;
 	SH_TAILQ_HEAD(__free_logfile) free_logfiles;
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-#define	LG_MAINT_SIZE	(sizeof(roff_t) * DB_MAX_HANDLES)
-
-	roff_t	  maint_off;		/* offset of region maintenance info */
-#endif
 };
 
 /*
@@ -285,7 +289,7 @@ struct __log {
  *	One of these is allocated for each transaction waiting to commit.
  */
 struct __db_commit {
-	DB_MUTEX	mutex;		/* Mutex for txn to wait on. */
+	db_mutex_t	mtx_txnwait;	/* Mutex for txn to wait on. */
 	DB_LSN		lsn;		/* LSN of commit record. */
 	SH_TAILQ_ENTRY	links;		/* Either on free or waiting list. */
 
@@ -309,15 +313,17 @@ struct __db_commit {
  */
 
 #ifdef HAVE_FTRUNCATE
-#define	CHECK_LSN(redo, cmp, lsn, prev)					\
+#define	CHECK_LSN(e, redo, cmp, lsn, prev)				\
 	if (DB_REDO(redo) && (cmp) < 0 &&				\
-	    !IS_NOT_LOGGED_LSN(*(lsn)) && !IS_ZERO_LSN(*(lsn))) {	\
+	    ((!IS_NOT_LOGGED_LSN(*(lsn)) && !IS_ZERO_LSN(*(lsn))) ||	\
+	    IS_REP_CLIENT(e))) {					\
 		ret = __db_check_lsn(dbenv, lsn, prev);			\
 		goto out;						\
 	}
 #else
-#define	CHECK_LSN(redo, cmp, lsn, prev)					\
-	if (DB_REDO(redo) && (cmp) < 0 && !IS_NOT_LOGGED_LSN(*(lsn))) {	\
+#define	CHECK_LSN(e, redo, cmp, lsn, prev)				\
+	if (DB_REDO(redo) && (cmp) < 0 &&				\
+	    (!IS_NOT_LOGGED_LSN(*(lsn)) || IS_REP_CLIENT(e))) {		\
 		ret = __db_check_lsn(dbenv, lsn, prev);			\
 		goto out;						\
 	}
@@ -341,17 +347,19 @@ struct __db_filestart {
 /*
  * Internal macro to set pointer to the begin_lsn for generated
  * logging routines.  If begin_lsn is already set then do nothing.
+ * Return a pointer to the last lsn too.
  */
-#undef DB_SET_BEGIN_LSNP
-#define	DB_SET_BEGIN_LSNP(txn, rlsnp) do {				\
+#undef DB_SET_TXN_LSNP
+#define	DB_SET_TXN_LSNP(txn, blsnp, llsnp) do {				\
 	DB_LSN *__lsnp;							\
 	TXN_DETAIL *__td;						\
-	__td = R_ADDR(&(txn)->mgrp->reginfo, (txn)->off);		\
+	__td = (txn)->td;						\
+	*(llsnp) = &__td->last_lsn;					\
 	while (__td->parent != INVALID_ROFF)				\
 		__td = R_ADDR(&(txn)->mgrp->reginfo, __td->parent);	\
 	__lsnp = &__td->begin_lsn;					\
 	if (IS_ZERO_LSN(*__lsnp))					\
-		*(rlsnp) = __lsnp;					\
+		*(blsnp) = __lsnp;					\
 } while (0)
 
 /*
diff --git a/storage/bdb/dbinc/mp.h b/storage/bdb/dbinc/mp.h
index 871bd6df922..86e1905e950 100644
--- a/storage/bdb/dbinc/mp.h
+++ b/storage/bdb/dbinc/mp.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp.h,v 11.61 2004/09/17 22:00:27 mjc Exp $
+ * $Id: mp.h,v 12.5 2005/08/08 14:52:30 bostic Exp $
  */
 
 #ifndef	_DB_MP_H_
@@ -42,9 +42,14 @@ typedef enum {
  */
 struct __db_mpool {
 	/* These fields need to be protected for multi-threaded support. */
-	DB_MUTEX   *mutexp;		/* Structure thread lock. */
+	db_mutex_t mutex;		/* Thread mutex. */
 
-					/* List of pgin/pgout routines. */
+	/*
+	 * DB_MPREG structure for the DB pgin/pgout routines.
+	 *
+	 * Linked list of application-specified pgin/pgout routines.
+	 */
+	DB_MPREG *pg_inout;
 	LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
 
 					/* List of DB_MPOOLFILE's. */
@@ -102,6 +107,20 @@ struct __db_mpreg {
 #define	NBUCKET(mc, mf_offset, pgno)					\
 	(((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
 
+/* Macros to lock/unlock the mpool region as a whole. */
+#define	MPOOL_SYSTEM_LOCK(dbenv)					\
+	MUTEX_LOCK(dbenv, ((MPOOL *)((DB_MPOOL *)			\
+	    (dbenv)->mp_handle)->reginfo[0].primary)->mtx_region)
+#define	MPOOL_SYSTEM_UNLOCK(dbenv)					\
+	MUTEX_UNLOCK(dbenv, ((MPOOL *)((DB_MPOOL *)			\
+	    (dbenv)->mp_handle)->reginfo[0].primary)->mtx_region)
+
+/* Macros to lock/unlock a specific mpool region. */
+#define	MPOOL_REGION_LOCK(dbenv, infop)					\
+	MUTEX_LOCK(dbenv, ((MPOOL *)(infop)->primary)->mtx_region)
+#define	MPOOL_REGION_UNLOCK(dbenv, infop)				\
+	MUTEX_UNLOCK(dbenv, ((MPOOL *)(infop)->primary)->mtx_region)
+
 /*
  * MPOOL --
  *	Shared memory pool region.
@@ -117,6 +136,7 @@ struct __mpool {
 	 * the first of these pieces/files describes the entire pool, the
 	 * second only describe a piece of the cache.
 	 */
+	db_mutex_t	mtx_region;	/* Region mutex. */
 
 	/*
 	 * The lsn field and list of underlying MPOOLFILEs are thread protected
@@ -140,10 +160,6 @@ struct __mpool {
 	u_int32_t nreg;			/* Number of underlying REGIONS. */
 	roff_t	  regids;		/* Array of underlying REGION Ids. */
 
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	roff_t	  maint_off;		/* Maintenance information offset */
-#endif
-
 	/*
 	 * The following structure fields only describe the per-cache portion
 	 * of the region.
@@ -176,23 +192,12 @@ struct __mpool {
 };
 
 struct __db_mpool_hash {
-	DB_MUTEX	hash_mutex;	/* Per-bucket mutex. */
+	db_mutex_t	mtx_hash;	/* Per-bucket mutex. */
 
 	DB_HASHTAB	hash_bucket;	/* Head of bucket. */
 
 	u_int32_t	hash_page_dirty;/* Count of dirty pages. */
 	u_int32_t	hash_priority;	/* Minimum priority of bucket buffer. */
-
-#ifdef	HPUX_MUTEX_PAD
-	/*
-	 * !!!
-	 * We allocate the mpool hash buckets as an array, which means that
-	 * they are not individually aligned.  This fails on one platform:
-	 * HPUX 10.20, where mutexes require 16 byte alignment.   This is a
-	 * grievous hack for that single platform.
-	 */
-	u_int8_t	pad[HPUX_MUTEX_PAD];
-#endif
 };
 
 /*
@@ -218,7 +223,7 @@ struct __db_mpool_hash {
  *	Shared DB_MPOOLFILE information.
  */
 struct __mpoolfile {
-	DB_MUTEX mutex;
+	db_mutex_t mutex;		/* MPOOLFILE mutex. */
 
 	/* Protected by MPOOLFILE mutex. */
 	u_int32_t mpf_cnt;		/* Ref count: DB_MPOOLFILEs. */
@@ -226,6 +231,18 @@ struct __mpoolfile {
 
 	roff_t	  path_off;		/* File name location. */
 
+	/*
+	 * The following are used for file compaction processing.
+	 * They are only used when a thread is in the process
+	 * of trying to move free pages to the end of the file.
+	 * Other threads may look here when freeing a page.
+	 * Protected by a lock on the metapage.
+	 */
+	u_int32_t free_ref;		/* Refcount to freelist. */
+	u_int32_t free_cnt;		/* Count of free pages. */
+	size_t	  free_size;		/* Allocated size of free list. */
+	roff_t	  free_list;		/* Offset to free list. */
+
 	/*
 	 * We normally don't lock the deadfile field when we read it since we
 	 * only care if the field is zero or non-zero.  We do lock on read when
@@ -323,7 +340,7 @@ struct __mpoolfile {
  *	Buffer header.
  */
 struct __bh {
-	DB_MUTEX	mutex;		/* Buffer thread/process lock. */
+	db_mutex_t	mtx_bh;		/* Buffer thread/process mutex. */
 
 	u_int16_t	ref;		/* Reference count. */
 	u_int16_t	ref_sync;	/* Sync wait-for reference count. */
@@ -351,6 +368,10 @@ struct __bh {
 	 */
 	u_int8_t   buf[1];		/* Variable length data. */
 };
+/*
+ * Flags to __memp_ftruncate.
+ */
+#define	MP_TRUNC_RECOVER	0x01
 
 #include "dbinc_auto/mp_ext.h"
 #endif /* !_DB_MP_H_ */
diff --git a/storage/bdb/dbinc/mutex.h b/storage/bdb/dbinc/mutex.h
index 056d34bab5c..4937e4f7d3a 100644
--- a/storage/bdb/dbinc/mutex.h
+++ b/storage/bdb/dbinc/mutex.h
@@ -1,296 +1,112 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mutex.h,v 11.100 2004/10/05 14:41:12 mjc Exp $
+ * $Id: mutex.h,v 12.14 2005/10/13 00:56:52 bostic Exp $
  */
 
 #ifndef _DB_MUTEX_H_
 #define	_DB_MUTEX_H_
 
 /*
- * Some of the Berkeley DB ports require single-threading at various
- * places in the code.  In those cases, these #defines will be set.
+ * Mutexes are represented by unsigned, 32-bit integral values.  As the
+ * OOB value is 0, mutexes can be initialized by zero-ing out the memory
+ * in which they reside.
  */
-#define	DB_BEGIN_SINGLE_THREAD
-#define	DB_END_SINGLE_THREAD
+#define	MUTEX_INVALID	0
 
-/*********************************************************************
- * POSIX.1 pthreads interface.
- *********************************************************************/
-#ifdef HAVE_MUTEX_PTHREADS
-#include <pthread.h>
-
-#define	MUTEX_FIELDS							\
-	pthread_mutex_t mutex;		/* Mutex. */			\
-	pthread_cond_t  cond;		/* Condition variable. */
-#endif
-
-/*********************************************************************
- * Solaris lwp threads interface.
- *
- * !!!
- * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
- * which are available), for two reasons.  First, the Solaris C library
- * includes versions of the both UI and POSIX thread mutex interfaces, but
- * they are broken in that they don't support inter-process locking, and
- * there's no way to detect it, e.g., calls to configure the mutexes for
- * inter-process locking succeed without error.  So, we use LWP mutexes so
- * that we don't fail in fairly undetectable ways because the application
- * wasn't linked with the appropriate threads library.  Second, there were
- * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
- * before loading the libthread/libpthread threads libraries (e.g., by using
- * dlopen to load the DB library), the pwrite64 interface would be translated
- * into a call to pwrite and DB would drop core.
- *********************************************************************/
-#ifdef HAVE_MUTEX_SOLARIS_LWP
 /*
- * XXX
- * Don't change <synch.h> to <lwp/lwp.h> -- although lwp.h is listed in the
- * Solaris manual page as the correct include to use, it causes the Solaris
- * compiler on SunOS 2.6 to fail.
+ * We track mutex allocations by ID.
  */
-#include <synch.h>
+#define	MTX_APPLICATION		 1
+#define	MTX_DB_HANDLE		 2
+#define	MTX_ENV_DBLIST		 3
+#define	MTX_ENV_REGION		 4
+#define	MTX_LOCK_REGION		 5
+#define	MTX_LOGICAL_LOCK	 6
+#define	MTX_LOG_FILENAME	 7
+#define	MTX_LOG_FLUSH		 8
+#define	MTX_LOG_HANDLE		 9
+#define	MTX_LOG_REGION		10
+#define	MTX_MPOOLFILE_HANDLE	11
+#define	MTX_MPOOL_BUFFER	12
+#define	MTX_MPOOL_FH		13
+#define	MTX_MPOOL_HANDLE	14
+#define	MTX_MPOOL_HASH_BUCKET	15
+#define	MTX_MPOOL_REGION	16
+#define	MTX_MUTEX_REGION	17
+#define	MTX_MUTEX_TEST		18
+#define	MTX_REP_DATABASE	19
+#define	MTX_REP_REGION		20
+#define	MTX_SEQUENCE		21
+#define	MTX_TWISTER		22
+#define	MTX_TXN_ACTIVE		23
+#define	MTX_TXN_CHKPT		24
+#define	MTX_TXN_COMMIT		25
+#define	MTX_TXN_REGION		26
+#define	MTX_MAX_ENTRY		26
 
-#define	MUTEX_FIELDS							\
-	lwp_mutex_t mutex;		/* Mutex. */			\
-	lwp_cond_t cond;		/* Condition variable. */
+/* Redirect mutex calls to the correct functions. */
+#if defined(HAVE_MUTEX_PTHREADS) ||					\
+    defined(HAVE_MUTEX_SOLARIS_LWP) ||					\
+    defined(HAVE_MUTEX_UI_THREADS)
+#define	__mutex_init(a, b, c)		__db_pthread_mutex_init(a, b, c)
+#define	__mutex_lock(a, b)		__db_pthread_mutex_lock(a, b)
+#define	__mutex_unlock(a, b)		__db_pthread_mutex_unlock(a, b)
+#define	__mutex_destroy(a, b)		__db_pthread_mutex_destroy(a, b)
 #endif
 
-/*********************************************************************
- * Solaris/Unixware threads interface.
- *********************************************************************/
-#ifdef HAVE_MUTEX_UI_THREADS
-#include <thread.h>
-#include <synch.h>
-
-#define	MUTEX_FIELDS							\
-	mutex_t mutex;			/* Mutex. */			\
-	cond_t  cond;			/* Condition variable. */
+#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
+#define	__mutex_init(a, b, c)		__db_win32_mutex_init(a, b, c)
+#define	__mutex_lock(a, b)		__db_win32_mutex_lock(a, b)
+#define	__mutex_unlock(a, b)		__db_win32_mutex_unlock(a, b)
+#define	__mutex_destroy(a, b)		__db_win32_mutex_destroy(a, b)
 #endif
 
-/*********************************************************************
- * AIX C library functions.
- *********************************************************************/
-#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
-#include <sys/atomic_op.h>
-typedef int tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
+#if defined(HAVE_MUTEX_FCNTL)
+#define	__mutex_init(a, b, c)		__db_fcntl_mutex_init(a, b, c)
+#define	__mutex_lock(a, b)		__db_fcntl_mutex_lock(a, b)
+#define	__mutex_unlock(a, b)		__db_fcntl_mutex_unlock(a, b)
+#define	__mutex_destroy(a, b)		__db_fcntl_mutex_destroy(a, b)
 #endif
 
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	0
-#define	MUTEX_SET(x)	(!_check_lock(x, 0, 1))
-#define	MUTEX_UNSET(x)	_clear_lock(x, 0)
-#endif
+#ifndef __mutex_init			/* Test-and-set is the default */
+#define	__mutex_init(a, b, c)		__db_tas_mutex_init(a, b, c)
+#define	__mutex_lock(a, b)		__db_tas_mutex_lock(a, b)
+#define	__mutex_unlock(a, b)		__db_tas_mutex_unlock(a, b)
+#define	__mutex_destroy(a, b)		__db_tas_mutex_destroy(a, b)
 #endif
 
-/*********************************************************************
- * Apple/Darwin library functions.
- *********************************************************************/
-#ifdef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY
-typedef u_int32_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-extern int _spin_lock_try(tsl_t *);
-extern void _spin_unlock(tsl_t *);
-#define	MUTEX_SET(tsl)          _spin_lock_try(tsl)
-#define	MUTEX_UNSET(tsl)        _spin_unlock(tsl)
-#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
-#endif
-#endif
-
-/*********************************************************************
- * General C library functions (msemaphore).
+/*
+ * Lock/unlock a mutex.  If the mutex was never required, the thread of
+ * control can proceed without it.
  *
- * !!!
- * Check for HPPA as a special case, because it requires unusual alignment,
- * and doesn't support semaphores in malloc(3) or shmget(2) memory.
- *
- * !!!
- * Do not remove the MSEM_IF_NOWAIT flag.  The problem is that if a single
- * process makes two msem_lock() calls in a row, the second one returns an
- * error.  We depend on the fact that we can lock against ourselves in the
- * locking subsystem, where we set up a mutex so that we can block ourselves.
- * Tested on OSF1 v4.0.
- *********************************************************************/
-#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
-#define	MUTEX_NO_MALLOC_LOCKS
-#define	MUTEX_NO_SHMGET_LOCKS
+ * We never fail to acquire or release a mutex without panicing.  Simplify
+ * the macros to always return a panic value rather than saving the actual
+ * return value of the mutex routine.
+ */
+#define	MUTEX_LOCK(dbenv, mutex) do {					\
+	if ((mutex) != MUTEX_INVALID &&					\
+	    __mutex_lock(dbenv, mutex) != 0)				\
+		return (DB_RUNRECOVERY);				\
+} while (0)
+#define	MUTEX_UNLOCK(dbenv, mutex) do {					\
+	if ((mutex) != MUTEX_INVALID &&					\
+	    __mutex_unlock(dbenv, mutex) != 0)				\
+		return (DB_RUNRECOVERY);				\
+} while (0)
 
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	16
-#define	HPUX_MUTEX_PAD	 8
-#endif
-#endif
-
-#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
-#include <sys/mman.h>
-typedef msemaphore tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	(msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
-#define	MUTEX_SET(x)	(!msem_lock(x, MSEM_IF_NOWAIT))
-#define	MUTEX_UNSET(x)	msem_unlock(x, 0)
-#endif
-#endif
-
-/*********************************************************************
- * Plan 9 library functions.
- *********************************************************************/
-#ifdef HAVE_MUTEX_PLAN9
-typedef Lock tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#define	MUTEX_INIT(x)	(memset(x, 0, sizeof(Lock)), 0)
-#define	MUTEX_SET(x)	canlock(x)
-#define	MUTEX_UNSET(x)	unlock(x)
-#endif
-
-/*********************************************************************
- * Reliant UNIX C library functions.
- *********************************************************************/
-#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
-#include <ulocks.h>
-typedef spinlock_t tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	(initspin(x, 1), 0)
-#define	MUTEX_SET(x)	(cspinlock(x) == 0)
-#define	MUTEX_UNSET(x)	spinunlock(x)
-#endif
-#endif
-
-/*********************************************************************
- * General C library functions (POSIX 1003.1 sema_XXX).
- *
- * !!!
- * Never selected by autoconfig in this release (semaphore calls are known
- * to not work in Solaris 5.5).
- *********************************************************************/
-#ifdef HAVE_MUTEX_SEMA_INIT
-#include <synch.h>
-typedef sema_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	 sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_DESTROY(x) sema_destroy(x)
-#define	MUTEX_INIT(x)	 (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
-#define	MUTEX_SET(x)	 (sema_wait(x) == 0)
-#define	MUTEX_UNSET(x)	 sema_post(x)
-#endif
-#endif
-
-/*********************************************************************
- * SGI C library functions.
- *********************************************************************/
-#ifdef HAVE_MUTEX_SGI_INIT_LOCK
-#include <abi_mutex.h>
-typedef abilock_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	(init_lock(x) != 0)
-#define	MUTEX_SET(x)	(!acquire_lock(x))
-#define	MUTEX_UNSET(x)	release_lock(x)
-#endif
-#endif
-
-/*********************************************************************
- * Solaris C library functions.
- *
- * !!!
- * These are undocumented functions, but they're the only ones that work
- * correctly as far as we know.
- *********************************************************************/
-#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
-#include <sys/machlock.h>
-typedef lock_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	0
-#define	MUTEX_SET(x)	_lock_try(x)
-#define	MUTEX_UNSET(x)	_lock_clear(x)
-#endif
-#endif
-
-/*********************************************************************
- * VMS.
- *********************************************************************/
-#ifdef HAVE_MUTEX_VMS
-#include <sys/mman.h>;
-#include <builtins.h>
-typedef volatile unsigned char tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN		sizeof(unsigned int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#ifdef __ALPHA
-#define	MUTEX_SET(tsl)		(!__TESTBITSSI(tsl, 0))
-#else /* __VAX */
-#define	MUTEX_SET(tsl)		(!(int)_BBSSI(0, tsl))
-#endif
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * VxWorks
- * Use basic binary semaphores in VxWorks, as we currently do not need
- * any special features.  We do need the ability to single-thread the
- * entire system, however, because VxWorks doesn't support the open(2)
- * flag O_EXCL, the mechanism we normally use to single thread access
- * when we're first looking for a DB environment.
- *********************************************************************/
+/*
+ * Berkeley DB ports may require single-threading at places in the code.
+ */
 #ifdef HAVE_MUTEX_VXWORKS
 #include "taskLib.h"
-typedef SEM_ID tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN		sizeof(unsigned int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET(tsl)		(semTake((*tsl), WAIT_FOREVER) == OK)
-#define	MUTEX_UNSET(tsl)	(semGive((*tsl)))
-#define	MUTEX_INIT(tsl)							\
-	((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
-#define	MUTEX_DESTROY(tsl)	semDelete(*tsl)
-#endif
-
 /*
  * Use the taskLock() mutex to eliminate a race where two tasks are
  * trying to initialize the global lock at the same time.
  */
-#undef	DB_BEGIN_SINGLE_THREAD
 #define	DB_BEGIN_SINGLE_THREAD do {					\
 	if (DB_GLOBAL(db_global_init))					\
 		(void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER);	\
@@ -309,673 +125,18 @@ typedef SEM_ID tsl_t;
 		taskUnlock();						\
 	}								\
 } while (DB_GLOBAL(db_global_init) == 0)
-#undef	DB_END_SINGLE_THREAD
 #define	DB_END_SINGLE_THREAD	(void)semGive(DB_GLOBAL(db_global_lock))
 #endif
 
-/*********************************************************************
- * Win16
- *
- * Win16 spinlocks are simple because we cannot possibly be preempted.
- *
- * !!!
- * We should simplify this by always returning a no-need-to-lock lock
- * when we initialize the mutex.
- *********************************************************************/
-#ifdef HAVE_MUTEX_WIN16
-typedef unsigned int tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN		sizeof(unsigned int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)		0
-#define	MUTEX_SET(tsl)		(*(tsl) = 1)
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#endif
-#endif
-
-/*********************************************************************
- * Win32
- *********************************************************************/
-#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
-#define	MUTEX_FIELDS							\
-	LONG tas;							\
-	LONG nwaiters;							\
-	u_int32_t id;	/* ID used for creating events */		\
-
-#if defined(LOAD_ACTUAL_MUTEX_CODE)
-#define	MUTEX_SET(tsl)		(!InterlockedExchange((PLONG)tsl, 1))
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-
 /*
- * From Intel's performance tuning documentation (and see SR #6975):
- * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
- *
- * "For this reason, it is highly recommended that you insert the PAUSE
- * instruction into all spin-wait code immediately. Using the PAUSE
- * instruction does not affect the correctness of programs on existing
- * platforms, and it improves performance on Pentium 4 processor platforms."
+ * Single-threading defaults to a no-op.
  */
-#ifdef HAVE_MUTEX_WIN32
-#ifndef _WIN64
-#define	MUTEX_PAUSE		{__asm{_emit 0xf3}; __asm{_emit 0x90}}
-#endif
-#endif
-#ifdef HAVE_MUTEX_WIN32_GCC
-#define	MUTEX_PAUSE		asm volatile ("rep; nop" : : );
-#endif
+#ifndef DB_BEGIN_SINGLE_THREAD
+#define	DB_BEGIN_SINGLE_THREAD
 #endif
+#ifndef DB_END_SINGLE_THREAD
+#define	DB_END_SINGLE_THREAD
 #endif
 
-/*********************************************************************
- * 68K/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET_TEST	1		/* gcc/68K: 0 is clear, 1 is set. */
-
-#define	MUTEX_SET(tsl) ({						\
-	register tsl_t *__l = (tsl);					\
-	int __r;							\
-	    asm volatile("tas  %1; \n					\
-			  seq  %0"					\
-		: "=dm" (__r), "=m" (*__l)				\
-		: "1" (*__l)						\
-		);							\
-	__r & 1;							\
-})
-
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * ALPHA/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
-typedef u_int32_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	4
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * For gcc/alpha.  Should return 0 if could not acquire the lock, 1 if
- * lock was acquired properly.
- */
-static inline int
-MUTEX_SET(tsl_t *tsl) {
-	register tsl_t *__l = tsl;
-	register tsl_t __r;
-	asm volatile(
-		"1:	ldl_l	%0,%2\n"
-		"	blbs	%0,2f\n"
-		"	or	$31,1,%0\n"
-		"	stl_c	%0,%1\n"
-		"	beq	%0,3f\n"
-		"	mb\n"
-		"	br	3f\n"
-		"2:	xor	%0,%0\n"
-		"3:"
-		: "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
-	return __r;
-}
-
-/*
- * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
- * might be necessary before unlocking
- */
-static inline int
-MUTEX_UNSET(tsl_t *tsl) {
-	asm volatile("	mb\n");
-	return *tsl = 0;
-}
-
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * Tru64/cc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY
-typedef volatile u_int32_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	4
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#include <alpha/builtins.h>
-#define	MUTEX_SET(tsl)		(__LOCK_LONG_RETRY((tsl), 1) != 0)
-#define	MUTEX_UNSET(tsl)	(__UNLOCK_LONG(tsl))
-
-#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
-#endif
-#endif
-
-/*********************************************************************
- * ARM/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET_TEST	1		/* gcc/arm: 0 is clear, 1 is set. */
-
-#define	MUTEX_SET(tsl) ({						\
-	int __r;							\
-	asm volatile(							\
-		"swpb	%0, %1, [%2]\n\t"				\
-		"eor	%0, %0, #1\n\t"					\
-	    : "=&r" (__r)						\
-	    : "r" (1), "r" (tsl)					\
-	    );								\
-	__r & 1;							\
-})
-
-#define	MUTEX_UNSET(tsl)	(*(volatile tsl_t *)(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * HPPA/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
-typedef u_int32_t tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	16
-#define	HPUX_MUTEX_PAD	 8
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
- * The 32-bit word used by that instruction must be 16-byte aligned.  We could
- * use the "aligned" attribute in GCC but that doesn't work for stack variables.
- */
-#define	MUTEX_SET(tsl) ({						\
-	register tsl_t *__l = (tsl);					\
-	int __r;							\
-	asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l));	\
-	__r & 1;							\
-})
-
-#define	MUTEX_UNSET(tsl)	(*(tsl) = -1)
-#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
-#endif
-#endif
-
-/*********************************************************************
- * IA64/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET_TEST	1		/* gcc/ia64: 0 is clear, 1 is set. */
-
-#define	MUTEX_SET(tsl) ({						\
-	register tsl_t *__l = (tsl);					\
-	long __r;							\
-	asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
-	__r ^ 1;							\
-})
-
-/*
- * Store through a "volatile" pointer so we get a store with "release"
- * semantics.
- */
-#define	MUTEX_UNSET(tsl)	(*(volatile unsigned char *)(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * PowerPC/gcc assembly.
- *********************************************************************/
-#if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY)
-typedef u_int32_t tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * The PowerPC does a sort of pseudo-atomic locking.  You set up a
- * 'reservation' on a chunk of memory containing a mutex by loading the
- * mutex value with LWARX.  If the mutex has an 'unlocked' (arbitrary)
- * value, you then try storing into it with STWCX.  If no other process or
- * thread broke your 'reservation' by modifying the memory containing the
- * mutex, then the STCWX succeeds; otherwise it fails and you try to get
- * a reservation again.
- *
- * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
- * entire cache line, normally 32 bytes, aligned naturally.  If the mutex
- * lives near data that gets changed a lot, there's a chance that you'll
- * see more broken reservations than you might otherwise.  The only
- * situation in which this might be a problem is if one processor is
- * beating on a variable in the same cache block as the mutex while another
- * processor tries to acquire the mutex.  That's bad news regardless
- * because of the way it bashes caches, but if you can't guarantee that a
- * mutex will reside in a relatively quiescent cache line, you might
- * consider padding the mutex to force it to live in a cache line by
- * itself.  No, you aren't guaranteed that cache lines are 32 bytes.  Some
- * embedded processors use 16-byte cache lines, while some 64-bit
- * processors use 128-bit cache lines.  But assuming a 32-byte cache line
- * won't get you into trouble for now.
- *
- * If mutex locking is a bottleneck, then you can speed it up by adding a
- * regular LWZ load before the LWARX load, so that you can test for the
- * common case of a locked mutex without wasting cycles making a reservation.
- *
- * 'set' mutexes have the value 1, like on Intel; the returned value from
- * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
- */
-#define	MUTEX_SET_TEST	1		/* gcc/ppc: 0 is clear, 1 is set. */
-
-static inline int
-MUTEX_SET(int *tsl)  {
-         int __r;
-         int __tmp = (int)tsl;
-    asm volatile (
-"0:                             \n\t"
-"       lwarx   %0,0,%2         \n\t"
-"       cmpwi   %0,0            \n\t"
-"       bne-    1f              \n\t"
-"       stwcx.  %2,0,%2         \n\t"
-"       isync                   \n\t"
-"       beq+    2f              \n\t"
-"       b       0b              \n\t"
-"1:                             \n\t"
-"       li      %1, 0           \n\t"
-"2:                             \n\t"
-         : "=&r" (__r), "=r" (tsl)
-         : "r" (__tmp)
-         : "cr0", "memory");
-         return (int)tsl;
-}
-
-static inline int
-MUTEX_UNSET(tsl_t *tsl) {
-         asm volatile("sync" : : : "memory");
-         return *tsl = 0;
-}
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * OS/390 C
- *********************************************************************/
-#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
-typedef int tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * cs() is declared in  but is built in to the compiler.
- * Must use LANGLVL(EXTENDED) to get its declaration.
- */
-#define	MUTEX_SET(tsl)		(!cs(&zero, (tsl), 1))
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * S/390 32-bit assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
-typedef int tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET_TEST	1		/* gcc/S390: 0 is clear, 1 is set. */
-
-static inline int
-MUTEX_SET(tsl_t *tsl) {							\
-	register tsl_t *__l = (tsl);					\
-	int __r;							\
-  asm volatile(								\
-       "    la    1,%1\n"						\
-       "    lhi   0,1\n"						\
-       "    l     %0,%1\n"						\
-       "0:  cs    %0,0,0(1)\n"						\
-       "    jl    0b"							\
-       : "=&d" (__r), "+m" (*__l)					\
-       : : "0", "1", "cc");						\
-	return !__r;							\
-}
-
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * SCO/cc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * UnixWare has threads in libthread, but OpenServer doesn't (yet).
- */
-#define	MUTEX_SET_TEST	1		/* cc/x86: 0 is clear, 1 is set. */
-
-#if defined(__USLC__)
-asm int
-_tsl_set(void *tsl)
-{
-%mem tsl
-	movl	tsl, %ecx
-	movl	$1, %eax
-	lock
-	xchgb	(%ecx),%al
-	xorl	$1,%eax
-}
-#endif
-
-#define	MUTEX_SET(tsl)		_tsl_set(tsl)
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * Sparc/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- *
- * The ldstub instruction takes the location specified by its first argument
- * (a register containing a memory address) and loads its contents into its
- * second argument (a register) and atomically sets the contents the location
- * specified by its first argument to a byte of 1s.  (The value in the second
- * argument is never read, but only overwritten.)
- *
- * The stbar is needed for v8, and is implemented as membar #sync on v9,
- * so is functional there as well.  For v7, stbar may generate an illegal
- * instruction and we have no way to tell what we're running on.  Some
- * operating systems notice and skip this instruction in the fault handler.
- */
-#define	MUTEX_SET_TEST	1		/* gcc/sparc: 0 is clear, 1 is set. */
-
-#define	MUTEX_SET(tsl) ({						\
-	register tsl_t *__l = (tsl);					\
-	register tsl_t __r;						\
-	__asm__ volatile						\
-	    ("ldstub [%1],%0; stbar"					\
-	    : "=r"( __r) : "r" (__l));					\
-	!__r;								\
-})
-
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-#endif
-#endif
-
-/*********************************************************************
- * UTS/cc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
-typedef int tsl_t;
-
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	sizeof(int)
-#endif
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_INIT(x)	0
-#define	MUTEX_SET(x)	(!uts_lock(x, 1))
-#define	MUTEX_UNSET(x)	(*(x) = 0)
-#endif
-#endif
-
-/*********************************************************************
- * x86/gcc assembly.
- *********************************************************************/
-#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define	MUTEX_SET_TEST	1		/* gcc/x86: 0 is clear, 1 is set. */
-
-#define	MUTEX_SET(tsl) ({						\
-	register tsl_t *__l = (tsl);					\
-	int __r;							\
-	asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
-	    : "=&a" (__r), "=m" (*__l)					\
-	    : "1" (*__l)						\
-	    );								\
-	__r & 1;							\
-})
-
-#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
-#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
-
-/*
- * From Intel's performance tuning documentation (and see SR #6975):
- * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
- *
- * "For this reason, it is highly recommended that you insert the PAUSE
- * instruction into all spin-wait code immediately. Using the PAUSE
- * instruction does not affect the correctness of programs on existing
- * platforms, and it improves performance on Pentium 4 processor platforms."
- */
-#define	MUTEX_PAUSE		asm volatile ("rep; nop" : : );
-#endif
-#endif
-
-/*
- * Mutex alignment defaults to one byte.
- *
- * !!!
- * Various systems require different alignments for mutexes (the worst we've
- * seen so far is 16-bytes on some HP architectures).  Malloc(3) is assumed
- * to return reasonable alignment, all other mutex users must ensure proper
- * alignment locally.
- */
-#ifndef	MUTEX_ALIGN
-#define	MUTEX_ALIGN	1
-#endif
-
-/*
- * Mutex destruction defaults to a no-op.
- */
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#ifndef	MUTEX_DESTROY
-#define	MUTEX_DESTROY(x)
-#endif
-#endif
-
-/*
- * !!!
- * The flag arguments for __db_mutex_setup (and the underlying initialization
- * function for the mutex type, for example, __db_tas_mutex_init), and flags
- * stored in the DB_MUTEX structure are combined, and may not overlap.  Flags
- * to __db_mutex_setup:
- *
- * MUTEX_ALLOC:
- *	Use when the mutex to initialize needs to be allocated. The 'ptr'
- *	arg to __db_mutex_setup should be a DB_MUTEX ** whenever you use
- *	this flag.  If this flag is not set, the 'ptr' arg is a DB_MUTEX *.
- * MUTEX_NO_RECORD:
- *	Explicitly do not record the mutex in the region.  Otherwise the
- *	mutex will be recorded by default.  If you set this you need to
- *	understand why you don't need it recorded.  The *only* ones not
- *	recorded are those that are part of region structures that only
- *	get destroyed when the regions are destroyed.
- * MUTEX_NO_RLOCK:
- *	Explicitly do not lock the given region otherwise the region will
- *	be locked by default.
- * MUTEX_SELF_BLOCK:
- *	Set if self blocking mutex.
- * MUTEX_THREAD:
- *	Set if mutex is a thread-only mutex.
- */
-#define	MUTEX_ALLOC		0x0001	/* Allocate and init a mutex */
-#define	MUTEX_IGNORE		0x0002	/* Ignore, no lock required. */
-#define	MUTEX_INITED		0x0004	/* Mutex is successfully initialized */
-#define	MUTEX_LOGICAL_LOCK	0x0008	/* Mutex backs database lock. */
-#define	MUTEX_MPOOL		0x0010	/* Allocated from mpool. */
-#define	MUTEX_NO_RECORD		0x0020	/* Do not record lock */
-#define	MUTEX_NO_RLOCK		0x0040	/* Do not acquire region lock */
-#define	MUTEX_SELF_BLOCK	0x0080	/* Must block self. */
-#define	MUTEX_THREAD		0x0100	/* Thread-only mutex. */
-
-/* Mutex. */
-struct __mutex_t {
-#ifdef	HAVE_MUTEX_THREADS
-#ifdef	MUTEX_FIELDS
-	MUTEX_FIELDS
-#else
-	tsl_t	tas;			/* Test and set. */
-#endif
-	u_int32_t locked;		/* !0 if locked. */
-#else
-	u_int32_t off;			/* Byte offset to lock. */
-	u_int32_t pid;			/* Lock holder: 0 or process pid. */
-#endif
-	u_int32_t mutex_set_wait;	/* Granted after wait. */
-	u_int32_t mutex_set_nowait;	/* Granted without waiting. */
-	u_int32_t mutex_set_spin;	/* Granted without spinning. */
-	u_int32_t mutex_set_spins;	/* Total number of spins. */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	roff_t	  reg_off;		/* Shared lock info offset. */
-#endif
-	/*
-	 * Flags should be an unsigned integer even if it's not required by
-	 * the possible flags values, getting a single byte on some machines
-	 * is expensive, and the mutex structure is a MP hot spot.
-	 */
-	u_int32_t flags;		/* MUTEX_XXX */
-};
-
-/* Macro to clear mutex statistics. */
-#define	MUTEX_CLEAR(mp) {						\
-	(mp)->mutex_set_wait = (mp)->mutex_set_nowait = 0;		\
-}
-
-/* Redirect calls to the correct functions. */
-#ifdef HAVE_MUTEX_THREADS
-#if defined(HAVE_MUTEX_PTHREADS) ||					\
-    defined(HAVE_MUTEX_SOLARIS_LWP) ||					\
-    defined(HAVE_MUTEX_UI_THREADS)
-#define	__db_mutex_init_int(a, b, c, d)	__db_pthread_mutex_init(a, b, d)
-#define	__db_mutex_lock(a, b)		__db_pthread_mutex_lock(a, b)
-#define	__db_mutex_unlock(a, b)		__db_pthread_mutex_unlock(a, b)
-#define	__db_mutex_destroy(a)		__db_pthread_mutex_destroy(a)
-#else
-#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
-#define	__db_mutex_init_int(a, b, c, d)	__db_win32_mutex_init(a, b, d)
-#define	__db_mutex_lock(a, b)		__db_win32_mutex_lock(a, b)
-#define	__db_mutex_unlock(a, b)		__db_win32_mutex_unlock(a, b)
-#define	__db_mutex_destroy(a)		__db_win32_mutex_destroy(a)
-#else
-#define	__db_mutex_init_int(a, b, c, d)	__db_tas_mutex_init(a, b, d)
-#define	__db_mutex_lock(a, b)		__db_tas_mutex_lock(a, b)
-#define	__db_mutex_unlock(a, b)		__db_tas_mutex_unlock(a, b)
-#define	__db_mutex_destroy(a)		__db_tas_mutex_destroy(a)
-#endif
-#endif
-#else
-#define	__db_mutex_init_int(a, b, c, d)	__db_fcntl_mutex_init(a, b, c)
-#define	__db_mutex_lock(a, b)		__db_fcntl_mutex_lock(a, b)
-#define	__db_mutex_unlock(a, b)		__db_fcntl_mutex_unlock(a, b)
-#define	__db_mutex_destroy(a)		__db_fcntl_mutex_destroy(a)
-#endif
-
-/* Redirect system resource calls to correct functions */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-#define	__db_maintinit(a, b, c)		__db_shreg_maintinit(a, b, c)
-#define	__db_shlocks_clear(a, b, c)	__db_shreg_locks_clear(a, b, c)
-#define	__db_shlocks_destroy(a, b)	__db_shreg_locks_destroy(a, b)
-#define	__db_mutex_init(a, b, c, d, e, f)	\
-    __db_shreg_mutex_init(a, b, c, d, e, f)
-#else
-#define	__db_maintinit(a, b, c)
-#define	__db_shlocks_clear(a, b, c)
-#define	__db_shlocks_destroy(a, b)
-#define	__db_mutex_init(a, b, c, d, e, f)	__db_mutex_init_int(a, b, c, d)
-#endif
-
-/*
- * Lock/unlock a mutex.  If the mutex was marked as uninteresting, the thread
- * of control can proceed without it.
- *
- * If the lock is for threads-only, then it was optionally not allocated and
- * file handles aren't necessary, as threaded applications aren't supported by
- * fcntl(2) locking.
- */
-#ifdef DIAGNOSTIC
-	/*
-	 * XXX
-	 * We want to switch threads as often as possible.  Yield every time
-	 * we get a mutex to ensure contention.
-	 */
-#define	MUTEX_LOCK(dbenv, mp)						\
-	if (!F_ISSET((mp), MUTEX_IGNORE))				\
-		DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0);		\
-	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))				\
-		__os_yield(NULL, 1);
-#else
-#define	MUTEX_LOCK(dbenv, mp)						\
-	if (!F_ISSET((mp), MUTEX_IGNORE))				\
-		(void)__db_mutex_lock(dbenv, mp);
-#endif
-#define	MUTEX_UNLOCK(dbenv, mp)						\
-	if (!F_ISSET((mp), MUTEX_IGNORE))				\
-		(void)__db_mutex_unlock(dbenv, mp);
-#define	MUTEX_THREAD_LOCK(dbenv, mp)					\
-	if (mp != NULL)							\
-		MUTEX_LOCK(dbenv, mp)
-#define	MUTEX_THREAD_UNLOCK(dbenv, mp)					\
-	if (mp != NULL)							\
-		MUTEX_UNLOCK(dbenv, mp)
-
-/*
- * We use a single file descriptor for fcntl(2) locking, and (generally) the
- * object's offset in a shared region as the byte that we're locking.  So,
- * there's a (remote) possibility that two objects might have the same offsets
- * such that the locks could conflict, resulting in deadlock.  To avoid this
- * possibility, we offset the region offset by a small integer value, using a
- * different offset for each subsystem's locks.  Since all region objects are
- * suitably aligned, the offset guarantees that we don't collide with another
- * region's objects.
- */
-#define	DB_FCNTL_OFF_GEN	0		/* Everything else. */
-#define	DB_FCNTL_OFF_LOCK	1		/* Lock subsystem offset. */
-#define	DB_FCNTL_OFF_MPOOL	2		/* Mpool subsystem offset. */
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-/*
- * When the underlying mutexes require library (most likely heap) or system
- * resources, we have to clean up when we discard mutexes (for the library
- * resources) and both when discarding mutexes and after application failure
- * (for the mutexes requiring system resources).  This violates the rule that
- * we never look at a shared region after application failure, but we've no
- * other choice.  In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
- * set.
- *
- * To support mutex release after application failure, allocate thread-handle
- * mutexes in shared memory instead of in the heap.  The number of slots we
- * allocate for this purpose isn't configurable, but this tends to be an issue
- * only on embedded systems where we don't expect large server applications.
- */
-#define	DB_MAX_HANDLES	100			/* Mutex slots for handles. */
-#endif
+#include "dbinc_auto/mutex_ext.h"
 #endif /* !_DB_MUTEX_H_ */
diff --git a/storage/bdb/dbinc/os.h b/storage/bdb/dbinc/os.h
index 24685a4a765..52013630908 100644
--- a/storage/bdb/dbinc/os.h
+++ b/storage/bdb/dbinc/os.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os.h,v 11.25 2004/09/22 03:40:20 bostic Exp $
+ * $Id: os.h,v 12.10 2005/10/31 02:22:24 bostic Exp $
  */
 
 #ifndef _DB_OS_H_
@@ -17,22 +17,47 @@ extern "C" {
 /* Number of times to retry system calls that return EINTR or EBUSY. */
 #define	DB_RETRY	100
 
+#ifdef __TANDEM
+/*
+ * OSS Tandem problem: fsync can return a Guardian file system error of 70,
+ * which has no symbolic name in OSS.  HP says to retry the fsync. [#12957]
+ */
 #define	RETRY_CHK(op, ret) do {						\
 	int __retries = DB_RETRY;					\
 	do {								\
 		(ret) = (op);						\
 	} while ((ret) != 0 && (((ret) = __os_get_errno()) == EAGAIN ||	\
-	    (ret) == EBUSY || (ret) == EINTR) && --__retries > 0);	\
+	    (ret) == EBUSY || (ret) == EINTR || (ret) == EIO ||		\
+	    (ret) == 70) &&						\
+	    --__retries > 0);						\
+} while (0)
+#else
+#define	RETRY_CHK(op, ret) do {						\
+	int __retries = DB_RETRY;					\
+	do {								\
+		(ret) = (op);						\
+	} while ((ret) != 0 && (((ret) = __os_get_errno()) == EAGAIN ||	\
+	    (ret) == EBUSY || (ret) == EINTR || (ret) == EIO) &&	\
+	    --__retries > 0);						\
+} while (0)
+#endif
+
+#define	RETRY_CHK_EINTR_ONLY(op, ret) do {				\
+	int __retries = DB_RETRY;					\
+	do {								\
+		(ret) = (op);						\
+	} while ((ret) != 0 &&						\
+	    (((ret) = __os_get_errno()) == EINTR) && --__retries > 0);	\
 } while (0)
 
 /*
  * Flags understood by __os_open.
  */
-#define	DB_OSO_CREATE	0x0001		/* POSIX: O_CREAT */
-#define	DB_OSO_DIRECT	0x0002		/* Don't buffer the file in the OS. */
-#define	DB_OSO_DSYNC	0x0004		/* POSIX: O_DSYNC. */
-#define	DB_OSO_EXCL	0x0008		/* POSIX: O_EXCL */
-#define	DB_OSO_LOG	0x0010		/* Opening a log file. */
+#define	DB_OSO_ABSMODE	0x0001		/* Absolute mode specified. */
+#define	DB_OSO_CREATE	0x0002		/* POSIX: O_CREAT */
+#define	DB_OSO_DIRECT	0x0004		/* Don't buffer the file in the OS. */
+#define	DB_OSO_DSYNC	0x0008		/* POSIX: O_DSYNC. */
+#define	DB_OSO_EXCL	0x0010		/* POSIX: O_EXCL */
 #define	DB_OSO_RDONLY	0x0020		/* POSIX: O_RDONLY */
 #define	DB_OSO_REGION	0x0040		/* Opening a region file. */
 #define	DB_OSO_SEQ	0x0080		/* Expected sequential access. */
@@ -62,14 +87,14 @@ struct __fh_t {
 	 * across seek and read/write pairs, it does not protect the
 	 * the reference count, or any other fields in the structure.
 	 */
-	DB_MUTEX  *mutexp;		/* Mutex to lock. */
+	db_mutex_t mtx_fh;		/* Mutex to lock. */
 
-	int	  ref;			/* Reference count. */
+	int	ref;			/* Reference count. */
 
 #if defined(DB_WIN32)
-	HANDLE	  handle;		/* Windows/32 file handle. */
+	HANDLE	handle;		/* Windows/32 file handle. */
 #endif
-	int	  fd;			/* POSIX file descriptor. */
+	int	fd;			/* POSIX file descriptor. */
 
 	char	*name;			/* File name (ref DB_FH_UNLINK) */
 
@@ -87,6 +112,9 @@ struct __fh_t {
 	u_int8_t flags;
 };
 
+/* Standard 600 mode for __db_omode. */
+#define	OWNER_RW	"rw-------"
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/storage/bdb/dbinc/qam.h b/storage/bdb/dbinc/qam.h
index 43910d01d76..fdf1aa96eec 100644
--- a/storage/bdb/dbinc/qam.h
+++ b/storage/bdb/dbinc/qam.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam.h,v 11.49 2004/09/17 22:00:27 mjc Exp $
+ * $Id: qam.h,v 12.3 2005/10/16 18:42:40 bostic Exp $
  */
 
 #ifndef	_DB_QAM_H_
@@ -69,8 +69,8 @@ struct __queue {
 };
 
 /* Format for queue extent names. */
-#define	QUEUE_EXTENT "%s%c__dbq.%s.%d"
-#define	QUEUE_EXTENT_HEAD "__dbq.%s."
+#define	QUEUE_EXTENT		"%s%c__dbq.%s.%d"
+#define	QUEUE_EXTENT_HEAD	"__dbq.%s."
 
 typedef struct __qam_filelist {
 	DB_MPOOLFILE *mpf;
diff --git a/storage/bdb/dbinc/queue.h b/storage/bdb/dbinc/queue.h
index bf09b12d7ab..d76f2019f6f 100644
--- a/storage/bdb/dbinc/queue.h
+++ b/storage/bdb/dbinc/queue.h
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
diff --git a/storage/bdb/dbinc/region.h b/storage/bdb/dbinc/region.h
index 98f6ea07b16..5999893962a 100644
--- a/storage/bdb/dbinc/region.h
+++ b/storage/bdb/dbinc/region.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: region.h,v 11.51 2004/10/15 16:59:39 bostic Exp $
+ * $Id: region.h,v 12.7 2005/10/13 00:53:00 bostic Exp $
  */
 
 #ifndef _DB_REGION_H_
@@ -24,16 +24,18 @@
  *	file mapped into memory (mmap, MapViewOfFile)
  *	system memory (shmget, CreateFileMapping)
  *
- * If the regions are private to a process, they're in malloc.  If they're
- * public, they're in file mapped memory, or, optionally, in system memory.
- * Regions in the filesystem are named "__db.001", "__db.002" and so on.  If
- * we're not using a private environment allocated using malloc(3), the file
- * "__db.001" will always exist, as we use it to synchronize on the regions,
- * whether they exist in file mapped memory or system memory.
+ * By default, regions are created in filesystem-backed shared memory.  They
+ * can also be created in system shared memory (DB_SYSTEM_MEM), or, if private
+ * to a process, in heap memory (DB_PRIVATE).
  *
- * The file "__db.001" contains a REGENV structure and a linked list of some
- * number of REGION structures.  Each of the REGION structures describes and
- * locks one of the underlying shared regions used by DB.
+ * Regions in the filesystem are named "__db.001", "__db.002" and so on.  If
+ * we're not using a private environment allocated in heap, "__db.001" will
+ * always exist, as we use it to synchronize on the regions, whether they are
+ * in filesystem-backed memory or system memory.
+ *
+ * The file "__db.001" contains a REGENV structure and an array of REGION
+ * structures.  Each REGION structures describes an underlying chunk of
+ * shared memory.
  *
  *	__db.001
  *	+---------+
@@ -49,22 +51,32 @@
  *	|	  |   +----------+
  *	+---------+
  *
- * The only tricky part about manipulating the regions is correctly creating
- * or joining the REGENV file, i.e., __db.001.  We have to be absolutely sure
- * that only one process creates it, and that everyone else joins it without
- * seeing inconsistent data.  Once that region is created, we can use normal
- * shared locking procedures to do mutual exclusion for all other regions.
+ * The tricky part about manipulating the regions is creating or joining the
+ * database environment.  We have to be sure only a single thread of control
+ * creates and/or recovers a database environment.  All other threads should
+ * then join without seeing inconsistent data.
  *
- * One of the REGION structures in the main environment region describes the
- * environment region itself.
+ * We do this in two parts: first, we use the underlying O_EXCL flag to the
+ * open system call to serialize creation of the __db.001 file.  The thread
+ * of control creating that file then proceeds to create the remaining
+ * regions in the environment, including the mutex region.  Once the mutex
+ * region has been created, the creating thread of control fills in the
+ * __db.001 file's magic number.  Other threads of control (the ones that
+ * didn't create the __db.001 file), wait on the initialization of the
+ * __db.001 file's magic number.  After it has been initialized, all threads
+ * of control can proceed, using normal shared mutex locking procedures for
+ * exclusion.
  *
- * To lock a region, locate the REGION structure that describes it and acquire
- * the region's mutex.  There is one exception to this rule -- the lock for the
- * environment region itself is in the REGENV structure, and not in the REGION
- * that describes the environment region.  That's so that we can acquire a lock
- * without walking linked lists that could potentially change underneath us.
- * The REGION will not be moved or removed during the life of the region, and
- * so long-lived references to it can be held by the process.
+ * REGIONs are not moved or removed during the life of the environment, and
+ * so processes can have long-lived references to them.
+ *
+ * One of the REGION structures describes the environment region itself.
+ *
+ * The REGION array is not locked in any way.  It's an array so we don't have
+ * to manipulate data structures after a crash -- on some systems, we have to
+ * join and clean up the mutex region after application failure.  Using an
+ * array means we don't have to worry about broken links or other nastiness
+ * after the failure.
  *
  * All requests to create or join a region return a REGINFO structure, which
  * is held by the caller and used to open and subsequently close the reference
@@ -82,17 +94,19 @@
  * Although DB does not currently grow regions when they run out of memory, it
  * would be possible to do so.  To grow a region, allocate a new region of the
  * appropriate size, then copy the old region over it and insert the additional
- * space into the already existing shalloc arena.  Callers may have to fix up
- * local references, but that should be easy to do.  This failed in historic
- * versions of DB because the region lock lived in the mapped memory, and when
- * it was unmapped and remapped (or copied), threads could lose track of it.
- * Once we moved that lock into a region that is never unmapped, growing should
- * work.  That all said, current versions of DB don't implement region grow
- * because some systems don't support mutex copying, e.g., from OSF1 V4.0:
+ * memory into the already existing shalloc arena.  Region users must reset
+ * their base addresses and any local pointers into the memory, of course.
+ * This failed in historic versions of DB because the region mutexes lived in
+ * the mapped memory, and when it was unmapped and remapped (or copied),
+ * threads could lose track of it.  Also, some systems didn't support mutex
+ * copying, e.g., from OSF1 V4.0:
  *
  *	The address of an msemaphore structure may be significant.  If the
  *	msemaphore structure contains any value copied from an msemaphore
  *	structure at a different address, the result is undefined.
+ *
+ * All mutexes are now maintained in a separate region which is never unmapped,
+ * so growing regions should be possible.
  */
 
 #if defined(__cplusplus)
@@ -138,42 +152,55 @@ typedef struct __db_reg_env_ref {
 typedef struct __db_reg_env {
 	/*
 	 * !!!
-	 * The mutex must be the first entry in the structure to guarantee
-	 * correct alignment.
+	 * The magic, panic, version and envid fields of the region are fixed
+	 * in size, the timestamp field is the first field which is variable
+	 * length.  These fields must never change in order, to guarantee we
+	 * can always read them, no matter what Berkeley DB release we have.
+	 *
+	 * !!!
+	 * The magic and panic fields are NOT protected by any mutex, and for
+	 * this reason cannot be anything more complicated than zero/non-zero.
 	 */
-	DB_MUTEX   mutex;		/* Environment mutex. */
+	u_int32_t magic;		/* Valid region magic number. */
+	u_int32_t panic;		/* Environment is dead. */
+
+	u_int32_t majver;		/* Major DB version number. */
+	u_int32_t minver;		/* Minor DB version number. */
+	u_int32_t patchver;		/* Patch DB version number. */
+
+	u_int32_t envid;		/* Unique environment ID. */
+
+	time_t	  timestamp;		/* Creation time. */
+
+	u_int32_t init_flags;		/* Flags environment initialized with.*/
 
 	/*
-	 * !!!
-	 * Note, the magic and panic fields are NOT protected by any mutex,
-	 * and for this reason cannot be anything more complicated than a
-	 * zero/non-zero value.
+	 * The mtx_regenv mutex protects the environment reference count and
+	 * memory allocation from the primary shared region (the crypto and
+	 * replication implementations allocate memory from the primary shared
+	 * region).  The rest of the fields are initialized at creation time,
+	 * and so don't need mutex protection.  The flags, op_timestamp and
+	 * rep_timestamp fields are used by replication only and are
+	 * protected * by the replication mutex.  The rep_timestamp is
+	 * is not protected when it is used in recovery as that is already
+	 * single threaded.
 	 */
-	u_int32_t  magic;		/* Valid region magic number. */
-	u_int32_t  envid;		/* Unique environment ID. */
-
-	int	   envpanic;		/* Environment is dead. */
-
-	int	   majver;		/* Major DB version number. */
-	int	   minver;		/* Minor DB version number. */
-	int	   patch;		/* Patch DB version number. */
-
-	u_int32_t  init_flags;		/* Flags the env was initialized with.*/
-	roff_t	   cipher_off;		/* Offset of cipher area */
-
-					/* List of regions. */
-	SH_LIST_HEAD(__db_regionh) regionq;
-
+	db_mutex_t mtx_regenv;		/* Refcnt, region allocation mutex. */
 	u_int32_t  refcnt;		/* References to the environment. */
 
-	roff_t	   rep_off;		/* Offset of the replication area. */
-#define	DB_REGENV_REPLOCKED	0x0001	/* Env locked for rep backup. */
-	u_int32_t  flags;		/* Shared environment flags. */
-#define	DB_REGENV_TIMEOUT	30	/* Backup timeout. */
-	time_t	   op_timestamp;	/* Timestamp for operations. */
-	time_t	   rep_timestamp;	/* Timestamp for rep db handles. */
+	u_int32_t region_cnt;		/* Number of REGIONs. */
+	roff_t	  region_off;		/* Offset of region array */
 
-	size_t	   pad;			/* Guarantee that following memory is
+	roff_t	  cipher_off;		/* Offset of cipher area */
+
+	roff_t	  rep_off;		/* Offset of the replication area. */
+#define	DB_REGENV_REPLOCKED	0x0001	/* Env locked for rep backup. */
+	u_int32_t flags;		/* Shared environment flags. */
+#define	DB_REGENV_TIMEOUT	30	/* Backup timeout. */
+	time_t	  op_timestamp;		/* Timestamp for operations. */
+	time_t	  rep_timestamp;	/* Timestamp for rep db handles. */
+
+	size_t	pad;			/* Guarantee that following memory is
 					 * size_t aligned.  This is necessary
 					 * because we're going to store the
 					 * allocation region information there.
@@ -182,24 +209,15 @@ typedef struct __db_reg_env {
 
 /* Per-region shared region information. */
 typedef struct __db_region {
-	/*
-	 * !!!
-	 * The mutex must be the first entry in the structure to guarantee
-	 * correct alignment.
-	 */
-	DB_MUTEX   mutex;		/* Region mutex. */
+	u_int32_t	id;		/* Region id. */
+	reg_type_t	type;		/* Region type. */
 
-	SH_LIST_ENTRY q;		/* Linked list of REGIONs. */
+	roff_t	size_orig;		/* Region size in bytes (original). */
+	roff_t	size;			/* Region size in bytes (adjusted). */
 
-	reg_type_t type;		/* Region type. */
-	u_int32_t  id;			/* Region id. */
+	roff_t	primary;		/* Primary data structure offset. */
 
-	roff_t	   size_orig;		/* Region size in bytes (original). */
-	roff_t	   size;		/* Region size in bytes (adjusted). */
-
-	roff_t	   primary;		/* Primary data structure offset. */
-
-	long	   segid;		/* UNIX shmget(2), Win16 segment ID. */
+	long	segid;			/* UNIX shmget(2), Win16 segment ID. */
 } REGION;
 
 /*
@@ -232,26 +250,6 @@ struct __db_reginfo_t {		/* __db_r_attach IN parameters. */
 	u_int32_t   flags;
 };
 
-/*
- * Mutex maintenance information each subsystem region must keep track
- * of to manage resources adequately.
- */
-typedef struct __db_regmaint_stat_t {
-	u_int32_t	st_hint_hit;
-	u_int32_t	st_hint_miss;
-	u_int32_t	st_records;
-	u_int32_t	st_clears;
-	u_int32_t	st_destroys;
-	u_int32_t	st_max_locks;
-} REGMAINT_STAT;
-
-typedef struct __db_regmaint_t {
-	u_int32_t  reglocks;		/* Maximum # of mutexes we track. */
-	u_int32_t  regmutex_hint;	/* Hint for next slot */
-	REGMAINT_STAT stat;		/* Stats */
-	roff_t	   regmutexes[1];	/* Region mutexes in use. */
-} REGMAINT;
-
 /*
  * R_ADDR	Return a per-process address for a shared region offset.
  * R_OFFSET	Return a shared region offset for a per-process address.
@@ -263,40 +261,13 @@ typedef struct __db_regmaint_t {
 	(F_ISSET((reginfop)->dbenv, DB_ENV_PRIVATE) ? (roff_t)(p) :	\
 	(roff_t)((u_int8_t *)(p) - (u_int8_t *)(reginfop)->addr))
 
-/*
- * R_LOCK	Lock/unlock a region.
- * R_UNLOCK
- */
-#define	R_LOCK(dbenv, reginfo)						\
-	MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex)
-#define	R_UNLOCK(dbenv, reginfo)					\
-	MUTEX_UNLOCK(dbenv, &(reginfo)->rp->mutex)
-
 /* PANIC_CHECK:	Check to see if the DB environment is dead. */
 #define	PANIC_CHECK(dbenv)						\
-	if (!F_ISSET((dbenv), DB_ENV_NOPANIC) &&			\
-	    (dbenv)->reginfo != NULL && ((REGENV *)			\
-	    ((REGINFO *)(dbenv)->reginfo)->primary)->envpanic != 0)	\
+	if ((dbenv)->reginfo != NULL && ((REGENV *)			\
+	    ((REGINFO *)(dbenv)->reginfo)->primary)->panic != 0 &&	\
+	    !F_ISSET((dbenv), DB_ENV_NOPANIC))				\
 		return (__db_panic_msg(dbenv));
 
-#define	PANIC_SET(dbenv, onoff)						\
-	if ((dbenv)->reginfo != NULL)					\
-		((REGENV *)((REGINFO *)					\
-		    (dbenv)->reginfo)->primary)->envpanic = (onoff);
-
-/*
- * All regions are created on 8K boundaries out of sheer paranoia, so we
- * don't make some underlying VM unhappy. Make sure we don't overflow or
- * underflow.
- */
-#define	OS_VMPAGESIZE		(8 * 1024)
-#define	OS_VMROUNDOFF(i) {						\
-	if ((i) <							\
-	    (UINT32_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE)	\
-		(i) += OS_VMPAGESIZE - 1;				\
-	(i) -= (i) % OS_VMPAGESIZE;					\
-}
-
 #if defined(__cplusplus)
 }
 #endif
diff --git a/storage/bdb/dbinc/rep.h b/storage/bdb/dbinc/rep.h
index ec1f290f45a..effecaba8a1 100644
--- a/storage/bdb/dbinc/rep.h
+++ b/storage/bdb/dbinc/rep.h
@@ -1,8 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: rep.h,v 12.22 2005/10/27 13:27:01 bostic Exp $
  */
 
 #ifndef _REP_H_
@@ -10,37 +12,43 @@
 
 #include "dbinc_auto/rep_auto.h"
 
+/*
+ * Message types
+ */
 #define	REP_ALIVE	1	/* I am alive message. */
 #define	REP_ALIVE_REQ	2	/* Request for alive messages. */
 #define	REP_ALL_REQ	3	/* Request all log records greater than LSN. */
-#define	REP_DUPMASTER	4	/* Duplicate master detected; propagate. */
-#define	REP_FILE	5	/* Page of a database file. NOTUSED */
-#define	REP_FILE_FAIL	6	/* File requested does not exist. */
-#define	REP_FILE_REQ	7	/* Request for a database file. NOTUSED */
-#define	REP_LOG		8	/* Log record. */
-#define	REP_LOG_MORE	9	/* There are more log records to request. */
-#define	REP_LOG_REQ	10	/* Request for a log record. */
-#define	REP_MASTER_REQ	11	/* Who is the master */
-#define	REP_NEWCLIENT	12	/* Announces the presence of a new client. */
-#define	REP_NEWFILE	13	/* Announce a log file change. */
-#define	REP_NEWMASTER	14	/* Announces who the master is. */
-#define	REP_NEWSITE	15	/* Announces that a site has heard from a new
+#define	REP_BULK_LOG	4	/* Bulk transfer of log records. */
+#define	REP_BULK_PAGE	5	/* Bulk transfer of pages. */
+#define	REP_DUPMASTER	6	/* Duplicate master detected; propagate. */
+#define	REP_FILE	7	/* Page of a database file. NOTUSED */
+#define	REP_FILE_FAIL	8	/* File requested does not exist. */
+#define	REP_FILE_REQ	9	/* Request for a database file. NOTUSED */
+#define	REP_LOG		10	/* Log record. */
+#define	REP_LOG_MORE	11	/* There are more log records to request. */
+#define	REP_LOG_REQ	12	/* Request for a log record. */
+#define	REP_MASTER_REQ	13	/* Who is the master */
+#define	REP_NEWCLIENT	14	/* Announces the presence of a new client. */
+#define	REP_NEWFILE	15	/* Announce a log file change. */
+#define	REP_NEWMASTER	16	/* Announces who the master is. */
+#define	REP_NEWSITE	17	/* Announces that a site has heard from a new
 				 * site; like NEWCLIENT, but indirect.  A
 				 * NEWCLIENT message comes directly from the new
 				 * client while a NEWSITE comes indirectly from
 				 * someone who heard about a NEWSITE.
 				 */
-#define	REP_PAGE	16	/* Database page. */
-#define	REP_PAGE_FAIL	17	/* Requested page does not exist. */
-#define	REP_PAGE_MORE	18	/* There are more pages to request. */
-#define	REP_PAGE_REQ	19	/* Request for a database page. */
-#define	REP_UPDATE	20	/* Environment hotcopy information. */
-#define	REP_UPDATE_REQ	21	/* Request for hotcopy information. */
-#define	REP_VERIFY	22	/* A log record for verification. */
-#define	REP_VERIFY_FAIL	23	/* The client is outdated. */
-#define	REP_VERIFY_REQ	24	/* Request for a log record to verify. */
-#define	REP_VOTE1	25	/* Send out your information for an election. */
-#define	REP_VOTE2	26	/* Send a "you are master" vote. */
+#define	REP_PAGE	18	/* Database page. */
+#define	REP_PAGE_FAIL	19	/* Requested page does not exist. */
+#define	REP_PAGE_MORE	20	/* There are more pages to request. */
+#define	REP_PAGE_REQ	21	/* Request for a database page. */
+#define	REP_REREQUEST	22	/* Force rerequest. */
+#define	REP_UPDATE	23	/* Environment hotcopy information. */
+#define	REP_UPDATE_REQ	24	/* Request for hotcopy information. */
+#define	REP_VERIFY	25	/* A log record for verification. */
+#define	REP_VERIFY_FAIL	26	/* The client is outdated. */
+#define	REP_VERIFY_REQ	27	/* Request for a log record to verify. */
+#define	REP_VOTE1	28	/* Send out your information for an election. */
+#define	REP_VOTE2	29	/* Send a "you are master" vote. */
 
 /*
  * REP_PRINT_MESSAGE
@@ -54,7 +62,7 @@
 #ifdef DIAGNOSTIC
 #define	REP_PRINT_MESSAGE(dbenv, eid, rp, str)				\
 	__rep_print_message(dbenv, eid, rp, str)
-#define RPRINT(e, r, x) do {						\
+#define	RPRINT(e, r, x) do {						\
 	if (FLD_ISSET((e)->verbose, DB_VERB_REPLICATION)) {		\
 		DB_MSGBUF_INIT(&mb);					\
 		if ((e)->db_errpfx == NULL) {				\
@@ -69,23 +77,22 @@
 		__db_msgadd x;						\
 		DB_MSGBUF_FLUSH((e), &mb);				\
 	}								\
-} while (0)	
+} while (0)
 #else
 #define	REP_PRINT_MESSAGE(dbenv, eid, rp, str)
-#define RPRINT(e, r, x)
+#define	RPRINT(e, r, x)
 #endif
 
 /*
  * Election gen file name
- *	The file contains an egen number for an election this client
- * has NOT participated in.  I.e. it is the number of a future
- * election.  We create it when we create the rep region, if it
- * doesn't already exist and initialize egen to 1.  If it does
- * exist, we read it when we create the rep region.  We write it
- * immediately before sending our VOTE1 in an election.  That way,
- * if a client has ever sent a vote for any election, the file is
- * already going to be updated to reflect a future election,
- * should it crash.
+ * The file contains an egen number for an election this client has NOT
+ * participated in.  I.e. it is the number of a future election.  We
+ * create it when we create the rep region, if it doesn't already exist
+ * and initialize egen to 1.  If it does exist, we read it when we create
+ * the rep region.  We write it immediately before sending our VOTE1 in
+ * an election.  That way, if a client has ever sent a vote for any
+ * election, the file is already going to be updated to reflect a future
+ * election, should it crash.
  */
 #define	REP_EGENNAME	"__db.rep.egen"
 
@@ -97,17 +104,21 @@ typedef enum {
 	REP_PG		/* Pg database. */
 } repdb_t;
 
-/* Shared replication structure. */
+/* Macros to lock/unlock the replication region as a whole. */
+#define	REP_SYSTEM_LOCK(dbenv)						\
+	MUTEX_LOCK(dbenv, ((DB_REP *)					\
+	    (dbenv)->rep_handle)->region->mtx_region)
+#define	REP_SYSTEM_UNLOCK(dbenv)					\
+	MUTEX_UNLOCK(dbenv, ((DB_REP *)					\
+	    (dbenv)->rep_handle)->region->mtx_region)
 
+/*
+ * REP --
+ * Shared replication structure.
+ */
 typedef struct __rep {
-	/*
-	 * Due to alignment constraints on some architectures (e.g. HP-UX),
-	 * DB_MUTEXes must be the first element of shalloced structures,
-	 * and as a corollary there can be only one per structure.  Thus,
-	 * db_mutex_off points to a mutex in a separately-allocated chunk.
-	 */
-	DB_MUTEX	mutex;		/* Region lock. */
-	roff_t		db_mutex_off;	/* Client database mutex. */
+	db_mutex_t	mtx_region;	/* Region mutex. */
+	db_mutex_t	mtx_clientdb;	/* Client database mutex. */
 	roff_t		tally_off;	/* Offset of the tally region. */
 	roff_t		v2tally_off;	/* Offset of the vote2 tally region. */
 	int		eid;		/* Environment id. */
@@ -136,8 +147,8 @@ typedef struct __rep {
 	int		in_recovery;	/* Running recovery now. */
 
 	/* Backup information. */
-	int		nfiles;		/* Number of files we have info on. */
-	int		curfile;	/* Current file we're getting. */
+	u_int32_t	nfiles;		/* Number of files we have info on. */
+	u_int32_t	curfile;	/* Current file we're getting. */
 	__rep_fileinfo_args	*curinfo;	/* Current file info ptr. */
 	void		*finfo;		/* Current file info buffer. */
 	void		*nextinfo;	/* Next file info buffer. */
@@ -160,22 +171,32 @@ typedef struct __rep {
 	DB_LSN		w_lsn;		/* Winner LSN. */
 	u_int32_t	w_tiebreaker;	/* Winner tiebreaking value. */
 	int		votes;		/* Number of votes for this site. */
+	u_int32_t	esec;		/* Election start seconds. */
+	u_int32_t	eusec;		/* Election start useconds. */
 
 	/* Statistics. */
 	DB_REP_STAT	stat;
 
+	/* Configuration. */
+#define	REP_C_BULK		0x00001		/* Bulk transfer. */
+#define	REP_C_DELAYCLIENT	0x00002		/* Delay client sync-up. */
+#define	REP_C_NOAUTOINIT	0x00004		/* No auto initialization. */
+#define	REP_C_NOWAIT		0x00008		/* Immediate error return. */
+	u_int32_t	config;		/* Configuration flags. */
+
 #define	REP_F_CLIENT		0x00001		/* Client replica. */
-#define	REP_F_EPHASE1		0x00002		/* In phase 1 of election. */
-#define	REP_F_EPHASE2		0x00004		/* In phase 2 of election. */
-#define	REP_F_MASTER		0x00008		/* Master replica. */
-#define	REP_F_MASTERELECT	0x00010		/* Master elect */
-#define	REP_F_NOARCHIVE		0x00020		/* Rep blocks log_archive */
-#define	REP_F_READY		0x00040		/* Wait for txn_cnt to be 0. */
-#define	REP_F_RECOVER_LOG	0x00080		/* In recovery - log. */
-#define	REP_F_RECOVER_PAGE	0x00100		/* In recovery - pages. */
-#define	REP_F_RECOVER_UPDATE	0x00200		/* In recovery - files. */
-#define	REP_F_RECOVER_VERIFY	0x00400		/* In recovery - verify. */
-#define	REP_F_TALLY		0x00800		/* Tallied vote before elect. */
+#define	REP_F_DELAY		0x00002		/* Delaying client sync-up. */
+#define	REP_F_EPHASE1		0x00004		/* In phase 1 of election. */
+#define	REP_F_EPHASE2		0x00008		/* In phase 2 of election. */
+#define	REP_F_MASTER		0x00010		/* Master replica. */
+#define	REP_F_MASTERELECT	0x00020		/* Master elect */
+#define	REP_F_NOARCHIVE		0x00040		/* Rep blocks log_archive */
+#define	REP_F_READY		0x00080		/* Wait for txn_cnt to be 0. */
+#define	REP_F_RECOVER_LOG	0x00100		/* In recovery - log. */
+#define	REP_F_RECOVER_PAGE	0x00200		/* In recovery - pages. */
+#define	REP_F_RECOVER_UPDATE	0x00400		/* In recovery - files. */
+#define	REP_F_RECOVER_VERIFY	0x00800		/* In recovery - verify. */
+#define	REP_F_TALLY		0x01000		/* Tallied vote before elect. */
 	u_int32_t	flags;
 } REP;
 
@@ -184,8 +205,8 @@ typedef struct __rep {
  * REP_F_READY and all REP_F_RECOVER*.  This must change if the values
  * of the flags change.
  */
-#define	REP_F_RECOVER_MASK					\
-    (REP_F_READY | REP_F_RECOVER_LOG | REP_F_RECOVER_PAGE |	\
+#define	REP_F_RECOVER_MASK						\
+    (REP_F_READY | REP_F_RECOVER_LOG | REP_F_RECOVER_PAGE |		\
      REP_F_RECOVER_UPDATE | REP_F_RECOVER_VERIFY)
 
 #define	IN_ELECTION(R)		F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2)
@@ -207,24 +228,43 @@ typedef struct __rep {
 	    REP_F_RECOVER_PAGE))
 
 /*
- * Macros to figure out if we need to do replication pre/post-amble
- * processing.
+ * Macros to figure out if we need to do replication pre/post-amble processing.
+ * Skip for specific DB handles owned by the replication layer, either because
+ * replication is running recovery or because it's a handle entirely owned by
+ * the replication code (replication opens its own databases to track state).
  */
-#define	IS_REPLICATED(E, D)						\
-	(!F_ISSET((D), DB_AM_RECOVER | DB_AM_REPLICATION) &&		\
-	REP_ON(E) && ((DB_REP *)((E)->rep_handle))->region != NULL &&	\
-	((DB_REP *)((E)->rep_handle))->region->flags != 0)
-
-#define	IS_ENV_REPLICATED(E) (REP_ON(E) &&		\
+#define	IS_ENV_REPLICATED(E) (REP_ON(E) &&				\
 	((DB_REP *)((E)->rep_handle))->region != NULL &&		\
 	((DB_REP *)((E)->rep_handle))->region->flags != 0)
 
+/*
+ * Gap processing flags.  These provide control over the basic
+ * gap processing algorithm for some special cases.
+ */
+#define	REP_GAP_FORCE		0x001	/* Force a request for a gap. */
+#define	REP_GAP_REREQUEST	0x002	/* Gap request is a forced rerequest. */
+					/* REREQUEST is a superset of FORCE. */
+
+/*
+ * Basic pre/post-amble processing.
+ */
+#define	REPLICATION_WRAP(dbenv, func_call, ret) do {			\
+	int __rep_check, __t_ret;					\
+	__rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;			\
+	if (__rep_check && ((ret) = __env_rep_enter(dbenv, 0)) != 0)	\
+		return ((ret));						\
+	(ret) = func_call;						\
+	if (__rep_check &&						\
+	    (__t_ret = __env_db_rep_exit(dbenv)) != 0 && (ret) == 0)	\
+		(ret) = __t_ret;					\
+} while (0)
+
 /*
  * Per-process replication structure.
  *
  * There are 2 mutexes used in replication.
- * 1.  rep_mutexp - This protects the fields of the rep region above.
- * 2.  db_mutexp - This protects the per-process flags, and bookkeeping
+ * 1.  mtx_region - This protects the fields of the rep region above.
+ * 2.  mtx_clientdb - This protects the per-process flags, and bookkeeping
  * database and all of the components that maintain it.  Those
  * components include the following fields in the log region (see log.h):
  *	a. ready_lsn
@@ -233,25 +273,23 @@ typedef struct __rep {
  *	d. wait_recs
  *	e. rcvd_recs
  *	f. max_wait_lsn
- * These fields in the log region are NOT protected by the log
- * region lock at all.
+ * These fields in the log region are NOT protected by the log region lock at
+ * all.
  *
- * Note that the per-process flags should truly be protected by a
- * special per-process thread mutex, but it is currently set in so
- * isolated a manner that it didn't make sense to do so and in most
- * case we're already holding the db_mutexp anyway.
+ * Note that the per-process flags should truly be protected by a special
+ * per-process thread mutex, but it is currently set in so isolated a manner
+ * that it didn't make sense to do so and in most case we're already holding
+ * the mtx_clientdb anyway.
  *
- * The lock ordering protocol is that db_mutexp must be acquired
- * first and then either rep_mutexp, or the log region mutex may
- * be acquired if necessary.
+ * The lock ordering protocol is that mtx_clientdb must be acquired first and
+ * then either REP->mtx_region, or the LOG->mtx_region mutex may be acquired if
+ * necessary.
  */
 struct __db_rep {
-	DB_MUTEX	*rep_mutexp;	/* Mutex for rep region */
-
-	DB_MUTEX	*db_mutexp;	/* Mutex for bookkeeping database. */
 	DB		*rep_db;	/* Bookkeeping database. */
 
 	REP		*region;	/* In memory structure. */
+	u_int8_t	*bulk;		/* Shared memory bulk area. */
 #define	DBREP_OPENFILES		0x0001	/* This handle has opened files. */
 	u_int32_t	flags;		/* per-process flags. */
 };
@@ -265,7 +303,7 @@ struct __db_rep {
  * the rest of the structure changes or when the message numbers change.
  */
 typedef struct __rep_control {
-#define	DB_REPVERSION	2
+#define	DB_REPVERSION	3
 	u_int32_t	rep_version;	/* Replication version number. */
 	u_int32_t	log_version;	/* Log version number. */
 
@@ -290,6 +328,43 @@ typedef struct __rep_vtally {
 	int		eid;		/* Voter's ID. */
 } REP_VTALLY;
 
+/*
+ * The REP_THROTTLE_ONLY flag is used to do throttle processing only.
+ * If set, it will only allow sending the REP_*_MORE message, but not
+ * the normal, non-throttled message.  It is used to support throttling
+ * with bulk transfer.
+ */
+/* Flags for __rep_send_throttle. */
+#define	REP_THROTTLE_ONLY	0x0001	/* Send _MORE message only. */
+
+/* Throttled message processing information. */
+typedef struct __rep_throttle {
+	DB_LSN		lsn;		/* LSN of this record. */
+	DBT		*data_dbt;	/* DBT of this record. */
+	u_int32_t	gbytes;		/* This call's max gbytes sent. */
+	u_int32_t	bytes;		/* This call's max bytes sent. */
+	u_int32_t	type;		/* Record type. */
+} REP_THROTTLE;
+
+/* Bulk processing information. */
+/*
+ * !!!
+ * We use a uintptr_t for the offset.  We'd really like to use a ptrdiff_t
+ * since that really is what it is.  But ptrdiff_t is not portable and
+ * doesn't exist everywhere.
+ */
+typedef struct __rep_bulk {
+	u_int8_t	*addr;		/* Address of bulk buffer. */
+	uintptr_t	*offp;		/* Ptr to current offset into buffer. */
+	u_int32_t	len;		/* Bulk buffer length. */
+	u_int32_t	type;		/* Item type in buffer (log, page). */
+	DB_LSN		lsn;		/* First LSN in buffer. */
+	int		eid;		/* ID of potential recipients. */
+#define	BULK_FORCE	0x001		/* Force buffer after this record. */
+#define	BULK_XMIT	0x002		/* Buffer in transit. */
+	u_int32_t	*flagsp;	/* Buffer flags. */
+} REP_BULK;
+
 /*
  * This structure takes care of representing a transaction.
  * It holds all the records, sorted by page number so that
diff --git a/storage/bdb/dbinc/shqueue.h b/storage/bdb/dbinc/shqueue.h
index 8d7e4eef922..55cba7fc179 100644
--- a/storage/bdb/dbinc/shqueue.h
+++ b/storage/bdb/dbinc/shqueue.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: shqueue.h,v 11.15 2004/03/24 20:37:37 bostic Exp $
+ * $Id: shqueue.h,v 12.2 2005/08/12 13:17:21 bostic Exp $
  */
 
 #ifndef	_SYS_SHQUEUE_H_
@@ -227,12 +227,14 @@ struct {								\
 #define	__SH_TAILQ_LAST_OFF(head)					\
 	((ssize_t *)(((u_int8_t *)(head)) + (head)->stqh_last))
 
-#define	SH_TAILQ_LAST(head, field, type)				\
-	(SH_TAILQ_EMPTY(head) ? NULL :				\
-	(struct type *)((ssize_t)(head) +				\
+#define	SH_TAILQ_LASTP(head, field, type)				\
+	((struct type *)((ssize_t)(head) +				\
 	 ((ssize_t)((head)->stqh_last) -				\
 	 ((ssize_t)SH_PTR_TO_OFF(SH_TAILQ_FIRST(head, type),		\
-		&(SH_TAILQ_FIRST(head, type)->field.stqe_next))))))
+		&(SH_TAILQ_FIRSTP(head, type)->field.stqe_next))))))
+
+#define	SH_TAILQ_LAST(head, field, type)				\
+	(SH_TAILQ_EMPTY(head) ? NULL : SH_TAILQ_LASTP(head, field, type))
 
 /*
  * Given correct A.next: B.prev = SH_TAILQ_NEXT_TO_PREV(A)
diff --git a/storage/bdb/dbinc/tcl_db.h b/storage/bdb/dbinc/tcl_db.h
index f1adea5ae7c..4bc68ba12bb 100644
--- a/storage/bdb/dbinc/tcl_db.h
+++ b/storage/bdb/dbinc/tcl_db.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: tcl_db.h,v 11.40 2004/09/22 03:40:20 bostic Exp $
+ * $Id: tcl_db.h,v 12.4 2005/08/08 14:52:30 bostic Exp $
  */
 
 #ifndef _DB_TCL_DB_H_
@@ -13,8 +13,7 @@
 #define	MSG_SIZE 100		/* Message size */
 
 enum INFOTYPE {
-    I_ENV, I_DB, I_DBC, I_TXN, I_MP,
-    I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX, I_SEQ};
+    I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_SEQ};
 
 #define	MAX_ID		8	/* Maximum number of sub-id's we need */
 #define	DBTCL_PREP	64	/* Size of txn_recover preplist */
@@ -22,33 +21,6 @@ enum INFOTYPE {
 #define	DBTCL_DBM	1
 #define	DBTCL_NDBM	2
 
-typedef struct _mutex_entry {
-	union {
-		struct {
-			DB_MUTEX	real_m;
-			int		real_val;
-		} r;
-		/*
-		 * This is here to make sure that each of the mutex structures
-		 * are 16-byte aligned, which is required on HP architectures.
-		 * The db_mutex_t structure might be >32 bytes itself, or the
-		 * real_val might push it over the 32 byte boundary.  The best
-		 * we can do is use a 48 byte boundary.
-		 */
-		char c[48];
-	} u;
-} _MUTEX_ENTRY;
-
-#define	m	u.r.real_m
-#define	val	u.r.real_val
-
-typedef struct _mutex_data {
-	DB_ENV		*env;
-	REGINFO		 reginfo;
-	_MUTEX_ENTRY	*marray;
-	size_t		 size;
-} _MUTEX_DATA;
-
 /*
  * Why use a home grown package over the Tcl_Hash functions?
  *
@@ -83,15 +55,14 @@ typedef struct dbtcl_info {
 	char *i_name;
 	enum INFOTYPE i_type;
 	union infop {
-		DB_ENV *envp;
-		void *anyp;
 		DB *dbp;
 		DBC *dbcp;
-		DB_TXN *txnp;
-		DB_MPOOLFILE *mp;
+		DB_ENV *envp;
 		DB_LOCK *lock;
-		_MUTEX_DATA *mutex;
 		DB_LOGC *logc;
+		DB_MPOOLFILE *mp;
+		DB_TXN *txnp;
+		void *anyp;
 	} un;
 	union data {
 		int anydata;
@@ -101,6 +72,7 @@ typedef struct dbtcl_info {
 	union data2 {
 		int anydata;
 		int pagesz;
+		DB_COMPACT *c_data;
 	} und2;
 	DBT i_lockobj;
 	FILE *i_err;
@@ -128,7 +100,6 @@ typedef struct dbtcl_info {
 #define	i_txnp un.txnp
 #define	i_mp un.mp
 #define	i_lock un.lock
-#define	i_mutex un.mutex
 #define	i_logc un.logc
 
 #define	i_data und.anydata
@@ -136,12 +107,12 @@ typedef struct dbtcl_info {
 #define	i_locker und.lockid
 #define	i_data2 und2.anydata
 #define	i_pgsz und2.pagesz
+#define	i_cdata und2.c_data
 
 #define	i_envtxnid i_otherid[0]
 #define	i_envmpid i_otherid[1]
 #define	i_envlockid i_otherid[2]
-#define	i_envmutexid i_otherid[3]
-#define	i_envlogcid i_otherid[4]
+#define	i_envlogcid i_otherid[3]
 
 #define	i_mppgid  i_otherid[0]
 
diff --git a/storage/bdb/dbinc/txn.h b/storage/bdb/dbinc/txn.h
index 514b740e88a..845cdee2349 100644
--- a/storage/bdb/dbinc/txn.h
+++ b/storage/bdb/dbinc/txn.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: txn.h,v 11.54 2004/09/24 00:43:18 bostic Exp $
+ * $Id: txn.h,v 12.7 2005/10/13 00:53:00 bostic Exp $
  */
 
 #ifndef	_TXN_H_
@@ -39,18 +39,26 @@ struct __txn_logrec;	typedef struct __txn_logrec DB_TXNLOGREC;
 typedef struct __txn_detail {
 	u_int32_t txnid;		/* current transaction id
 					   used to link free list also */
+	pid_t pid;			/* Process owning txn */
+	db_threadid_t tid;	/* Thread owning txn */
+
 	DB_LSN	last_lsn;		/* last lsn written for this txn */
 	DB_LSN	begin_lsn;		/* lsn of begin record */
 	roff_t	parent;			/* Offset of transaction's parent. */
+	roff_t	name;			/* Offset of txn name. */
+
+	SH_TAILQ_HEAD(__tdkids)	kids;	/* Linked list of child txn detail. */
+	SH_TAILQ_ENTRY		klinks;
 
 #define	TXN_RUNNING		1
 #define	TXN_ABORTED		2
 #define	TXN_PREPARED		3
 #define	TXN_COMMITTED		4
 	u_int32_t status;		/* status of the transaction */
-#define	TXN_DTL_COLLECTED	0x1
-#define	TXN_DTL_RESTORED	0x2
-	u_int32_t flags;		/* collected during txn_recover */
+#define	TXN_DTL_COLLECTED	0x1	/* collected during txn_recover */
+#define	TXN_DTL_RESTORED	0x2	/* prepared txn restored */
+#define	TXN_DTL_INMEMORY	0x4	/* uses in memory logs */
+	u_int32_t flags;
 
 	SH_TAILQ_ENTRY	links;		/* free/active list */
 
@@ -80,33 +88,40 @@ struct __db_txnmgr {
 	/*
 	 * These fields need to be protected for multi-threaded support.
 	 *
-	 * !!!
-	 * As this structure is allocated in per-process memory, the mutex may
-	 * need to be stored elsewhere on architectures unable to support
-	 * mutexes in heap memory, e.g., HP/UX 9.
+	 * Lock list of active transactions (including the content of each
+	 * TXN_DETAIL structure on the list).
 	 */
-	DB_MUTEX	*mutexp;	/* Lock list of active transactions
-					 * (including the content of each
-					 * TXN_DETAIL structure on the list).
-					 */
+	db_mutex_t mutex;
 					/* List of active transactions. */
 	TAILQ_HEAD(_chain, __db_txn)	txn_chain;
-	u_int32_t	 n_discards;	/* Number of txns discarded. */
 
-/* These fields are never updated after creation, and so not protected. */
-	DB_ENV		*dbenv;		/* Environment. */
-	REGINFO		 reginfo;	/* Region information. */
+	u_int32_t n_discards;		/* Number of txns discarded. */
+
+	/* These fields are never updated after creation, so not protected. */
+	DB_ENV	*dbenv;			/* Environment. */
+	REGINFO	 reginfo;		/* Region information. */
 };
 
+/* Macros to lock/unlock the transaction region as a whole. */
+#define	TXN_SYSTEM_LOCK(dbenv)						\
+	MUTEX_LOCK(dbenv, ((DB_TXNREGION *)((DB_TXNMGR *)		\
+	    (dbenv)->tx_handle)->reginfo.primary)->mtx_region)
+#define	TXN_SYSTEM_UNLOCK(dbenv)					\
+	MUTEX_UNLOCK(dbenv, ((DB_TXNREGION *)((DB_TXNMGR *)		\
+	    (dbenv)->tx_handle)->reginfo.primary)->mtx_region)
+
 /*
  * DB_TXNREGION --
  *	The primary transaction data structure in the shared memory region.
  */
 struct __db_txnregion {
+	db_mutex_t	mtx_region;	/* Region mutex. */
+
 	u_int32_t	maxtxns;	/* maximum number of active TXNs */
 	u_int32_t	last_txnid;	/* last transaction id given out */
 	u_int32_t	cur_maxid;	/* current max unused id. */
 
+	db_mutex_t	mtx_ckp;	/* Single thread checkpoints. */
 	DB_LSN		last_ckp;	/* lsn of the last checkpoint */
 	time_t		time_ckp;	/* time of last checkpoint */
 
@@ -116,11 +131,6 @@ struct __db_txnregion {
 	u_int32_t	flags;
 					/* active TXN list */
 	SH_TAILQ_HEAD(__active) active_txn;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-#define	TXN_MAINT_SIZE	(sizeof(roff_t) * DB_MAX_HANDLES)
-
-	roff_t		maint_off;	/* offset of region maintenance info */
-#endif
 };
 
 /*
diff --git a/storage/bdb/dbinc/xa.h b/storage/bdb/dbinc/xa.h
index 71333c2c97b..80c4032d20d 100644
--- a/storage/bdb/dbinc/xa.h
+++ b/storage/bdb/dbinc/xa.h
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: xa.h,v 11.7 2004/01/28 03:36:02 bostic Exp $
+ * $Id: xa.h,v 12.1 2005/06/16 20:21:49 bostic Exp $
  */
 /*
  * Start of xa.h header
diff --git a/storage/bdb/dbm/dbm.c b/storage/bdb/dbm/dbm.c
index 842b8d0a445..a7f484b4357 100644
--- a/storage/bdb/dbm/dbm.c
+++ b/storage/bdb/dbm/dbm.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: dbm.c,v 11.18 2004/05/10 21:26:47 bostic Exp $
+ * $Id: dbm.c,v 12.2 2005/06/16 20:21:49 bostic Exp $
  */
 
 #include "db_config.h"
@@ -98,7 +98,7 @@ __db_dbm_init(file)
 	if (__cur_db != NULL)
 		dbm_close(__cur_db);
 	if ((__cur_db =
-	    dbm_open(file, O_CREAT | O_RDWR, __db_omode("rw----"))) != NULL)
+	    dbm_open(file, O_CREAT | O_RDWR, __db_omode(OWNER_RW))) != NULL)
 		return (0);
 	if ((__cur_db = dbm_open(file, O_RDONLY, 0)) != NULL)
 		return (0);
@@ -123,7 +123,8 @@ __db_dbm_fetch(key)
 
 	if (__cur_db == NULL) {
 		__db_no_open();
-		item.dptr = 0;
+		item.dptr = NULL;
+		item.dsize = 0;
 		return (item);
 	}
 	return (dbm_fetch(__cur_db, key));
@@ -136,7 +137,8 @@ __db_dbm_firstkey()
 
 	if (__cur_db == NULL) {
 		__db_no_open();
-		item.dptr = 0;
+		item.dptr = NULL;
+		item.dsize = 0;
 		return (item);
 	}
 	return (dbm_firstkey(__cur_db));
@@ -152,7 +154,8 @@ __db_dbm_nextkey(key)
 
 	if (__cur_db == NULL) {
 		__db_no_open();
-		item.dptr = 0;
+		item.dptr = NULL;
+		item.dsize = 0;
 		return (item);
 	}
 	return (dbm_nextkey(__cur_db));
diff --git a/storage/bdb/dbreg/dbreg.c b/storage/bdb/dbreg/dbreg.c
index 930c8bb7ade..eb3e75cc739 100644
--- a/storage/bdb/dbreg/dbreg.c
+++ b/storage/bdb/dbreg/dbreg.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: dbreg.c,v 11.90 2004/10/15 16:59:39 bostic Exp $
+ * $Id: dbreg.c,v 12.12 2005/10/14 14:40:41 bostic Exp $
  */
 
 #include "db_config.h"
@@ -21,7 +21,7 @@
 #include "dbinc/txn.h"
 #include "dbinc/db_am.h"
 
-static int __dbreg_push_id __P((DB_ENV *, int32_t));
+static int __dbreg_push_id __P((DB_ENV *, DB *, int32_t));
 static int __dbreg_pop_id __P((DB_ENV *, int32_t *));
 static int __dbreg_pluck_id __P((DB_ENV *, int32_t));
 
@@ -60,9 +60,9 @@ static int __dbreg_pluck_id __P((DB_ENV *, int32_t));
  *	region list so they can get logged on checkpoints.
  *
  *	An FNAME that may/does have a valid id must be accessed under
- *	protection of the fq_mutex, with the following exception:
+ *	protection of the mtx_filelist, with the following exception:
  *
- *	We don't want to have to grab the fq_mutex on every log
+ *	We don't want to have to grab the mtx_filelist on every log
  *	record, and it should be safe not to do so when we're just
  *	looking at the id, because once allocated, the id should
  *	not change under a handle until the handle is closed.
@@ -114,7 +114,7 @@ __dbreg_setup(dbp, name, create_txnid)
 	namep = NULL;
 
 	/* Allocate an FNAME and, if necessary, a buffer for the name itself. */
-	R_LOCK(dbenv, infop);
+	LOG_SYSTEM_LOCK(dbenv);
 	if ((ret = __db_shalloc(infop, sizeof(FNAME), 0, &fnp)) != 0)
 		goto err;
 	memset(fnp, 0, sizeof(FNAME));
@@ -127,7 +127,7 @@ __dbreg_setup(dbp, name, create_txnid)
 	} else
 		fnp->name_off = INVALID_ROFF;
 
-	R_UNLOCK(dbenv, infop);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	/*
 	 * Fill in all the remaining info that we'll need later to register
@@ -143,7 +143,7 @@ __dbreg_setup(dbp, name, create_txnid)
 
 	return (0);
 
-err:	R_UNLOCK(dbenv, infop);
+err:	LOG_SYSTEM_UNLOCK(dbenv);
 	if (ret == ENOMEM)
 		__db_err(dbenv,
     "Logging region out of memory; you may need to increase its size");
@@ -175,16 +175,16 @@ __dbreg_teardown(dbp)
 	 * We may not have an FNAME if we were never opened.  This is not an
 	 * error.
 	 */
-	if (fnp == NULL)
+	if (fnp == NULL || F_ISSET(fnp, DB_FNAME_NOTLOGGED))
 		return (0);
 
 	DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
 
-	R_LOCK(dbenv, infop);
+	LOG_SYSTEM_LOCK(dbenv);
 	if (fnp->name_off != INVALID_ROFF)
 		__db_shalloc_free(infop, R_ADDR(infop, fnp->name_off));
 	__db_shalloc_free(infop, fnp);
-	R_UNLOCK(dbenv, infop);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	dbp->log_filename = NULL;
 
@@ -216,22 +216,22 @@ __dbreg_new_id(dbp, txn)
 	lp = dblp->reginfo.primary;
 	fnp = dbp->log_filename;
 
-	/* The fq_mutex protects the FNAME list and id management. */
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	/* The mtx_filelist protects the FNAME list and id management. */
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 	if (fnp->id != DB_LOGFILEID_INVALID) {
-		MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 		return (0);
 	}
 	if ((ret = __dbreg_get_id(dbp, txn, &id)) == 0)
 		fnp->id = id;
-	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 	return (ret);
 }
 
 /*
  * __dbreg_get_id --
  *	Assign an unused dbreg id to this database handle.
- *	Assume the caller holds the fq_mutex locked.  Assume the
+ *	Assume the caller holds the mtx_filelist locked.  Assume the
  *	caller will set the fnp->id field with the id we return.
  *
  * PUBLIC: int __dbreg_get_id __P((DB *, DB_TXN *, int32_t *));
@@ -242,10 +242,8 @@ __dbreg_get_id(dbp, txn, idp)
 	DB_TXN *txn;
 	int32_t *idp;
 {
-	DBT fid_dbt, r_name;
 	DB_ENV *dbenv;
 	DB_LOG *dblp;
-	DB_LSN unused;
 	FNAME *fnp;
 	LOG *lp;
 	int32_t id;
@@ -269,7 +267,9 @@ __dbreg_get_id(dbp, txn, idp)
 	if (id == DB_LOGFILEID_INVALID)
 		id = lp->fid_max++;
 
-	fnp->is_durable = !F_ISSET(dbp, DB_AM_NOT_DURABLE);
+	/* If the file is durable (i.e., not, not-durable), mark it as such. */
+	if (!F_ISSET(dbp, DB_AM_NOT_DURABLE))
+		F_SET(fnp, DB_FNAME_DURABLE);
 
 	/* Hook the FNAME into the list of open files. */
 	SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
@@ -280,19 +280,9 @@ __dbreg_get_id(dbp, txn, idp)
 	 */
 	DB_ASSERT(!F_ISSET(dbp, DB_AM_RECOVER));
 
-	memset(&fid_dbt, 0, sizeof(fid_dbt));
-	memset(&r_name, 0, sizeof(r_name));
-	if (fnp->name_off != INVALID_ROFF) {
-		r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
-		r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1;
-	}
-	fid_dbt.data = dbp->fileid;
-	fid_dbt.size = DB_FILE_ID_LEN;
-	if ((ret = __dbreg_register_log(dbenv, txn, &unused,
-	    F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0,
-	    DBREG_OPEN, r_name.size == 0 ? NULL : &r_name, &fid_dbt, id,
-	    fnp->s_type, fnp->meta_pgno, fnp->create_txnid)) != 0)
+	if ((ret = __dbreg_log_id(dbp, txn, id, 0)) != 0)
 		goto err;
+
 	/*
 	 * Once we log the create_txnid, we need to make sure we never
 	 * log it again (as might happen if this is a replication client
@@ -345,8 +335,8 @@ __dbreg_assign_id(dbp, id)
 	close_dbp = NULL;
 	close_fnp = NULL;
 
-	/* The fq_mutex protects the FNAME list and id management. */
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	/* The mtx_filelist protects the FNAME list and id management. */
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 
 	/* We should only call this on DB handles that have no ID. */
 	DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
@@ -358,8 +348,8 @@ __dbreg_assign_id(dbp, id)
 	 */
 	if (__dbreg_id_to_fname(dblp, id, 1, &close_fnp) == 0) {
 		/*
-		 * We want to save off any dbp we have open with this id.
-		 * We can't safely close it now, because we hold the fq_mutex,
+		 * We want to save off any dbp we have open with this id.  We
+		 * can't safely close it now, because we hold the mtx_filelist,
 		 * but we should be able to rely on it being open in this
 		 * process, and we're running recovery, so no other thread
 		 * should muck with it if we just put off closing it until
@@ -391,7 +381,9 @@ cont:	if ((ret = __dbreg_pluck_id(dbenv, id)) != 0)
 
 	/* Now go ahead and assign the id to our dbp. */
 	fnp->id = id;
-	fnp->is_durable = !F_ISSET(dbp, DB_AM_NOT_DURABLE);
+	/* If the file is durable (i.e., not, not-durable), mark it as such. */
+	if (!F_ISSET(dbp, DB_AM_NOT_DURABLE))
+		F_SET(fnp, DB_FNAME_DURABLE);
 	SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
 
 	/*
@@ -402,7 +394,7 @@ cont:	if ((ret = __dbreg_pluck_id(dbenv, id)) != 0)
 	if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
 		(void)__dbreg_revoke_id(dbp, 1, id);
 
-err:	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+err:	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 
 	/* There's nothing useful that our caller can do if this close fails. */
 	if (close_dbp != NULL)
@@ -452,21 +444,30 @@ __dbreg_revoke_id(dbp, have_lock, force_id)
 	else
 		id = fnp->id;
 	if (!have_lock)
-		MUTEX_LOCK(dbenv, &lp->fq_mutex);
+		MUTEX_LOCK(dbenv, lp->mtx_filelist);
 
 	fnp->id = DB_LOGFILEID_INVALID;
 
 	/* Remove the FNAME from the list of open files. */
 	SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
 
-	/* Remove this id from the dbentry table. */
-	__dbreg_rem_dbentry(dblp, id);
-
-	/* Push this id onto the free list. */
-	ret = __dbreg_push_id(dbenv, id);
+	/*
+	 * Remove this id from the dbentry table and push it onto the
+	 * free list.
+	 */
+	if ((ret = __dbreg_rem_dbentry(dblp, id)) == 0) {
+		/*
+		 * If we are not in recovery but the file was opened
+		 * for a recovery operation, then this process aborted
+		 * a transaction for another process and the id may
+		 * still be in use, so don't reuse this id.
+		 */
+		if (!F_ISSET(dbp, DB_AM_RECOVER) || IS_RECOVERING(dbenv))
+			ret = __dbreg_push_id(dbenv, dbp, id);
+	}
 
 	if (!have_lock)
-		MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 	return (ret);
 }
 
@@ -500,7 +501,7 @@ __dbreg_close_id(dbp, txn, op)
 	if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
 		return (0);
 
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 
 	if (fnp->name_off == INVALID_ROFF)
 		dbtp = NULL;
@@ -517,12 +518,25 @@ __dbreg_close_id(dbp, txn, op)
 	if ((ret = __dbreg_register_log(dbenv, txn, &r_unused,
 	    F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0,
 	    op, dbtp, &fid_dbt, fnp->id,
-	    fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0)
+	    fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0) {
+		/*
+		 * We are trying to close, but the log write failed.
+		 * Unfortunately, close needs to plow forward, because
+		 * the application can't do anything with the handle.
+		 * Make the entry in the shared memory region so that
+		 * when we close the environment, we know that this
+		 * happened.  Also, make sure we remove this from the
+		 * per-process table, so that we don't try to close it
+		 * later.
+		 */
+		F_SET(fnp, DB_FNAME_NOTLOGGED);
+		(void)__dbreg_rem_dbentry(dblp, fnp->id);
 		goto err;
+	}
 
 	ret = __dbreg_revoke_id(dbp, 1, DB_LOGFILEID_INVALID);
 
-err:	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+err:	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 	return (ret);
 }
 
@@ -533,15 +547,17 @@ err:	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
  * process keeps open files in an array by ID.)  Push them to the stack and
  * pop them from it, managing memory as appropriate.
  *
- * The stack is protected by the fq_mutex, and in both functions we assume
- * that this is already locked.
+ * The stack is protected by the mtx_filelist, and both functions assume it
+ * is already locked.
  */
 static int
-__dbreg_push_id(dbenv, id)
+__dbreg_push_id(dbenv, dbp, id)
 	DB_ENV *dbenv;
+	DB *dbp;
 	int32_t id;
 {
 	DB_LOG *dblp;
+	DB_REP *db_rep;
 	LOG *lp;
 	REGINFO *infop;
 	int32_t *stack, *newstack;
@@ -550,34 +566,38 @@ __dbreg_push_id(dbenv, id)
 	dblp = dbenv->lg_handle;
 	infop = &dblp->reginfo;
 	lp = infop->primary;
+	db_rep = dbenv->rep_handle;
 
-	if (lp->free_fid_stack == INVALID_ROFF) {
-		stack = NULL;
-		DB_ASSERT(lp->free_fids_alloced == 0);
-	} else
-		stack = R_ADDR(infop, lp->free_fid_stack);
-
+	/*
+	 * If our fid generation in replication has changed, this fid should
+	 * not be pushed back onto the stack.
+	 */
+	if (REP_ON(dbenv) && db_rep->region != NULL &&
+	   ((REP *)db_rep->region)->gen != dbp->fid_gen)
+		return (0);
 	/* Check if we have room on the stack. */
-	if (lp->free_fids_alloced <= lp->free_fids + 1) {
-		R_LOCK(dbenv, infop);
+	if (lp->free_fid_stack == INVALID_ROFF ||
+	    lp->free_fids_alloced <= lp->free_fids + 1) {
+		LOG_SYSTEM_LOCK(dbenv);
 		if ((ret = __db_shalloc(infop,
 		    (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0,
 		    &newstack)) != 0) {
-			R_UNLOCK(dbenv, infop);
+			LOG_SYSTEM_UNLOCK(dbenv);
 			return (ret);
 		}
 
-		if (stack != NULL) {
+		if (lp->free_fid_stack != INVALID_ROFF) {
+			stack = R_ADDR(infop, lp->free_fid_stack);
 			memcpy(newstack, stack,
 			    lp->free_fids_alloced * sizeof(u_int32_t));
 			__db_shalloc_free(infop, stack);
 		}
-		stack = newstack;
-		lp->free_fid_stack = R_OFFSET(infop, stack);
+		lp->free_fid_stack = R_OFFSET(infop, newstack);
 		lp->free_fids_alloced += 20;
-		R_UNLOCK(dbenv, infop);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	}
 
+	stack = R_ADDR(infop, lp->free_fid_stack);
 	stack[lp->free_fids++] = id;
 	return (0);
 }
@@ -611,7 +631,7 @@ __dbreg_pop_id(dbenv, id)
  * be on the stack.
  *
  * Returns success whether or not the particular id was found, and like
- * push and pop, assumes that the fq_mutex is locked.
+ * push and pop, assumes that the mtx_filelist is locked.
  */
 static int
 __dbreg_pluck_id(dbenv, id)
@@ -644,3 +664,73 @@ __dbreg_pluck_id(dbenv, id)
 
 	return (0);
 }
+
+/*
+ * __dbreg_log_id --
+ *	Used for in-memory named files.  They are created in mpool and
+ * are given id's early in the open process so that we can read and
+ * create pages in the mpool for the files.  However, at the time that
+ * the mpf is created, the file may not be fully created and/or its
+ * meta-data may not be fully known, so we can't do a full dbregister.
+ * This is a routine exported that will log a complete dbregister
+ * record that will allow for both recovery and replication.
+ *
+ * PUBLIC: int __dbreg_log_id __P((DB *, DB_TXN *, int32_t, int));
+ */
+int
+__dbreg_log_id(dbp, txn, id, needlock)
+	DB *dbp;
+	DB_TXN *txn;
+	int32_t id;
+	int needlock;
+{
+	DBT fid_dbt, r_name;
+	DB_ENV *dbenv;
+	DB_LOG *dblp;
+	DB_LSN unused;
+	FNAME *fnp;
+	LOG *lp;
+	u_int32_t op;
+	int ret;
+
+	dbenv = dbp->dbenv;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	fnp = dbp->log_filename;
+
+	/* Verify that the fnp has been initialized. */
+	if (fnp->s_type == DB_UNKNOWN) {
+		memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
+		fnp->s_type = dbp->type;
+	}
+
+	/*
+	 * Log the registry.  We should only request a new ID in situations
+	 * where logging is reasonable.
+	 */
+	memset(&fid_dbt, 0, sizeof(fid_dbt));
+	memset(&r_name, 0, sizeof(r_name));
+
+	if (needlock)
+		MUTEX_LOCK(dbenv, lp->mtx_filelist);
+
+	if (fnp->name_off != INVALID_ROFF) {
+		r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+		r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1;
+	}
+
+	fid_dbt.data = dbp->fileid;
+	fid_dbt.size = DB_FILE_ID_LEN;
+
+	op = !F_ISSET(dbp, DB_AM_OPEN_CALLED) ? DBREG_PREOPEN :
+	    (F_ISSET(dbp, DB_AM_INMEM) ? DBREG_REOPEN : DBREG_OPEN);
+	ret = __dbreg_register_log(dbenv, txn, &unused,
+	    F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0,
+	    op, r_name.size == 0 ? NULL : &r_name, &fid_dbt, id,
+	    fnp->s_type, fnp->meta_pgno, fnp->create_txnid);
+
+	if (needlock)
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
+
+	return (ret);
+}
diff --git a/storage/bdb/dbreg/dbreg.src b/storage/bdb/dbreg/dbreg.src
index ff3fc292308..26793c1b916 100644
--- a/storage/bdb/dbreg/dbreg.src
+++ b/storage/bdb/dbreg/dbreg.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: dbreg.src,v 10.26 2004/06/17 17:35:17 bostic Exp $
+ * $Id: dbreg.src,v 12.1 2005/06/16 20:21:49 bostic Exp $
  */
 
 PREFIX	__dbreg
diff --git a/storage/bdb/dbreg/dbreg_rec.c b/storage/bdb/dbreg/dbreg_rec.c
index 07b175a1f3a..44f663b3be9 100644
--- a/storage/bdb/dbreg/dbreg_rec.c
+++ b/storage/bdb/dbreg/dbreg_rec.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -32,7 +32,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: dbreg_rec.c,v 11.133 2004/09/24 00:43:18 bostic Exp $
+ * $Id: dbreg_rec.c,v 12.8 2005/11/09 14:20:32 margo Exp $
  */
 
 #include "db_config.h"
@@ -84,11 +84,19 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 		goto out;
 
 	switch (argp->opcode) {
+	case DBREG_REOPEN:
+	case DBREG_PREOPEN:
 	case DBREG_OPEN:
+		/*
+		 * In general, we redo the open on REDO and abort on UNDO.
+		 * However, a reopen is a second instance of an open of
+		 * in-memory files and we don't want to close them yet
+		 * on abort, so just skip that here.
+		 */
 		if ((DB_REDO(op) ||
 		    op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES))
 			do_open = 1;
-		else
+		else if (argp->opcode != DBREG_REOPEN)
 			do_close = 1;
 		break;
 	case DBREG_CLOSE:
@@ -172,7 +180,9 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 	if (do_close) {
 		/*
 		 * If we are undoing an open, or redoing a close,
-		 * then we need to close the file.
+		 * then we need to close the file.  If we are simply
+		 * revoking then we just need to grab the DBP and revoke
+		 * the log id.
 		 *
 		 * If the file is deleted, then we can just ignore this close.
 		 * Otherwise, we should usually have a valid dbp we should
@@ -181,7 +191,7 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 		 * fact, not have the file open, and that's OK.
 		 */
 		do_rem = 0;
-		MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+		MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
 		if (argp->fileid < dblp->dbentry_cnt) {
 			/*
 			 * Typically, closes should match an open which means
@@ -206,12 +216,11 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 				    argp->opcode != DBREG_RCLOSE) ||
 				    argp->opcode == DBREG_CHKPNT) {
 					__db_err(dbenv,
-					    "Improper file close at %lu/%lu",
+				    "Warning: Improper file close at %lu/%lu",
 					    (u_long)lsnp->file,
 					    (u_long)lsnp->offset);
-					ret = EINVAL;
 				}
-				MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+				MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 				goto done;
 			}
 
@@ -228,7 +237,7 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 				 */
 				do_rem = F_ISSET(dbp, DB_AM_RECOVER) ||
 				    op == DB_TXN_ABORT;
-				MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+				MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 				if (op == DB_TXN_ABORT)
 					(void)__dbreg_close_id(dbp,
 					    NULL, DBREG_RCLOSE);
@@ -236,11 +245,13 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 					(void)__dbreg_revoke_id(dbp, 0,
 					    DB_LOGFILEID_INVALID);
 			} else if (dbe->deleted) {
-				MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
-				__dbreg_rem_dbentry(dblp, argp->fileid);
+				MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
+				if ((ret = __dbreg_rem_dbentry(
+				    dblp, argp->fileid)) != 0)
+					goto out;
 			}
 		} else
-			MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+			MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 
 		/*
 		 * During recovery, all files are closed.  On an abort, we only
@@ -273,7 +284,7 @@ __dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
 			if (op == DB_TXN_ABORT &&
 			    !F_ISSET(dbp, DB_AM_RECOVER)) {
 				if ((t_ret = __db_refresh(dbp,
-				    NULL, DB_NOSYNC, NULL)) != 0 && ret == 0)
+				    NULL, DB_NOSYNC, NULL, 0)) != 0 && ret == 0)
 					ret = t_ret;
 			} else {
 				if (op == DB_TXN_APPLY &&
@@ -318,20 +329,22 @@ __dbreg_open_file(dbenv, txn, argp, info)
 	 * is what we expect.  If it's not, then we close the old file and
 	 * open the new one.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
-	if (argp->fileid < dblp->dbentry_cnt)
+	MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
+	if (argp->fileid != DB_LOGFILEID_INVALID &&
+	    argp->fileid < dblp->dbentry_cnt)
 		dbe = &dblp->dbentry[argp->fileid];
 	else
 		dbe = NULL;
 
 	if (dbe != NULL) {
 		if (dbe->deleted) {
-			MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+			MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 			return (ENOENT);
 		}
 
 		/*
-		 * At the end of OPENFILES, we may have a file open.  The
+		 * At the end of OPENFILES, we may have a file open.  If this
+		 * is a reopen, then we will always close and reopen.  If the
 		 * open was part of a committed transaction, so it doesn't
 		 * get undone.  However, if the fileid was previously used,
 		 * we'll see a close that may need to get undone.  There are
@@ -342,11 +355,12 @@ __dbreg_open_file(dbenv, txn, argp, info)
 		 * which case it should never be opened during recovery.
 		 */
 		if ((dbp = dbe->dbp) != NULL) {
-			if (dbp->meta_pgno != argp->meta_pgno ||
+			if (argp->opcode == DBREG_REOPEN ||
+			    dbp->meta_pgno != argp->meta_pgno ||
 			    argp->name.size == 0 ||
 			    memcmp(dbp->fileid, argp->uid.data,
 			    DB_FILE_ID_LEN) != 0) {
-				MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+				MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 				(void)__dbreg_revoke_id(dbp, 0,
 				    DB_LOGFILEID_INVALID);
 				if (F_ISSET(dbp, DB_AM_RECOVER))
@@ -360,7 +374,7 @@ __dbreg_open_file(dbenv, txn, argp, info)
 			 * here had better be the same dbp.
 			 */
 			DB_ASSERT(dbe->dbp == dbp);
-			MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+			MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 
 			/*
 			 * This is a successful open.  We need to record that
@@ -375,7 +389,7 @@ __dbreg_open_file(dbenv, txn, argp, info)
 		}
 	}
 
-	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+	MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 
 reopen:
 	/*
@@ -402,6 +416,6 @@ reopen:
 	}
 
 	return (__dbreg_do_open(dbenv,
-	    txn, dblp, argp->uid.data, argp->name.data,
-	    argp->ftype, argp->fileid, argp->meta_pgno, info, argp->id));
+	    txn, dblp, argp->uid.data, argp->name.data, argp->ftype,
+	    argp->fileid, argp->meta_pgno, info, argp->id, argp->opcode));
 }
diff --git a/storage/bdb/dbreg/dbreg_stat.c b/storage/bdb/dbreg/dbreg_stat.c
index dd53b77c3d4..bdbb9b2604f 100644
--- a/storage/bdb/dbreg/dbreg_stat.c
+++ b/storage/bdb/dbreg/dbreg_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: dbreg_stat.c,v 11.48 2004/10/15 16:59:41 bostic Exp $
+ * $Id: dbreg_stat.c,v 12.5 2005/10/12 15:01:47 margo Exp $
  */
 
 #include "db_config.h"
@@ -21,6 +21,28 @@
 #include "dbinc/txn.h"
 
 #ifdef HAVE_STATISTICS
+static int __dbreg_print_dblist __P((DB_ENV *, u_int32_t));
+
+/*
+ * __dbreg_stat_print --
+ *	Print the dbreg statistics.
+ *
+ * PUBLIC: int __dbreg_stat_print __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbreg_stat_print(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	int ret;
+
+	if (LF_ISSET(DB_STAT_ALL) &&
+	    (ret = __dbreg_print_dblist(dbenv, flags)) != 0)
+		return (ret);
+
+	return (0);
+}
+
 /*
  * __dbreg_print_fname --
  *	Display the contents of an FNAME structure.
@@ -32,22 +54,26 @@ __dbreg_print_fname(dbenv, fnp)
 	DB_ENV *dbenv;
 	FNAME *fnp;
 {
+	static const FN fn[] = {
+		{ DB_FNAME_DURABLE,	"DB_FNAME_DURABLE" },
+		{ DB_FNAME_NOTLOGGED,	"DB_FNAME_NOTLOGGED" },
+		{ 0,			NULL }
+	};
+
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "DB handle FNAME contents:");
 	STAT_LONG("log ID", fnp->id);
 	STAT_ULONG("Meta pgno", fnp->meta_pgno);
 	__db_print_fileid(dbenv, fnp->ufid, "\tFile ID");
 	STAT_ULONG("create txn", fnp->create_txnid);
-	STAT_LONG("durable", fnp->is_durable);
+	__db_prflags(dbenv, NULL, fnp->flags, fn, NULL, "\tFlags");
 }
 
 /*
  * __dbreg_print_dblist --
  *	Display the DB_ENV's list of files.
- *
- * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *, u_int32_t));
  */
-void
+static int
 __dbreg_print_dblist(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
@@ -64,11 +90,12 @@ __dbreg_print_dblist(dbenv, flags)
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "LOG FNAME list:");
-	__db_print_mutex(dbenv, NULL, &lp->fq_mutex, "File name mutex", flags);
+	__mutex_print_debug_single(
+	    dbenv, "File name mutex", lp->mtx_filelist, flags);
 
 	STAT_LONG("Fid max", lp->fid_max);
 
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 	for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
 	    fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
 		if (first) {
@@ -92,6 +119,8 @@ __dbreg_print_dblist(dbenv, flags)
 		    dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp),
 		    (u_long)(dbp == NULL ? 0 : dbp->flags));
 	}
-	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
+
+	return (0);
 }
 #endif
diff --git a/storage/bdb/dbreg/dbreg_util.c b/storage/bdb/dbreg/dbreg_util.c
index 6f1cc9297a5..9c3082b1e9c 100644
--- a/storage/bdb/dbreg/dbreg_util.c
+++ b/storage/bdb/dbreg/dbreg_util.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: dbreg_util.c,v 11.50 2004/10/15 16:59:41 bostic Exp $
+ * $Id: dbreg_util.c,v 12.10 2005/10/12 15:01:47 margo Exp $
  */
 
 #include "db_config.h"
@@ -17,7 +17,10 @@
 #include "db_int.h"
 #include "dbinc/db_page.h"
 #include "dbinc/db_am.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/fop.h"
 #include "dbinc/log.h"
+#include "dbinc/mp.h"
 #include "dbinc/txn.h"
 
 static int __dbreg_check_master __P((DB_ENV *, u_int8_t *, char *));
@@ -40,7 +43,7 @@ __dbreg_add_dbentry(dbenv, dblp, dbp, ndx)
 
 	ret = 0;
 
-	MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+	MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
 
 	/*
 	 * Check if we need to grow the table.  Note, ndx is 0-based (the
@@ -65,7 +68,7 @@ __dbreg_add_dbentry(dbenv, dblp, dbp, ndx)
 	dblp->dbentry[ndx].deleted = dbp == NULL;
 	dblp->dbentry[ndx].dbp = dbp;
 
-err:	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+err:	MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 	return (ret);
 }
 
@@ -73,19 +76,21 @@ err:	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
  * __dbreg_rem_dbentry
  *	Remove an entry from the DB entry table.
  *
- * PUBLIC: void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+ * PUBLIC: int __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
  */
-void
+int
 __dbreg_rem_dbentry(dblp, ndx)
 	DB_LOG *dblp;
 	int32_t ndx;
 {
-	MUTEX_THREAD_LOCK(dblp->dbenv, dblp->mutexp);
+	MUTEX_LOCK(dblp->dbenv, dblp->mtx_dbreg);
 	if (dblp->dbentry_cnt > ndx) {
 		dblp->dbentry[ndx].dbp = NULL;
 		dblp->dbentry[ndx].deleted = 0;
 	}
-	MUTEX_THREAD_UNLOCK(dblp->dbenv, dblp->mutexp);
+	MUTEX_UNLOCK(dblp->dbenv, dblp->mtx_dbreg);
+
+	return (0);
 }
 
 /*
@@ -110,7 +115,7 @@ __dbreg_log_files(dbenv)
 
 	ret = 0;
 
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 
 	for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
 	    fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
@@ -136,14 +141,14 @@ __dbreg_log_files(dbenv)
 		 */
 		if ((ret = __dbreg_register_log(dbenv,
 		    NULL, &r_unused,
-		    fnp->is_durable ? 0 : DB_LOG_NOT_DURABLE,
+		    F_ISSET(fnp, DB_FNAME_DURABLE) ? 0 : DB_LOG_NOT_DURABLE,
 		    F_ISSET(dblp, DBLOG_RECOVER) ? DBREG_RCLOSE : DBREG_CHKPNT,
 		    dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno,
 		    TXN_INVALID)) != 0)
 			break;
 	}
 
-	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 
 	return (ret);
 }
@@ -173,7 +178,7 @@ __dbreg_close_files(dbenv)
 
 	dblp = dbenv->lg_handle;
 	ret = 0;
-	MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+	MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
 	for (i = 0; i < dblp->dbentry_cnt; i++) {
 		/*
 		 * We only want to close dbps that recovery opened.  Any
@@ -182,6 +187,12 @@ __dbreg_close_files(dbenv)
 		 * Before doing so, we need to revoke their log fileids
 		 * so that we don't end up leaving around FNAME entries
 		 * for dbps that shouldn't have them.
+		 *
+		 * Any FNAME entries that were marked NOTLOGGED had the
+		 * log write fail while they were being closed.  Since it's
+		 * too late to be logging now we flag that as a failure
+		 * so recovery will be run.  This will get returned by
+		 * __dbreg_revoke_id.
 		 */
 		if ((dbp = dblp->dbentry[i].dbp) != NULL) {
 			/*
@@ -195,7 +206,7 @@ __dbreg_close_files(dbenv)
 			 * we're in this loop anyway--we're in the process of
 			 * making all outstanding dbps invalid.
 			 */
-			MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+			MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 			if (F_ISSET(dbp, DB_AM_RECOVER))
 				t_ret = __db_close(dbp,
 				     NULL, dbp->mpf == NULL ? DB_NOSYNC : 0);
@@ -204,13 +215,13 @@ __dbreg_close_files(dbenv)
 				     dbp, 0, DB_LOGFILEID_INVALID);
 			if (ret == 0)
 				ret = t_ret;
-			MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+			MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
 		}
 
 		dblp->dbentry[i].deleted = 0;
 		dblp->dbentry[i].dbp = NULL;
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+	MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 	return (ret);
 }
 
@@ -261,7 +272,7 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
 	dblp = dbenv->lg_handle;
 	COMPQUIET(inc, 0);
 
-	MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+	MUTEX_LOCK(dbenv, dblp->mtx_dbreg);
 
 	/*
 	 * Under XA, a process different than the one issuing DB operations
@@ -277,12 +288,12 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
 		}
 
 		/*
-		 * __dbreg_id_to_fname acquires the region's fq_mutex,
-		 * which we can't safely acquire while we hold the thread lock.
-		 * We no longer need it anyway--the dbentry table didn't
-		 * have what we needed.
+		 * __dbreg_id_to_fname acquires the mtx_filelist mutex, which
+		 * we can't safely acquire while we hold the thread lock.  We
+		 * no longer need it anyway--the dbentry table didn't have what
+		 * we needed.
 		 */
-		MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+		MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 
 		if (__dbreg_id_to_fname(dblp, ndx, 0, &fname) != 0)
 			/*
@@ -294,11 +305,11 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
 			return (ENOENT);
 
 		/*
-		 * Note that we're relying on fname not to change, even
-		 * though we released the mutex that protects it (fq_mutex)
-		 * inside __dbreg_id_to_fname.  This should be a safe
-		 * assumption, because the other process that has the file
-		 * open shouldn't be closing it while we're trying to abort.
+		 * Note that we're relying on fname not to change, even though
+		 * we released the mutex that protects it (mtx_filelist) inside
+		 * __dbreg_id_to_fname.  This should be a safe assumption, the
+		 * other process that has the file open shouldn't be closing it
+		 * while we're trying to abort.
 		 */
 		name = R_ADDR(&dblp->reginfo, fname->name_off);
 
@@ -313,7 +324,7 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
 		 */
 		if ((ret = __dbreg_do_open(dbenv, txn, dblp,
 		    fname->ufid, name, fname->s_type,
-		    ndx, fname->meta_pgno, NULL, 0)) != 0)
+		    ndx, fname->meta_pgno, NULL, 0, DBREG_OPEN)) != 0)
 			return (ret);
 
 		*dbpp = dblp->dbentry[ndx].dbp;
@@ -331,8 +342,20 @@ __dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
 	/* It's an error if we don't have a corresponding writeable DB. */
 	if ((*dbpp = dblp->dbentry[ndx].dbp) == NULL)
 		ret = ENOENT;
+	else
+		/*
+		 * If we are in recovery, then set that the file has
+		 * been written.  It is possible to run recovery,
+		 * find all the pages in their post update state
+		 * in the OS buffer pool, put a checkpoint in the log
+		 * and then crash the system without forcing the pages
+		 * to disk. If this is an in-memory file, we may not have
+		 * an mpf yet.
+		 */
+		if ((*dbpp)->mpf != NULL && (*dbpp)->mpf->mfp != NULL)
+			(*dbpp)->mpf->mfp->file_written = 1;
 
-err:	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+err:	MUTEX_UNLOCK(dbenv, dblp->mtx_dbreg);
 	return (ret);
 }
 
@@ -361,7 +384,7 @@ __dbreg_id_to_fname(dblp, id, have_lock, fnamep)
 	ret = -1;
 
 	if (!have_lock)
-		MUTEX_LOCK(dbenv, &lp->fq_mutex);
+		MUTEX_LOCK(dbenv, lp->mtx_filelist);
 	for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
 	    fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
 		if (fnp->id == id) {
@@ -371,7 +394,7 @@ __dbreg_id_to_fname(dblp, id, have_lock, fnamep)
 		}
 	}
 	if (!have_lock)
-		MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 
 	return (ret);
 }
@@ -400,7 +423,7 @@ __dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)
 	ret = -1;
 
 	if (!have_lock)
-		MUTEX_LOCK(dbenv, &lp->fq_mutex);
+		MUTEX_LOCK(dbenv, lp->mtx_filelist);
 	for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
 	    fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
 		if (memcmp(fnp->ufid, fid, DB_FILE_ID_LEN) == 0) {
@@ -410,7 +433,7 @@ __dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)
 		}
 	}
 	if (!have_lock)
-		MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 
 	return (ret);
 }
@@ -448,11 +471,12 @@ __dbreg_get_name(dbenv, fid, namep)
  *	Open files referenced in the log.  This is the part of the open that
  * is not protected by the thread mutex.
  * PUBLIC: int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *,
- * PUBLIC:     char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+ * PUBLIC:     char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t,
+ * PUBLIC:     u_int32_t));
  */
 int
 __dbreg_do_open(dbenv,
-    txn, lp, uid, name, ftype, ndx, meta_pgno, info, id)
+    txn, lp, uid, name, ftype, ndx, meta_pgno, info, id, opcode)
 	DB_ENV *dbenv;
 	DB_TXN *txn;
 	DB_LOG *lp;
@@ -462,12 +486,16 @@ __dbreg_do_open(dbenv,
 	int32_t ndx;
 	db_pgno_t meta_pgno;
 	void *info;
-	u_int32_t id;
+	u_int32_t id, opcode;
 {
 	DB *dbp;
 	u_int32_t cstat, ret_stat;
 	int ret;
+	char *dname, *fname;
 
+	cstat = TXN_EXPECTED;
+	fname = name;
+	dname = NULL;
 	if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
 		return (ret);
 
@@ -490,9 +518,24 @@ __dbreg_do_open(dbenv,
 		memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
 		dbp->meta_pgno = meta_pgno;
 	}
-	if ((ret = __db_open(dbp, txn, name, NULL,
-	    ftype, DB_ODDFILESIZE, __db_omode("rw----"), meta_pgno)) == 0) {
+	if (opcode == DBREG_PREOPEN) {
+		dbp->type = ftype;
+		if ((ret = __dbreg_setup(dbp, name, id)) != 0)
+			goto err;
+		MAKE_INMEM(dbp);
+		goto skip_open;
+	}
 
+	if (opcode == DBREG_REOPEN) {
+		MAKE_INMEM(dbp);
+		fname = NULL;
+		dname = name;
+	}
+
+	if ((ret = __db_open(dbp, txn, fname, dname, ftype,
+	    DB_DURABLE_UNKNOWN | DB_ODDFILESIZE,
+	    __db_omode(OWNER_RW), meta_pgno)) == 0) {
+skip_open:
 		/*
 		 * Verify that we are opening the same file that we were
 		 * referring to when we wrote this log record.
@@ -500,7 +543,7 @@ __dbreg_do_open(dbenv,
 		if ((meta_pgno != PGNO_BASE_MD &&
 		    __dbreg_check_master(dbenv, uid, name) != 0) ||
 		    memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
-			cstat = TXN_IGNORE;
+			cstat = TXN_UNEXPECTED;
 		else
 			cstat = TXN_EXPECTED;
 
@@ -518,7 +561,7 @@ __dbreg_do_open(dbenv,
 			ret = __db_txnlist_update(dbenv,
 			    info, id, cstat, NULL, &ret_stat, 1);
 
-err:		if (cstat == TXN_IGNORE)
+err:		if (cstat == TXN_UNEXPECTED)
 			goto not_right;
 		return (ret);
 	} else if (ret == ENOENT) {
@@ -547,8 +590,8 @@ __dbreg_check_master(dbenv, uid, name)
 	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 		return (ret);
 	F_SET(dbp, DB_AM_RECOVER);
-	ret = __db_open(dbp,
-	    NULL, name, NULL, DB_BTREE, 0, __db_omode("rw----"), PGNO_BASE_MD);
+	ret = __db_open(dbp, NULL,
+	    name, NULL, DB_BTREE, 0, __db_omode(OWNER_RW), PGNO_BASE_MD);
 
 	if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
 		ret = EINVAL;
@@ -592,10 +635,10 @@ __dbreg_lazy_id(dbp)
 	lp = dblp->reginfo.primary;
 	fnp = dbp->log_filename;
 
-	/* The fq_mutex protects the FNAME list and id management. */
-	MUTEX_LOCK(dbenv, &lp->fq_mutex);
+	/* The mtx_filelist protects the FNAME list and id management. */
+	MUTEX_LOCK(dbenv, lp->mtx_filelist);
 	if (fnp->id != DB_LOGFILEID_INVALID) {
-		MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+		MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 		return (0);
 	}
 	id = DB_LOGFILEID_INVALID;
@@ -612,7 +655,7 @@ __dbreg_lazy_id(dbp)
 
 	/*
 	 * All DB related logging routines check the id value *without*
-	 * holding the fq_mutex to know whether we need to call
+	 * holding the mtx_filelist to know whether we need to call
 	 * dbreg_lazy_id to begin with.  We must set the ID after a
 	 * *successful* commit so that there is no possibility of a second
 	 * modification call finding a valid ID in the dbp before the
@@ -624,6 +667,6 @@ __dbreg_lazy_id(dbp)
 err:
 	if (ret != 0 && id != DB_LOGFILEID_INVALID)
 		(void)__dbreg_revoke_id(dbp, 1, id);
-	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+	MUTEX_UNLOCK(dbenv, lp->mtx_filelist);
 	return (ret);
 }
diff --git a/storage/bdb/dist/Makefile.in b/storage/bdb/dist/Makefile.in
index cf484b6bf99..abd7dd93b17 100644
--- a/storage/bdb/dist/Makefile.in
+++ b/storage/bdb/dist/Makefile.in
@@ -1,4 +1,4 @@
-# $Id: Makefile.in,v 11.264 2004/11/01 21:55:33 mark Exp $
+# $Id: Makefile.in,v 12.33 2005/11/03 17:43:46 bostic Exp $
 
 srcdir=	@srcdir@/..
 builddir=.
@@ -58,7 +58,7 @@ CCLINK=		@MAKEFILE_CCLINK@ @CFLAGS@
 LDFLAGS=	@LDFLAGS@
 LIBS=		@LIBS@
 TEST_LIBS=	@TEST_LIBS@
-LIBSO_LIBS=	@LIBSO_LIBS@
+LIBCSO_LIBS=	@LIBCSO_LIBS@ @LIBSO_LIBS@
 
 libdb_base=	libdb
 libdb=		$(libdb_base).a
@@ -77,7 +77,7 @@ CXXFLAGS=	-c $(CPPFLAGS) @CXXFLAGS@
 CXX=		@MAKEFILE_CXX@
 CXXLINK=	@MAKEFILE_CXXLINK@ @CXXFLAGS@
 XSOLINK=	@MAKEFILE_XSOLINK@ @CXXFLAGS@
-LIBXSO_LIBS=	@LIBXSO_LIBS@
+LIBXSO_LIBS=	@LIBXSO_LIBS@ @LIBSO_LIBS@
 
 libcxx_base=	libdb_cxx
 libcxx=		$(libcxx_base).a
@@ -93,7 +93,7 @@ libxso_major=	$(libcxx_base)-$(LIBMAJOR)@SOSUFFIX@
 # Java support is optional and requires shared librarires.
 ##################################################
 CLASSPATH=	$(JAVA_CLASSTOP)
-LIBJSO_LIBS=	@LIBJSO_LIBS@
+LIBJSO_LIBS=	@LIBJSO_LIBS@ @LIBSO_LIBS@
 
 JAR=		@JAR@
 JAVAC=		env CLASSPATH="$(CLASSPATH)" @JAVAC@
@@ -101,11 +101,9 @@ JAVACFLAGS=	@JAVACFLAGS@
 JAVA_CLASSTOP=	./classes
 JAVA_RPCCLASSTOP=./classes.rpc
 JAVA_EXCLASSTOP=./classes.ex
-JAVA_DBREL=	com/sleepycat/db
-JAVA_EXREL=	com/sleepycat/examples
 JAVA_RPCREL=	com/sleepycat/db/rpcserver
 JAVA_SRCDIR=	$(srcdir)/java/src
-JAVA_EXDIR=	$(srcdir)/examples_java/src/com/sleepycat/examples
+JAVA_EXDIR=	$(srcdir)/examples_java/src
 JAVA_RPCDIR=	$(srcdir)/rpc_server/java
 JAVA_SLEEPYCAT=	$(srcdir)/java/src/com/sleepycat
 
@@ -125,8 +123,8 @@ libjso_g=	$(libjso_base)-$(LIBVERSION)_g@JMODSUFFIX@
 #
 # Tcl support is optional and requires shared libraries.
 ##################################################
-TCFLAGS=	@TCFLAGS@
-LIBTSO_LIBS=	@LIBTSO_LIBS@
+TCL_INCLUDE_SPEC=	@TCL_INCLUDE_SPEC@
+LIBTSO_LIBS=	@LIBTSO_LIBS@ @LIBSO_LIBS@
 libtso_base=	libdb_tcl
 libtso=		$(libtso_base)-$(LIBVERSION)@MODSUFFIX@
 libtso_static=	$(libtso_base)-$(LIBVERSION).a
@@ -158,7 +156,7 @@ BTREE_OBJS=\
 	bt_compare@o@ bt_conv@o@ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ \
 	bt_method@o@ bt_open@o@ bt_put@o@ bt_rec@o@ bt_reclaim@o@ \
 	bt_recno@o@ bt_rsearch@o@ bt_search@o@ bt_split@o@ bt_stat@o@ \
-	bt_upgrade@o@ btree_auto@o@
+	bt_compact@o@ bt_upgrade@o@ btree_auto@o@
 BTREE_VRFY_OBJS=\
 	db_ovfl_vrfy@o@ db_vrfy@o@ db_vrfyutil@o@ bt_verify@o@
 HASH_OBJS=\
@@ -173,8 +171,8 @@ QUEUE_OBJS=\
 QUEUE_VRFY_OBJS=\
 	qam_verify@o@
 REP_OBJS=\
-	rep_auto@o@ rep_backup@o@ rep_method@o@ rep_record@o@ rep_region@o@ \
-	rep_stat@o@ rep_util@o@
+	rep_auto@o@ rep_backup@o@ rep_elect@o@ rep_log@o@ rep_method@o@ \
+	rep_record@o@ rep_region@o@ rep_stat@o@ rep_util@o@ rep_verify@o@
 PRINT_OBJS=\
 	btree_autop@o@ crdel_autop@o@ db_autop@o@ dbreg_autop@o@ \
 	fileops_autop@o@ hash_autop@o@ qam_autop@o@ rep_autop@o@ \
@@ -182,29 +180,32 @@ PRINT_OBJS=\
 
 C_OBJS= @ADDITIONAL_OBJS@ @REPLACEMENT_OBJS@ @CRYPTO_OBJS@ @RPC_CLIENT_OBJS@ \
 	crdel_auto@o@ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ \
-	db_byteorder@o@ db_cam@o@ db_conv@o@ db_dispatch@o@ db_dup@o@ \
-	db_err@o@ db_getlong@o@ db_idspace@o@ db_iface@o@ db_join@o@ \
-	db_log2@o@ db_meta@o@ db_method@o@ db_open@o@ db_overflow@o@ \
-	db_pr@o@ db_rec@o@ db_reclaim@o@ db_rename@o@ db_remove@o@ \
-	db_ret@o@ db_salloc@o@ db_setid@o@ db_setlsn@o@ db_shash@o@ \
-	db_stati@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ dbm@o@ \
-	dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ dbreg_stat@o@ dbreg_util@o@ \
-	env_file@o@ env_method@o@ env_open@o@ env_recover@o@ \
-	env_region@o@ env_stat@o@ fileops_auto@o@ fop_basic@o@ \
-	fop_rec@o@ fop_util@o@ hash_func@o@ hmac@o@ hsearch@o@ lock@o@ \
-	lock_deadlock@o@ lock_id@o@ lock_list@o@ lock_method@o@ \
+	db_byteorder@o@ db_cam@o@ db_clock@o@ db_conv@o@ db_dispatch@o@ \
+	db_dup@o@ db_err@o@ db_getlong@o@ db_idspace@o@ db_iface@o@ \
+	db_join@o@ db_log2@o@ db_meta@o@ db_method@o@ db_open@o@ \
+	db_overflow@o@ db_pr@o@ db_rec@o@ db_reclaim@o@ db_rename@o@ \
+	db_remove@o@ db_ret@o@ db_salloc@o@ db_setid@o@ db_setlsn@o@ \
+	db_shash@o@ db_stati@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ \
+	dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ dbreg_stat@o@ \
+	dbreg_util@o@ env_failchk@o@ env_file@o@ env_method@o@ \
+	env_open@o@ env_recover@o@ env_region@o@ env_register@o@ \
+	env_stat@o@ fileops_auto@o@ fop_basic@o@ fop_rec@o@ fop_util@o@ \
+	hash_func@o@ hmac@o@ hsearch@o@ lock@o@ lock_deadlock@o@ \
+	lock_failchk@o@ lock_id@o@ lock_list@o@ lock_method@o@ \
 	lock_region@o@ lock_stat@o@ lock_timer@o@ lock_util@o@ log@o@ \
-	log_archive@o@ log_compare@o@ log_get@o@ log_method@o@ \
+	log_archive@o@ log_compare@o@ log_debug@o@ log_get@o@ log_method@o@ \
 	log_put@o@ log_stat@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \
 	mp_fmethod@o@ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ \
 	mp_region@o@ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ \
-	mutex@o@ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ \
-	os_dir@o@ os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ \
-	os_id@o@ os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ \
-	os_region@o@ os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ \
-	os_seek@o@ os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ \
-	os_truncate@o@ os_unlink@o@ sha1@o@ seq_stat@o@ sequence@o@ \
-	snprintf@o@ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ \
+	mut_alloc@o@ mut_method@o@ mut_region@o@ \
+	mut_stat@o@ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ \
+	os_dir@o@ os_errno@o@ os_fid@o@ os_flock@o@ os_fsync@o@ \
+	os_handle@o@ os_id@o@ os_map@o@ os_method@o@ os_mkdir@o@ \
+	os_oflags@o@ os_open@o@ os_region@o@ os_rename@o@ os_root@o@ \
+	os_rpath@o@ os_rw@o@ os_seek@o@ os_sleep@o@ os_spin@o@ \
+	os_stat@o@ os_tmpdir@o@ os_truncate@o@ os_unlink@o@ sha1@o@ \
+	seq_stat@o@ sequence@o@ snprintf@o@ txn@o@ txn_auto@o@ \
+	txn_chkpt@o@ txn_failchk@o@ txn_method@o@ txn_rec@o@ \
 	txn_recover@o@ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ \
 	xa_db@o@ xa_map@o@
 
@@ -225,6 +226,7 @@ JAVA_DBSRCS=\
 	$(JAVA_SLEEPYCAT)/bind/EntryBinding.java \
 	$(JAVA_SLEEPYCAT)/bind/RecordNumberBinding.java \
 	$(JAVA_SLEEPYCAT)/bind/serial/ClassCatalog.java \
+	$(JAVA_SLEEPYCAT)/bind/serial/SerialBase.java \
 	$(JAVA_SLEEPYCAT)/bind/serial/SerialBinding.java \
 	$(JAVA_SLEEPYCAT)/bind/serial/SerialInput.java \
 	$(JAVA_SLEEPYCAT)/bind/serial/SerialOutput.java \
@@ -246,6 +248,7 @@ JAVA_DBSRCS=\
 	$(JAVA_SLEEPYCAT)/bind/tuple/MarshalledTupleKeyEntity.java \
 	$(JAVA_SLEEPYCAT)/bind/tuple/ShortBinding.java \
 	$(JAVA_SLEEPYCAT)/bind/tuple/StringBinding.java \
+	$(JAVA_SLEEPYCAT)/bind/tuple/TupleBase.java \
 	$(JAVA_SLEEPYCAT)/bind/tuple/TupleBinding.java \
 	$(JAVA_SLEEPYCAT)/bind/tuple/TupleInput.java \
 	$(JAVA_SLEEPYCAT)/bind/tuple/TupleInputBinding.java \
@@ -288,6 +291,8 @@ JAVA_DBSRCS=\
 	$(JAVA_SLEEPYCAT)/db/CacheFileStats.java \
 	$(JAVA_SLEEPYCAT)/db/CacheStats.java \
 	$(JAVA_SLEEPYCAT)/db/CheckpointConfig.java \
+	$(JAVA_SLEEPYCAT)/db/CompactConfig.java \
+	$(JAVA_SLEEPYCAT)/db/CompactStats.java \
 	$(JAVA_SLEEPYCAT)/db/Cursor.java \
 	$(JAVA_SLEEPYCAT)/db/CursorConfig.java \
 	$(JAVA_SLEEPYCAT)/db/Database.java \
@@ -324,13 +329,20 @@ JAVA_DBSRCS=\
 	$(JAVA_SLEEPYCAT)/db/MultipleEntry.java \
 	$(JAVA_SLEEPYCAT)/db/MultipleKeyDataEntry.java \
 	$(JAVA_SLEEPYCAT)/db/MultipleRecnoDataEntry.java \
+	$(JAVA_SLEEPYCAT)/db/MutexStats.java \
 	$(JAVA_SLEEPYCAT)/db/OperationStatus.java \
 	$(JAVA_SLEEPYCAT)/db/PanicHandler.java \
 	$(JAVA_SLEEPYCAT)/db/PreparedTransaction.java \
 	$(JAVA_SLEEPYCAT)/db/QueueStats.java \
 	$(JAVA_SLEEPYCAT)/db/RecordNumberAppender.java \
 	$(JAVA_SLEEPYCAT)/db/RecoveryOperation.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationConfig.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationDuplicateMasterException.java \
 	$(JAVA_SLEEPYCAT)/db/ReplicationHandleDeadException.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationHoldElectionException.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationJoinFailureException.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationLockoutException.java \
+	$(JAVA_SLEEPYCAT)/db/ReplicationSiteUnavailableException.java \
 	$(JAVA_SLEEPYCAT)/db/ReplicationStats.java \
 	$(JAVA_SLEEPYCAT)/db/ReplicationStatus.java \
 	$(JAVA_SLEEPYCAT)/db/ReplicationTransport.java \
@@ -347,6 +359,7 @@ JAVA_DBSRCS=\
 	$(JAVA_SLEEPYCAT)/db/TransactionConfig.java \
 	$(JAVA_SLEEPYCAT)/db/TransactionStats.java \
 	$(JAVA_SLEEPYCAT)/db/VerifyConfig.java \
+	$(JAVA_SLEEPYCAT)/db/VersionMismatchException.java \
 	$(JAVA_SLEEPYCAT)/db/internal/Db.java \
 	$(JAVA_SLEEPYCAT)/db/internal/DbClient.java \
 	$(JAVA_SLEEPYCAT)/db/internal/DbConstants.java \
@@ -463,7 +476,11 @@ JAVA_EXSRCS=\
 	$(JAVA_EXDIR)/db/LockExample.java \
 	$(JAVA_EXDIR)/db/RPCExample.java \
 	$(JAVA_EXDIR)/db/SequenceExample.java \
-	$(JAVA_EXDIR)/db/TpcbExample.java
+	$(JAVA_EXDIR)/db/TpcbExample.java \
+	$(JAVA_EXDIR)/db/txn/DBWriter.java \
+	$(JAVA_EXDIR)/db/txn/PayloadData.java \
+	$(JAVA_EXDIR)/db/txn/TxnGuide.java \
+	$(JAVA_EXDIR)/db/txn/TxnGuideInMemory.java
 
 TCL_OBJS=\
 	tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
@@ -498,10 +515,6 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/ServerStubs.java \
 	$(JAVA_RPCDIR)/gen/__db_associate_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_associate_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_bt_maxkey_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_bt_maxkey_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_bt_minkey_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_bt_minkey_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_close_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_close_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_create_msg.java \
@@ -510,18 +523,12 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/__db_cursor_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_del_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_del_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_encrypt_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_encrypt_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_extentsize_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_extentsize_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_flags_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_flags_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_bt_minkey_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_bt_minkey_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_get_dbname_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_get_dbname_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_encrypt_flags_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_encrypt_flags_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_get_extentsize_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_get_extentsize_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_flags_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_flags_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_h_ffactor_msg.java \
@@ -531,12 +538,12 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/__db_get_lorder_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_lorder_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_get_name_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_get_name_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_open_flags_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_open_flags_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_pagesize_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_pagesize_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_get_q_extentsize_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_get_q_extentsize_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_re_delim_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_re_delim_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_re_len_msg.java \
@@ -544,56 +551,62 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/__db_get_re_pad_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_get_re_pad_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_get_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_h_ffactor_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_h_ffactor_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_h_nelem_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_h_nelem_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_join_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_join_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_key_range_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_key_range_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_lorder_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_lorder_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_open_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_open_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_pagesize_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_pagesize_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_pget_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_pget_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_put_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_put_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_re_delim_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_re_delim_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_re_len_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_re_len_reply.java \
-	$(JAVA_RPCDIR)/gen/__db_re_pad_msg.java \
-	$(JAVA_RPCDIR)/gen/__db_re_pad_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_remove_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_remove_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_rename_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_rename_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_bt_minkey_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_bt_minkey_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_encrypt_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_encrypt_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_flags_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_flags_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_h_ffactor_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_h_ffactor_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_h_nelem_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_h_nelem_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_lorder_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_lorder_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_pagesize_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_pagesize_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_q_extentsize_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_q_extentsize_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_delim_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_delim_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_len_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_len_reply.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_pad_msg.java \
+	$(JAVA_RPCDIR)/gen/__db_set_re_pad_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_stat_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_stat_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_sync_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_sync_reply.java \
 	$(JAVA_RPCDIR)/gen/__db_truncate_msg.java \
 	$(JAVA_RPCDIR)/gen/__db_truncate_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_close_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_close_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_count_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_count_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_del_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_del_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_dup_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_dup_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_get_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_get_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_pget_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_pget_reply.java \
-	$(JAVA_RPCDIR)/gen/__dbc_put_msg.java \
-	$(JAVA_RPCDIR)/gen/__dbc_put_reply.java \
-	$(JAVA_RPCDIR)/gen/__env_cachesize_msg.java \
-	$(JAVA_RPCDIR)/gen/__env_cachesize_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_close_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_close_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_count_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_count_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_del_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_del_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_dup_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_dup_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_get_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_get_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_pget_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_pget_reply.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_put_msg.java \
+	$(JAVA_RPCDIR)/gen/__dbc_c_put_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_close_msg.java \
 	$(JAVA_RPCDIR)/gen/__env_close_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_create_msg.java \
@@ -602,10 +615,6 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/__env_dbremove_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_dbrename_msg.java \
 	$(JAVA_RPCDIR)/gen/__env_dbrename_reply.java \
-	$(JAVA_RPCDIR)/gen/__env_encrypt_msg.java \
-	$(JAVA_RPCDIR)/gen/__env_encrypt_reply.java \
-	$(JAVA_RPCDIR)/gen/__env_flags_msg.java \
-	$(JAVA_RPCDIR)/gen/__env_flags_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_get_cachesize_msg.java \
 	$(JAVA_RPCDIR)/gen/__env_get_cachesize_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_get_encrypt_flags_msg.java \
@@ -620,24 +629,30 @@ RPC_JAVASRV_SRCS=\
 	$(JAVA_RPCDIR)/gen/__env_open_reply.java \
 	$(JAVA_RPCDIR)/gen/__env_remove_msg.java \
 	$(JAVA_RPCDIR)/gen/__env_remove_reply.java \
+	$(JAVA_RPCDIR)/gen/__env_set_cachesize_msg.java \
+	$(JAVA_RPCDIR)/gen/__env_set_cachesize_reply.java \
+	$(JAVA_RPCDIR)/gen/__env_set_encrypt_msg.java \
+	$(JAVA_RPCDIR)/gen/__env_set_encrypt_reply.java \
+	$(JAVA_RPCDIR)/gen/__env_set_flags_msg.java \
+	$(JAVA_RPCDIR)/gen/__env_set_flags_reply.java \
+	$(JAVA_RPCDIR)/gen/__env_txn_begin_msg.java \
+	$(JAVA_RPCDIR)/gen/__env_txn_begin_reply.java \
+	$(JAVA_RPCDIR)/gen/__env_txn_recover_msg.java \
+	$(JAVA_RPCDIR)/gen/__env_txn_recover_reply.java \
 	$(JAVA_RPCDIR)/gen/__txn_abort_msg.java \
 	$(JAVA_RPCDIR)/gen/__txn_abort_reply.java \
-	$(JAVA_RPCDIR)/gen/__txn_begin_msg.java \
-	$(JAVA_RPCDIR)/gen/__txn_begin_reply.java \
 	$(JAVA_RPCDIR)/gen/__txn_commit_msg.java \
 	$(JAVA_RPCDIR)/gen/__txn_commit_reply.java \
 	$(JAVA_RPCDIR)/gen/__txn_discard_msg.java \
 	$(JAVA_RPCDIR)/gen/__txn_discard_reply.java \
 	$(JAVA_RPCDIR)/gen/__txn_prepare_msg.java \
 	$(JAVA_RPCDIR)/gen/__txn_prepare_reply.java \
-	$(JAVA_RPCDIR)/gen/__txn_recover_msg.java \
-	$(JAVA_RPCDIR)/gen/__txn_recover_reply.java \
 	$(JAVA_RPCDIR)/gen/db_server.java
 
 UTIL_PROGS=\
 	@ADDITIONAL_PROGS@ \
-	db_archive db_checkpoint db_deadlock db_dump db_load db_printlog \
-	db_recover db_stat db_upgrade db_verify
+	db_archive db_checkpoint db_deadlock db_dump db_hotbackup \
+	db_load db_printlog db_recover db_stat db_upgrade db_verify
 
 ##################################################
 # List of files installed into the library directory.
@@ -693,7 +708,7 @@ $(libdb_version): $(C_OBJS)
 
 # Shared C library.
 $(libso_target): $(C_OBJS)
-	$(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBSO_LIBS)
+	$(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBCSO_LIBS)
 	$(rm) -f $(libdb)
 	$(ln) -s .libs/$(libdb_version) $(libdb)
 
@@ -797,6 +812,11 @@ db_dump185: db_dump185@o@ @REPLACEMENT_OBJS@
 	$(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @REPLACEMENT_OBJS@ $(DB185LIB)
 	$(POSTLINK) $@
 
+db_hotbackup: db_hotbackup@o@ util_sig@o@ $(DEF_LIB)
+	$(CCLINK) -o $@ $(LDFLAGS) \
+	    db_hotbackup@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+	$(POSTLINK) $@
+
 db_load: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
 	$(CCLINK) -o $@ $(LDFLAGS) \
 	    db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
@@ -895,8 +915,8 @@ uninstall_utilities:
 		$(rm) -f $$i $$i.exe; \
 	done)
 
-DOCLIST=api_c api_cxx api_tcl collections gsg images index.html java ref \
-	sleepycat utility
+DOCLIST=api_c api_cxx api_tcl collections gsg gsg_txn images index.html \
+	java ref sleepycat utility
 
 install_docs:
 	@echo "Installing documentation: $(DESTDIR)$(docdir) ..."
@@ -907,19 +927,20 @@ install_docs:
 	@cd $(srcdir)/docs && $(cp) -pr $(DOCLIST) $(DESTDIR)$(docdir)/
 
 uninstall_docs:
-	@cd $(docdir) && $(rm) -rf $(DESTDIR)$(DOCLIST)
+	@cd $(DESTDIR)$(docdir) && $(rm) -rf $(DOCLIST)
 
 ##################################################
 # Remaining standard Makefile targets.
 ##################################################
 CLEAN_LIST=\
-	bench_001 berkeley_db_cxxsvc berkeley_db_javasvc berkeley_db_svc \
-	db_dump185 db_perf dbs ex_access ex_apprec ex_btrec ex_dbclient \
-	ex_env ex_lock ex_mpool ex_repquote ex_sequence ex_thread \
-	ex_tpcb example_database_load example_database_read excxx_access \
+	TxnGuide TxnGuideInMemory bench_001 berkeley_db_cxxsvc \
+	berkeley_db_javasvc berkeley_db_svc db_dump185 db_perf \
+	db_reptest dbs ex_access ex_apprec ex_btrec ex_dbclient ex_env \
+	ex_lock ex_mpool ex_repquote ex_sequence ex_thread ex_tpcb \
+	example_database_load example_database_read excxx_access \
 	excxx_btrec excxx_env excxx_example_database_load \
 	excxx_example_database_read excxx_lock excxx_mpool \
-	excxx_sequence excxx_tpcb
+	excxx_sequence excxx_tpcb txn_guide txn_guide_inmemory
 
 mostly-clean clean:
 	$(rm) -rf $(C_OBJS)
@@ -930,8 +951,8 @@ mostly-clean clean:
 	$(rm) -rf $(JAVA_RPCCLASSES) $(rpc_jarfile)
 	$(rm) -rf tags *@o@ *.o *.o.lock *.lo core *.core
 	$(rm) -rf ALL.OUT.* PARALLEL_TESTDIR.*
-	$(rm) -rf RUN_LOG RUNQUEUE TESTDIR TESTDIR.A
-	$(rm) -rf TEST.LIST logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST)
+	$(rm) -rf RUN_LOG RUNQUEUE TESTDIR TESTDIR.A TEST.LIST
+	$(rm) -rf logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST)
 
 REALCLEAN_LIST=\
 	Makefile confdefs.h config.cache config.log config.status \
@@ -1038,6 +1059,67 @@ db_perf: $(DBPERF_OBJS) $(DEF_LIB)
 	    $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS)
 	$(POSTLINK) $@
 
+db_reptest@o@: $(srcdir)/test_rep/db_reptest.c
+	$(CC) $(CFLAGS) $?
+reptest_accept@o@: $(srcdir)/test_rep/reptest_accept.c
+	$(CC) $(CFLAGS) $?
+reptest_client@o@: $(srcdir)/test_rep/reptest_client.c
+	$(CC) $(CFLAGS) $?
+reptest_config@o@: $(srcdir)/test_rep/reptest_config.c
+	$(CC) $(CFLAGS) $?
+reptest_dbs@o@: $(srcdir)/test_rep/reptest_dbs.c
+	$(CC) $(CFLAGS) $?
+reptest_debug@o@: $(srcdir)/test_rep/reptest_debug.c
+	$(CC) $(CFLAGS) $?
+reptest_elect@o@: $(srcdir)/test_rep/reptest_elect.c
+	$(CC) $(CFLAGS) $?
+reptest_env@o@: $(srcdir)/test_rep/reptest_env.c
+	$(CC) $(CFLAGS) $?
+reptest_exec@o@: $(srcdir)/test_rep/reptest_exec.c
+	$(CC) $(CFLAGS) $?
+reptest_file@o@: $(srcdir)/test_rep/reptest_file.c
+	$(CC) $(CFLAGS) $?
+reptest_key@o@: $(srcdir)/test_rep/reptest_key.c
+	$(CC) $(CFLAGS) $?
+reptest_master@o@: $(srcdir)/test_rep/reptest_master.c
+	$(CC) $(CFLAGS) $?
+reptest_misc@o@: $(srcdir)/test_rep/reptest_misc.c
+	$(CC) $(CFLAGS) $?
+reptest_msg_thread@o@: $(srcdir)/test_rep/reptest_msg_thread.c
+	$(CC) $(CFLAGS) $?
+reptest_op@o@: $(srcdir)/test_rep/reptest_op.c
+	$(CC) $(CFLAGS) $?
+reptest_parse@o@: $(srcdir)/test_rep/reptest_parse.c
+	$(CC) $(CFLAGS) $?
+reptest_rand@o@: $(srcdir)/test_rep/reptest_rand.c
+	$(CC) $(CFLAGS) $?
+reptest_send@o@: $(srcdir)/test_rep/reptest_send.c
+	$(CC) $(CFLAGS) $?
+reptest_site@o@: $(srcdir)/test_rep/reptest_site.c
+	$(CC) $(CFLAGS) $?
+reptest_socket@o@: $(srcdir)/test_rep/reptest_socket.c
+	$(CC) $(CFLAGS) $?
+reptest_spawn@o@: $(srcdir)/test_rep/reptest_spawn.c
+	$(CC) $(CFLAGS) $?
+reptest_thread@o@: $(srcdir)/test_rep/reptest_thread.c
+	$(CC) $(CFLAGS) $?
+reptest_txn@o@: $(srcdir)/test_rep/reptest_txn.c
+	$(CC) $(CFLAGS) $?
+reptest_util@o@: $(srcdir)/test_rep/reptest_util.c
+	$(CC) $(CFLAGS) $?
+DBREPTEST_OBJS=\
+	db_reptest@o@ reptest_accept@o@ reptest_client@o@ reptest_config@o@ \
+	reptest_dbs@o@ reptest_debug@o@ reptest_elect@o@ reptest_env@o@ \
+	reptest_exec@o@ reptest_file@o@ reptest_key@o@ reptest_master@o@ \
+	reptest_misc@o@ reptest_msg_thread@o@ reptest_op@o@ reptest_parse@o@ \
+	reptest_rand@o@ reptest_send@o@ reptest_site@o@ reptest_socket@o@ \
+	reptest_spawn@o@ reptest_thread@o@ reptest_txn@o@ reptest_util@o@
+
+db_reptest: $(DBREPTEST_OBJS) $(DEF_LIB)
+	$(CCLINK) -o $@ \
+	    $(LDFLAGS) $(DBREPTEST_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS)
+	$(POSTLINK) $@
+
 tm@o@: $(srcdir)/mutex/tm.c
 	$(CC) $(CFLAGS) $?
 tm: tm@o@ $(DEF_LIB)
@@ -1063,9 +1145,12 @@ ex_apprec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec.c
 	$(CC) $(CFLAGS) $?
 ex_apprec_auto@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_auto.c
 	$(CC) $(CFLAGS) $?
+ex_apprec_autop@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_autop.c
+	$(CC) $(CFLAGS) $?
 ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c
 	$(CC) $(CFLAGS) $?
-EX_APPREC_OBJS=ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_rec@o@
+EX_APPREC_OBJS=\
+	ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_autop@o@ ex_apprec_rec@o@
 ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB)
 	$(CCLINK) -o $@ \
 	    $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) $(TEST_LIBS) $(LIBS)
@@ -1156,6 +1241,18 @@ example_database_read: example_database_read@o@ gettingstarted_common@o@ \
 	    example_database_read@o@ gettingstarted_common@o@ $(DEF_LIB) $(LIBS)
 	$(POSTLINK) $@
 
+txn_guide_inmemory@o@: $(srcdir)/examples_c/txn_guide/txn_guide_inmemory.c
+	$(CC) $(CFLAGS) $?
+txn_guide_inmemory: txn_guide_inmemory@o@ $(DEF_LIB)
+	$(CCLINK) -o $@ $(LDFLAGS) txn_guide_inmemory@o@ $(DEF_LIB) $(LIBS)
+	$(POSTLINK) $@
+
+txn_guide@o@: $(srcdir)/examples_c/txn_guide/txn_guide.c
+	$(CC) $(CFLAGS) $?
+txn_guide: txn_guide@o@ $(DEF_LIB)
+	$(CCLINK) -o $@ $(LDFLAGS) txn_guide@o@ $(DEF_LIB) $(LIBS)
+	$(POSTLINK) $@
+
 ##################################################
 # Example programs for C++.
 ##################################################
@@ -1220,6 +1317,18 @@ excxx_example_database_read: \
 	    excxx_example_database_read@o@ MyDb@o@ $(DEF_LIB_CXX) $(LIBS)
 	$(POSTLINK) $@
 
+TxnGuideInMemory@o@: $(srcdir)/examples_cxx/txn_guide/TxnGuideInMemory.cpp
+	$(CXX) $(CXXFLAGS) $?
+TxnGuideInMemory: TxnGuideInMemory@o@ $(DEF_LIB_CXX)
+	$(CXXLINK) -o $@ $(LDFLAGS) TxnGuideInMemory@o@ $(DEF_LIB_CXX) $(LIBS)
+	$(POSTLINK) $@
+
+TxnGuide@o@: $(srcdir)/examples_cxx/txn_guide/TxnGuide.cpp
+	$(CXX) $(CXXFLAGS) $?
+TxnGuide: TxnGuide@o@ $(DEF_LIB_CXX)
+	$(CXXLINK) -o $@ $(LDFLAGS) TxnGuide@o@ $(DEF_LIB_CXX) $(LIBS)
+	$(POSTLINK) $@
+
 ##################################################
 # C API build rules.
 ##################################################
@@ -1255,6 +1364,8 @@ bt_split@o@: $(srcdir)/btree/bt_split.c
 	 $(CC) $(CFLAGS) $?
 bt_stat@o@: $(srcdir)/btree/bt_stat.c
 	 $(CC) $(CFLAGS) $?
+bt_compact@o@: $(srcdir)/btree/bt_compact.c
+	 $(CC) $(CFLAGS) $?
 bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c
 	 $(CC) $(CFLAGS) $?
 bt_verify@o@: $(srcdir)/btree/bt_verify.c
@@ -1287,6 +1398,8 @@ db_byteorder@o@: $(srcdir)/common/db_byteorder.c
 	 $(CC) $(CFLAGS) $?
 db_cam@o@: $(srcdir)/db/db_cam.c
 	 $(CC) $(CFLAGS) $?
+db_clock@o@: $(srcdir)/common/db_clock.c
+	 $(CC) $(CFLAGS) $?
 db_conv@o@: $(srcdir)/db/db_conv.c
 	 $(CC) $(CFLAGS) $?
 db_dispatch@o@: $(srcdir)/db/db_dispatch.c
@@ -1363,6 +1476,8 @@ dbreg_stat@o@: $(srcdir)/dbreg/dbreg_stat.c
 	 $(CC) $(CFLAGS) $?
 dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c
 	 $(CC) $(CFLAGS) $?
+env_failchk@o@: $(srcdir)/env/env_failchk.c
+	 $(CC) $(CFLAGS) $?
 env_file@o@: $(srcdir)/env/env_file.c
 	 $(CC) $(CFLAGS) $?
 env_method@o@: $(srcdir)/env/env_method.c
@@ -1373,6 +1488,8 @@ env_recover@o@: $(srcdir)/env/env_recover.c
 	 $(CC) $(CFLAGS) $?
 env_region@o@: $(srcdir)/env/env_region.c
 	 $(CC) $(CFLAGS) $?
+env_register@o@: $(srcdir)/env/env_register.c
+	 $(CC) $(CFLAGS) $?
 env_stat@o@: $(srcdir)/env/env_stat.c
 	 $(CC) $(CFLAGS) $?
 fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c
@@ -1425,6 +1542,8 @@ lock@o@: $(srcdir)/lock/lock.c
 	 $(CC) $(CFLAGS) $?
 lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
 	 $(CC) $(CFLAGS) $?
+lock_failchk@o@:$(srcdir)/lock/lock_failchk.c
+	 $(CC) $(CFLAGS) $?
 lock_id@o@:$(srcdir)/lock/lock_id.c
 	 $(CC) $(CFLAGS) $?
 lock_list@o@:$(srcdir)/lock/lock_list.c
@@ -1445,6 +1564,8 @@ log_archive@o@: $(srcdir)/log/log_archive.c
 	 $(CC) $(CFLAGS) $?
 log_compare@o@: $(srcdir)/log/log_compare.c
 	 $(CC) $(CFLAGS) $?
+log_debug@o@: $(srcdir)/log/log_debug.c
+	 $(CC) $(CFLAGS) $?
 log_get@o@: $(srcdir)/log/log_get.c
 	 $(CC) $(CFLAGS) $?
 log_method@o@: $(srcdir)/log/log_method.c
@@ -1481,16 +1602,22 @@ mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
 	 $(CC) $(CFLAGS) $?
 mt19937db@o@: $(srcdir)/crypto/mersenne/mt19937db.c
 	 $(CC) $(CFLAGS) $?
+mut_alloc@o@: $(srcdir)/mutex/mut_alloc.c
+	 $(CC) $(CFLAGS) $?
 mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
 	 $(CC) $(CFLAGS) $?
+mut_method@o@: $(srcdir)/mutex/mut_method.c
+	 $(CC) $(CFLAGS) $?
 mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
 	 $(CC) $(CFLAGS) $?
+mut_region@o@: $(srcdir)/mutex/mut_region.c
+	 $(CC) $(CFLAGS) $?
+mut_stat@o@: $(srcdir)/mutex/mut_stat.c
+	 $(CC) $(CFLAGS) $?
 mut_tas@o@: $(srcdir)/mutex/mut_tas.c
 	 $(CC) $(CFLAGS) $?
 mut_win32@o@: $(srcdir)/mutex/mut_win32.c
 	 $(CC) $(CFLAGS) $?
-mutex@o@: $(srcdir)/mutex/mutex.c
-	 $(CC) $(CFLAGS) $?
 os_abs@o@: $(srcdir)/@OSDIR@/os_abs.c
 	 $(CC) $(CFLAGS) $?
 os_alloc@o@: $(srcdir)/os/os_alloc.c
@@ -1505,6 +1632,8 @@ os_errno@o@: $(srcdir)/@OSDIR@/os_errno.c
 	 $(CC) $(CFLAGS) $?
 os_fid@o@: $(srcdir)/@OSDIR@/os_fid.c
 	 $(CC) $(CFLAGS) $?
+os_flock@o@: $(srcdir)/@OSDIR@/os_flock.c
+	 $(CC) $(CFLAGS) $?
 os_fsync@o@: $(srcdir)/@OSDIR@/os_fsync.c
 	 $(CC) $(CFLAGS) $?
 os_id@o@: $(srcdir)/os/os_id.c
@@ -1515,6 +1644,8 @@ os_map@o@: $(srcdir)/@OSDIR@/os_map.c
 	 $(CC) $(CFLAGS) $?
 os_method@o@: $(srcdir)/os/os_method.c
 	 $(CC) $(CFLAGS) $?
+os_mkdir@o@: $(srcdir)/os/os_mkdir.c
+	 $(CC) $(CFLAGS) $?
 os_oflags@o@: $(srcdir)/os/os_oflags.c
 	 $(CC) $(CFLAGS) $?
 os_open@o@: $(srcdir)/@OSDIR@/os_open.c
@@ -1573,6 +1704,10 @@ rep_autop@o@: $(srcdir)/rep/rep_autop.c
 	 $(CC) $(CFLAGS) $?
 rep_backup@o@: $(srcdir)/rep/rep_backup.c
 	 $(CC) $(CFLAGS) $?
+rep_elect@o@: $(srcdir)/rep/rep_elect.c
+	 $(CC) $(CFLAGS) $?
+rep_log@o@: $(srcdir)/rep/rep_log.c
+	 $(CC) $(CFLAGS) $?
 rep_method@o@: $(srcdir)/rep/rep_method.c
 	 $(CC) $(CFLAGS) $?
 rep_record@o@: $(srcdir)/rep/rep_record.c
@@ -1585,6 +1720,8 @@ rep_stat@o@: $(srcdir)/rep/rep_stat.c
 	 $(CC) $(CFLAGS) $?
 rep_util@o@: $(srcdir)/rep/rep_util.c
 	 $(CC) $(CFLAGS) $?
+rep_verify@o@: $(srcdir)/rep/rep_verify.c
+	 $(CC) $(CFLAGS) $?
 rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c
 	$(CC) $(CFLAGS) $?
 rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c
@@ -1603,6 +1740,10 @@ txn_auto@o@: $(srcdir)/txn/txn_auto.c
 	 $(CC) $(CFLAGS) $?
 txn_autop@o@: $(srcdir)/txn/txn_autop.c
 	 $(CC) $(CFLAGS) $?
+txn_chkpt@o@: $(srcdir)/txn/txn_chkpt.c
+	 $(CC) $(CFLAGS) $?
+txn_failchk@o@: $(srcdir)/txn/txn_failchk.c
+	 $(CC) $(CFLAGS) $?
 txn_method@o@: $(srcdir)/txn/txn_method.c
 	 $(CC) $(CFLAGS) $?
 txn_rec@o@: $(srcdir)/txn/txn_rec.c
@@ -1666,31 +1807,31 @@ db_java_wrap@o@: $(srcdir)/libdb_java/db_java_wrap.c
 # Tcl API build rules.
 ##################################################
 tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_db@o@: $(srcdir)/tcl/tcl_db.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_db_pkg@o@: $(srcdir)/tcl/tcl_db_pkg.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_dbcursor@o@: $(srcdir)/tcl/tcl_dbcursor.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_env@o@: $(srcdir)/tcl/tcl_env.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_internal@o@: $(srcdir)/tcl/tcl_internal.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_lock@o@: $(srcdir)/tcl/tcl_lock.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_log@o@: $(srcdir)/tcl/tcl_log.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_seq@o@: $(srcdir)/tcl/tcl_seq.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 tcl_util@o@: $(srcdir)/tcl/tcl_util.c
-	$(CC) $(CFLAGS) $(TCFLAGS) $?
+	$(CC) $(CFLAGS) $(TCL_INCLUDE_SPEC) $?
 
 ##################################################
 # RPC build rules.
@@ -1734,6 +1875,8 @@ db_dump@o@: $(srcdir)/db_dump/db_dump.c
 	$(CC) $(CFLAGS) $?
 db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
 	$(CC) $(DB185INC) $?
+db_hotbackup@o@: $(srcdir)/db_hotbackup/db_hotbackup.c
+	$(CC) $(CFLAGS) $?
 db_load@o@: $(srcdir)/db_load/db_load.c
 	$(CC) $(CFLAGS) $?
 db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
diff --git a/storage/bdb/dist/RELEASE b/storage/bdb/dist/RELEASE
index 35e16a4333b..e20f91edeb2 100644
--- a/storage/bdb/dist/RELEASE
+++ b/storage/bdb/dist/RELEASE
@@ -1,8 +1,8 @@
-# $Id: RELEASE,v 11.195 2004/11/09 01:30:41 bostic Exp $
+# $Id: RELEASE,v 12.17 2005/11/12 17:43:39 bostic Exp $
 
 DB_VERSION_MAJOR=4
-DB_VERSION_MINOR=3
-DB_VERSION_PATCH=28
+DB_VERSION_MINOR=4
+DB_VERSION_PATCH=16
 DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH"
 
 DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR`
diff --git a/storage/bdb/dist/aclocal/libtool.ac b/storage/bdb/dist/aclocal/libtool.ac
index 71dae456a52..771b86f32dd 100644
--- a/storage/bdb/dist/aclocal/libtool.ac
+++ b/storage/bdb/dist/aclocal/libtool.ac
@@ -1,26 +1,11 @@
 # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
-## Copyright 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004
+## Copyright 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005
 ## Free Software Foundation, Inc.
 ## Originally by Gordon Matzigkeit , 1996
 ##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful, but
-## WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-## General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-##
-## As a special exception to the GNU General Public License, if you
-## distribute this file as part of a program that contains a
-## configuration script generated by Autoconf, you may include it under
-## the same distribution terms that you use for the rest of that program.
+## This file is free software; the Free Software Foundation gives
+## unlimited permission to copy and/or distribute it, with or without
+## modifications, as long as this notice is preserved.
 
 # serial 47 AC_PROG_LIBTOOL
 
@@ -132,7 +117,7 @@ esac
 
 # Sed substitution that helps us do robust quoting.  It backslashifies
 # metacharacters that are still active within double-quoted strings.
-Xsed='sed -e s/^X//'
+Xsed='sed -e 1s/^X//'
 [sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g']
 
 # Same as above, but do not quote variable references.
@@ -152,7 +137,7 @@ rm="rm -f"
 default_ofile=libtool
 can_build_shared=yes
 
-# All known linkers require a `.a' archive for static linking (except M$VC,
+# All known linkers require a `.a' archive for static linking (except MSVC,
 # which needs '.lib').
 libext=a
 ltmain="$ac_aux_dir/ltmain.sh"
@@ -200,7 +185,7 @@ if test -n "$RANLIB"; then
   old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
 fi
 
-cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'`
+_LT_CC_BASENAME([$compiler])
 
 # Only perform the check for file, if the check method requires it
 case $deplibs_check_method in
@@ -247,6 +232,48 @@ compiler=$CC
 ])# _LT_AC_SYS_COMPILER
 
 
+# _LT_CC_BASENAME(CC)
+# -------------------
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+AC_DEFUN([_LT_CC_BASENAME],
+[for cc_temp in $1""; do
+  case $cc_temp in
+    compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+    distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+])
+
+
+# _LT_COMPILER_BOILERPLATE
+# ------------------------
+# Check for compiler boilerplate output or warnings with
+# the simple compiler test code.
+AC_DEFUN([_LT_COMPILER_BOILERPLATE],
+[ac_outfile=conftest.$ac_objext
+printf "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$rm conftest*
+])# _LT_COMPILER_BOILERPLATE
+
+
+# _LT_LINKER_BOILERPLATE
+# ----------------------
+# Check for linker boilerplate output or warnings with
+# the simple link test code.
+AC_DEFUN([_LT_LINKER_BOILERPLATE],
+[ac_outfile=conftest.$ac_objext
+printf "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$rm conftest*
+])# _LT_LINKER_BOILERPLATE
+
+
 # _LT_AC_SYS_LIBPATH_AIX
 # ----------------------
 # Links a minimal program and checks the executable
@@ -326,8 +353,8 @@ if test "X${echo_test_string+set}" != Xset; then
 # find a string as large as possible, as long as the shell can cope with it
   for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
     # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
-    if (echo_test_string="`eval $cmd`") 2>/dev/null &&
-       echo_test_string="`eval $cmd`" &&
+    if (echo_test_string=`eval $cmd`) 2>/dev/null &&
+       echo_test_string=`eval $cmd` &&
        (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
     then
       break
@@ -496,7 +523,7 @@ x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*|s390*-*linux*|sparc*-*linux*)
   # Find out which ABI we are using.
   echo 'int i;' > conftest.$ac_ext
   if AC_TRY_EVAL(ac_compile); then
-    case "`/usr/bin/file conftest.o`" in
+    case `/usr/bin/file conftest.o` in
     *32-bit*)
       case $host in
         x86_64-*linux*)
@@ -578,7 +605,7 @@ AC_CACHE_CHECK([$1], [$2],
    # with a dollar sign (not a hyphen), so the echo should work correctly.
    # The option is referenced via a variable to avoid confusing sed.
    lt_compile=`echo "$ac_compile" | $SED \
-   -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
    (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
@@ -588,8 +615,10 @@ AC_CACHE_CHECK([$1], [$2],
    echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
-     # So say no if there are warnings
-     if test ! -s conftest.err; then
+     # So say no if there are warnings other than the usual output.
+     $echo "X$_lt_compiler_boilerplate" | $Xsed >conftest.exp
+     $SED '/^$/d' conftest.err >conftest.er2
+     if test ! -s conftest.err || diff conftest.exp conftest.er2 >/dev/null; then
        $2=yes
      fi
    fi
@@ -615,11 +644,16 @@ AC_DEFUN([AC_LIBTOOL_LINKER_OPTION],
    LDFLAGS="$LDFLAGS $3"
    printf "$lt_simple_link_test_code" > conftest.$ac_ext
    if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
-     # The compiler can only warn and ignore the option if not recognized
+     # The linker can only warn and ignore the option if not recognized
      # So say no if there are warnings
      if test -s conftest.err; then
        # Append any errors to the config.log.
        cat conftest.err 1>&AS_MESSAGE_LOG_FD
+       $echo "X$_lt_linker_boilerplate" | $Xsed > conftest.exp
+       $SED '/^$/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         $2=yes
+       fi
      else
        $2=yes
      fi
@@ -678,20 +712,33 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
     lt_cv_sys_max_cmd_len=8192;
     ;;
 
-  netbsd* | freebsd* | openbsd* | darwin* )
+  netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
     # This has been around since 386BSD, at least.  Likely further.
     if test -x /sbin/sysctl; then
       lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
     elif test -x /usr/sbin/sysctl; then
       lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
     else
-      lt_cv_sys_max_cmd_len=65536 # usable default for *BSD
+      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
     fi
     # And add a safety zone
     lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
     ;;
-
- *)
+  osf*)
+    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics so lets avoid the loop below.
+    # First set a reasonable default.
+    lt_cv_sys_max_cmd_len=16384
+    #
+    if test -x /sbin/sysconfig; then
+      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+      esac
+    fi
+    ;;
+  *)
     # If test is not a shell built-in, we'll probably end up computing a
     # maximum length that is only half of the actual maximum length, but
     # we can't tell.
@@ -801,7 +848,7 @@ int main ()
 }]
 EOF
   if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
-    (./conftest; exit; ) 2>/dev/null
+    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
     lt_status=$?
     case x$lt_status in
       x$lt_dlno_uscore) $1 ;;
@@ -950,7 +997,7 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
    # Note that $ac_compile itself does not contain backslashes and begins
    # with a dollar sign (not a hyphen), so the echo should work correctly.
    lt_compile=`echo "$ac_compile" | $SED \
-   -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
    (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
@@ -962,11 +1009,13 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
    then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings
-     if test ! -s out/conftest.err; then
+     $echo "X$_lt_compiler_boilerplate" | $Xsed > out/conftest.exp
+     $SED '/^$/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.err || diff out/conftest.exp out/conftest.er2 >/dev/null; then
        _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
      fi
    fi
-   chmod u+w .
+   chmod u+w . 2>&AS_MESSAGE_LOG_FD
    $rm conftest*
    # SGI C++ compiler will create directory out/ii_files/ for
    # template instantiation
@@ -1226,7 +1275,8 @@ cygwin* | mingw* | pw32*)
       dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~
       dldir=$destdir/`dirname \$dlpath`~
       test -d \$dldir || mkdir -p \$dldir~
-      $install_prog $dir/$dlname \$dldir/$dlname'
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname'
     postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
       dlpath=$dir/\$dldll~
        $rm \$dlpath'
@@ -1256,7 +1306,7 @@ cygwin* | mingw* | pw32*)
       ;;
     pw32*)
       # pw32 DLLs use 'pw' prefix rather than 'lib'
-      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
       ;;
     esac
     ;;
@@ -1279,7 +1329,7 @@ darwin* | rhapsody*)
   soname_spec='${libname}${release}${major}$shared_ext'
   shlibpath_overrides_runpath=yes
   shlibpath_var=DYLD_LIBRARY_PATH
-  shrext_cmds='$(test .$module = .yes && echo .so || echo .dylib)'
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
   # Apple's gcc prints 'gcc -print-search-dirs' doesn't operate the same.
   if test "$GCC" = yes; then
     sys_lib_search_path_spec=`$CC -print-search-dirs | tr "\n" "$PATH_SEPARATOR" | sed -e 's/libraries:/@libraries:/' | tr "@" "\n" | grep "^libraries:" | sed -e "s/^libraries://" -e "s,=/,/,g" -e "s,$PATH_SEPARATOR, ,g" -e "s,.*,& /lib /usr/lib /usr/local/lib,g"`
@@ -1314,8 +1364,17 @@ kfreebsd*-gnu)
   dynamic_linker='GNU ld.so'
   ;;
 
-freebsd*)
-  objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[[123]]*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
   version_type=freebsd-$objformat
   case $version_type in
     freebsd-elf*)
@@ -1333,7 +1392,7 @@ freebsd*)
   freebsd2*)
     shlibpath_overrides_runpath=yes
     ;;
-  freebsd3.[01]* | freebsdelf3.[01]*)
+  freebsd3.[[01]]* | freebsdelf3.[[01]]*)
     shlibpath_overrides_runpath=yes
     hardcode_into_libs=yes
     ;;
@@ -1360,7 +1419,7 @@ hpux9* | hpux10* | hpux11*)
   version_type=sunos
   need_lib_prefix=no
   need_version=no
-  case "$host_cpu" in
+  case $host_cpu in
   ia64*)
     shrext_cmds='.so'
     hardcode_into_libs=yes
@@ -1459,7 +1518,7 @@ linux*)
 
   # Append ld.so.conf contents to the search path
   if test -f /etc/ld.so.conf; then
-    lt_ld_extra=`$SED -e 's/[:,\t]/ /g;s/=[^=]*$//;s/=[^= ]* / /g' /etc/ld.so.conf | tr '\n' ' '`
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '`
     sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
   fi
 
@@ -1522,7 +1581,11 @@ nto-qnx*)
 openbsd*)
   version_type=sunos
   need_lib_prefix=no
-  need_version=no
+  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+  case $host_os in
+    openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+    *)                         need_version=no  ;;
+  esac
   library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
   finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
   shlibpath_var=LD_LIBRARY_PATH
@@ -1688,7 +1751,9 @@ if test -f "$ltmain" && test -n "$tagnames"; then
 
       case $tagname in
       CXX)
-	if test -n "$CXX" && test "X$CXX" != "Xno"; then
+	if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+	    ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+	    (test "X$CXX" != "Xg++"))) ; then
 	  AC_LIBTOOL_LANG_CXX_CONFIG
 	else
 	  tagname=""
@@ -1750,7 +1815,7 @@ AC_DEFUN([AC_LIBTOOL_DLOPEN],
 
 # AC_LIBTOOL_WIN32_DLL
 # --------------------
-# declare package support for building win32 dll's
+# declare package support for building win32 DLLs
 AC_DEFUN([AC_LIBTOOL_WIN32_DLL],
 [AC_BEFORE([$0], [AC_LIBTOOL_SETUP])
 ])# AC_LIBTOOL_WIN32_DLL
@@ -1924,7 +1989,7 @@ dnl not every word.  This closes a longstanding sh security hole.
       if test -n "$file_magic_test_file"; then
 	case $deplibs_check_method in
 	"file_magic "*)
-	  file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
 	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
 	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
 	    $EGREP "$file_magic_regex" > /dev/null; then
@@ -2034,7 +2099,7 @@ AC_CACHE_VAL(lt_cv_path_LD,
     if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
       lt_cv_path_LD="$ac_dir/$ac_prog"
       # Check to see if the program is GNU ld.  I'd rather use --version,
-      # but apparently some GNU ld's only accept -v.
+      # but apparently some variants of GNU ld only accept -v.
       # Break only if it was the GNU/non-GNU ld that we prefer.
       case `"$lt_cv_path_LD" -v 2>&1 &1  /dev/null; then
     case $host_cpu in
     i*86 )
       # Not sure whether the presence of OpenBSD here was a mistake.
       # Let's accept both of them until this is cleared up.
-      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD)/i[[3-9]]86 (compact )?demand paged shared library'
+      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
       lt_cv_file_magic_cmd=/usr/bin/file
       lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
       ;;
@@ -2180,7 +2245,7 @@ gnu*)
 
 hpux10.20* | hpux11*)
   lt_cv_file_magic_cmd=/usr/bin/file
-  case "$host_cpu" in
+  case $host_cpu in
   ia64*)
     lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
     lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
@@ -2208,15 +2273,6 @@ irix5* | irix6* | nonstopux*)
 
 # This must be Linux ELF.
 linux*)
-  case $host_cpu in
-  alpha*|hppa*|i*86|ia64*|m68*|mips*|powerpc*|sparc*|s390*|sh*)
-    lt_cv_deplibs_check_method=pass_all ;;
-  *)
-    # glibc up to 2.1.1 does not perform some relocations on ARM
-    # this will be overridden with pass_all, but let us keep it just in case
-    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;;
-  esac
-  lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so`
   lt_cv_deplibs_check_method=pass_all
   ;;
 
@@ -2363,13 +2419,13 @@ esac
 # -----------------------------------
 # sets LIBLTDL to the link flags for the libltdl convenience library and
 # LTDLINCL to the include flags for the libltdl header and adds
-# --enable-ltdl-convenience to the configure arguments.  Note that LIBLTDL
-# and LTDLINCL are not AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called.  If
-# DIRECTORY is not provided, it is assumed to be `libltdl'.  LIBLTDL will
-# be prefixed with '${top_builddir}/' and LTDLINCL will be prefixed with
-# '${top_srcdir}/' (note the single quotes!).  If your package is not
-# flat and you're not using automake, define top_builddir and
-# top_srcdir appropriately in the Makefiles.
+# --enable-ltdl-convenience to the configure arguments.  Note that
+# AC_CONFIG_SUBDIRS is not called here.  If DIRECTORY is not provided,
+# it is assumed to be `libltdl'.  LIBLTDL will be prefixed with
+# '${top_builddir}/' and LTDLINCL will be prefixed with '${top_srcdir}/'
+# (note the single quotes!).  If your package is not flat and you're not
+# using automake, define top_builddir and top_srcdir appropriately in
+# the Makefiles.
 AC_DEFUN([AC_LIBLTDL_CONVENIENCE],
 [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
   case $enable_ltdl_convenience in
@@ -2388,13 +2444,13 @@ AC_DEFUN([AC_LIBLTDL_CONVENIENCE],
 # -----------------------------------
 # sets LIBLTDL to the link flags for the libltdl installable library and
 # LTDLINCL to the include flags for the libltdl header and adds
-# --enable-ltdl-install to the configure arguments.  Note that LIBLTDL
-# and LTDLINCL are not AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called.  If
-# DIRECTORY is not provided and an installed libltdl is not found, it is
-# assumed to be `libltdl'.  LIBLTDL will be prefixed with '${top_builddir}/'
-# and LTDLINCL will be prefixed with '${top_srcdir}/' (note the single
-# quotes!).  If your package is not flat and you're not using automake,
-# define top_builddir and top_srcdir appropriately in the Makefiles.
+# --enable-ltdl-install to the configure arguments.  Note that
+# AC_CONFIG_SUBDIRS is not called here.  If DIRECTORY is not provided,
+# and an installed libltdl is not found, it is assumed to be `libltdl'.
+# LIBLTDL will be prefixed with '${top_builddir}/'# and LTDLINCL with
+# '${top_srcdir}/' (note the single quotes!).  If your package is not
+# flat and you're not using automake, define top_builddir and top_srcdir
+# appropriately in the Makefiles.
 # In the future, this macro may have to be called after AC_PROG_LIBTOOL.
 AC_DEFUN([AC_LIBLTDL_INSTALLABLE],
 [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
@@ -2432,10 +2488,21 @@ AC_DEFUN([AC_LIBTOOL_CXX],
 # ---------------
 AC_DEFUN([_LT_AC_LANG_CXX],
 [AC_REQUIRE([AC_PROG_CXX])
-AC_REQUIRE([AC_PROG_CXXCPP])
+AC_REQUIRE([_LT_AC_PROG_CXXCPP])
 _LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX])
 ])# _LT_AC_LANG_CXX
 
+# _LT_AC_PROG_CXXCPP
+# ---------------
+AC_DEFUN([_LT_AC_PROG_CXXCPP],
+[
+AC_REQUIRE([AC_PROG_CXX])
+if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+    ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+    (test "X$CXX" != "Xg++"))) ; then
+  AC_PROG_CXXCPP
+fi
+])# _LT_AC_PROG_CXXCPP
 
 # AC_LIBTOOL_F77
 # --------------
@@ -2508,6 +2575,10 @@ lt_simple_link_test_code='int main(){return(0);}\n'
 
 _LT_AC_SYS_COMPILER
 
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
 #
 # Check for any special shared library compilation flags.
 #
@@ -2562,7 +2633,7 @@ test "$can_build_shared" = "no" && enable_shared=no
 
 # On AIX, shared libraries and static libraries use the same namespace, and
 # are all built from PIC.
-case "$host_os" in
+case $host_os in
 aix3*)
   test "$enable_shared" = yes && enable_static=no
   if test -n "$RANLIB"; then
@@ -2600,7 +2671,7 @@ AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)])
 AC_DEFUN([_LT_AC_LANG_CXX_CONFIG],
 [AC_LANG_PUSH(C++)
 AC_REQUIRE([AC_PROG_CXX])
-AC_REQUIRE([AC_PROG_CXXCPP])
+AC_REQUIRE([_LT_AC_PROG_CXXCPP])
 
 _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no
 _LT_AC_TAGVAR(allow_undefined_flag, $1)=
@@ -2629,7 +2700,7 @@ _LT_AC_TAGVAR(postdeps, $1)=
 _LT_AC_TAGVAR(compiler_lib_search_path, $1)=
 
 # Source file extension for C++ test sources.
-ac_ext=cc
+ac_ext=cpp
 
 # Object file extension for compiled C++ test sources.
 objext=o
@@ -2644,6 +2715,10 @@ lt_simple_link_test_code='int main(int, char *[]) { return(0); }\n'
 # ltmain only uses $CC for tagged configurations so make sure $CC is set.
 _LT_AC_SYS_COMPILER
 
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
 # Allow CC to be a program name with arguments.
 lt_save_CC=$CC
 lt_save_LD=$LD
@@ -2665,7 +2740,7 @@ test -z "${LDCXX+set}" || LD=$LDCXX
 CC=${CXX-"c++"}
 compiler=$CC
 _LT_AC_TAGVAR(compiler, $1)=$CC
-cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'`
+_LT_CC_BASENAME([$compiler])
 
 # We don't want -fno-exception wen compiling C++ code, so set the
 # no_builtin_flag separately
@@ -2772,7 +2847,7 @@ case $host_os in
     _LT_AC_TAGVAR(link_all_deplibs, $1)=yes
 
     if test "$GXX" = yes; then
-      case $host_os in aix4.[012]|aix4.[012].*)
+      case $host_os in aix4.[[012]]|aix4.[[012]].*)
       # We only want to do this on AIX 4.2 and lower, the check
       # below for broken collect2 doesn't work under 4.3+
 	collect2name=`${CC} -print-prog-name=collect2`
@@ -2793,6 +2868,9 @@ case $host_os in
 	fi
       esac
       shared_flag='-shared'
+      if test "$aix_use_runtimelinking" = yes; then
+	shared_flag="$shared_flag "'${wl}-G'
+      fi
     else
       # not using gcc
       if test "$host_cpu" = ia64; then
@@ -2838,7 +2916,7 @@ case $host_os in
 	# Exported symbols can be pulled into shared objects from archives
 	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' '
 	_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes
-	# This is similar to how AIX traditionally builds it's shared libraries.
+	# This is similar to how AIX traditionally builds its shared libraries.
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
       fi
     fi
@@ -2877,7 +2955,7 @@ case $host_os in
     fi
   ;;
       darwin* | rhapsody*)
-        case "$host_os" in
+        case $host_os in
         rhapsody* | darwin1.[[012]])
          _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress'
          ;;
@@ -2915,7 +2993,7 @@ case $host_os in
           _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring'
         fi
         _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags'
-        # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's
+        # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds
           if test "X$lt_int_apple_cc_single_mod" = Xyes ; then
             _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
           else
@@ -2923,12 +3001,12 @@ case $host_os in
           fi
             _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag  -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
       else
-      case "$cc_basename" in
+      case $cc_basename in
         xlc*)
          output_verbose_link_cmd='echo'
           _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring'
           _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags'
-          # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's
+          # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds
           _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
           _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag  -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
           ;;
@@ -2941,11 +3019,11 @@ case $host_os in
 
   dgux*)
     case $cc_basename in
-      ec++)
+      ec++*)
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
-      ghcx)
+      ghcx*)
 	# Green Hills C++ Compiler
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
@@ -2956,14 +3034,14 @@ case $host_os in
 	;;
     esac
     ;;
-  freebsd[12]*)
+  freebsd[[12]]*)
     # C++ shared libraries reported to be fairly broken before switch to ELF
     _LT_AC_TAGVAR(ld_shlibs, $1)=no
     ;;
   freebsd-elf*)
     _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no
     ;;
-  freebsd* | kfreebsd*-gnu)
+  freebsd* | kfreebsd*-gnu | dragonfly*)
     # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
     # conventions
     _LT_AC_TAGVAR(ld_shlibs, $1)=yes
@@ -2980,11 +3058,11 @@ case $host_os in
 				# location of the library.
 
     case $cc_basename in
-    CC)
+    CC*)
       # FIXME: insert proper C++ library support
       _LT_AC_TAGVAR(ld_shlibs, $1)=no
       ;;
-    aCC)
+    aCC*)
       _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
       # Commands to make compiler produce verbose output that lists
       # what "hidden" libraries, object files and flags are used when
@@ -2994,7 +3072,7 @@ case $host_os in
       # explicitly linking system object files so we need to strip them
       # from the output so that they don't get included in the library
       # dependencies.
-      output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
+      output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
       ;;
     *)
       if test "$GXX" = yes; then
@@ -3008,7 +3086,7 @@ case $host_os in
     ;;
   hpux10*|hpux11*)
     if test $with_gnu_ld = no; then
-      case "$host_cpu" in
+      case $host_cpu in
       hppa*64*)
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir'
@@ -3024,7 +3102,7 @@ case $host_os in
         ;;
       esac
     fi
-    case "$host_cpu" in
+    case $host_cpu in
     hppa*64*)
       _LT_AC_TAGVAR(hardcode_direct, $1)=no
       _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no
@@ -3045,12 +3123,12 @@ case $host_os in
     esac
 
     case $cc_basename in
-      CC)
+      CC*)
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
-      aCC)
-	case "$host_cpu" in
+      aCC*)
+	case $host_cpu in
 	hppa*64*|ia64*)
 	  _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs'
 	  ;;
@@ -3071,7 +3149,7 @@ case $host_os in
       *)
 	if test "$GXX" = yes; then
 	  if test $with_gnu_ld = no; then
-	    case "$host_cpu" in
+	    case $host_cpu in
 	    ia64*|hppa*64*)
 	      _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $linker_flags $libobjs $deplibs'
 	      ;;
@@ -3089,9 +3167,9 @@ case $host_os in
     ;;
   irix5* | irix6*)
     case $cc_basename in
-      CC)
+      CC*)
 	# SGI C++
-	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
 
 	# Archives containing C++ object files must be created using
 	# "CC -ar", where "CC" is the IRIX C++ compiler.  This is
@@ -3102,7 +3180,7 @@ case $host_os in
       *)
 	if test "$GXX" = yes; then
 	  if test "$with_gnu_ld" = no; then
-	    _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+	    _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
 	  else
 	    _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib'
 	  fi
@@ -3115,7 +3193,7 @@ case $host_os in
     ;;
   linux*)
     case $cc_basename in
-      KCC)
+      KCC*)
 	# Kuck and Associates, Inc. (KAI) C++ Compiler
 
 	# KCC will only create a shared library if the output file
@@ -3140,7 +3218,7 @@ case $host_os in
 	# "CC -Bstatic", where "CC" is the KAI C++ compiler.
 	_LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
 	;;
-      icpc)
+      icpc*)
 	# Intel C++
 	with_gnu_ld=yes
 	# version 8.0 and above of icpc choke on multiply defined symbols
@@ -3152,8 +3230,12 @@ case $host_os in
   	  _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
 	  ;;
 	*)  # Version 8.0 or newer
-  	  _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
-  	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+	  tmp_idyn=
+	  case $host_cpu in
+	    ia64*) tmp_idyn=' -i_dynamic';;
+	  esac
+  	  _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	  _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
 	  ;;
 	esac
 	_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no
@@ -3161,7 +3243,16 @@ case $host_os in
 	_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
 	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
 	;;
-      cxx)
+      pgCC*)
+        # Portland Group C++ compiler
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+  	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+
+	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+	_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive'
+        ;;
+      cxx*)
 	# Compaq C++
 	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname  -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
@@ -3192,7 +3283,7 @@ case $host_os in
     ;;
   mvs*)
     case $cc_basename in
-      cxx)
+      cxx*)
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
@@ -3218,6 +3309,8 @@ case $host_os in
     _LT_AC_TAGVAR(ld_shlibs, $1)=no
     ;;
   openbsd*)
+    _LT_AC_TAGVAR(hardcode_direct, $1)=yes
+    _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no
     _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
     _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
     if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
@@ -3229,7 +3322,7 @@ case $host_os in
     ;;
   osf3*)
     case $cc_basename in
-      KCC)
+      KCC*)
 	# Kuck and Associates, Inc. (KAI) C++ Compiler
 
 	# KCC will only create a shared library if the output file
@@ -3245,14 +3338,14 @@ case $host_os in
 	_LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
 
 	;;
-      RCC)
+      RCC*)
 	# Rational C++ 2.4.1
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
-      cxx)
+      cxx*)
 	_LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
-	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
 
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
 	_LT_AC_TAGVAR(hardcode_libdir_separator, $1)=:
@@ -3270,7 +3363,7 @@ case $host_os in
       *)
 	if test "$GXX" = yes && test "$with_gnu_ld" = no; then
 	  _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
-	  _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+	  _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
 
 	  _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
 	  _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=:
@@ -3289,7 +3382,7 @@ case $host_os in
     ;;
   osf4* | osf5*)
     case $cc_basename in
-      KCC)
+      KCC*)
 	# Kuck and Associates, Inc. (KAI) C++ Compiler
 
 	# KCC will only create a shared library if the output file
@@ -3304,17 +3397,17 @@ case $host_os in
 	# the KAI C++ compiler.
 	_LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs'
 	;;
-      RCC)
+      RCC*)
 	# Rational C++ 2.4.1
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
-      cxx)
+      cxx*)
 	_LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
-	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
 	  echo "-hidden">> $lib.exp~
-	  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp  `test -n "$verstring" && echo -set_version	$verstring` -update_registry $objdir/so_locations -o $lib~
+	  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp  `test -n "$verstring" && echo -set_version	$verstring` -update_registry ${output_objdir}/so_locations -o $lib~
 	  $rm $lib.exp'
 
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
@@ -3333,7 +3426,7 @@ case $host_os in
       *)
 	if test "$GXX" = yes && test "$with_gnu_ld" = no; then
 	  _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
-	 _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
+	 _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
 
 	  _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
 	  _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=:
@@ -3357,7 +3450,7 @@ case $host_os in
   sco*)
     _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no
     case $cc_basename in
-      CC)
+      CC*)
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
@@ -3369,12 +3462,12 @@ case $host_os in
     ;;
   sunos4*)
     case $cc_basename in
-      CC)
+      CC*)
 	# Sun C++ 4.x
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
 	;;
-      lcc)
+      lcc*)
 	# Lucid
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
@@ -3387,36 +3480,33 @@ case $host_os in
     ;;
   solaris*)
     case $cc_basename in
-      CC)
+      CC*)
 	# Sun C++ 4.2, 5.x and Centerline C++
+        _LT_AC_TAGVAR(archive_cmds_need_lc,$1)=yes
 	_LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs'
-	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -nolib -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag}  -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
-	$CC -G${allow_undefined_flag} -nolib ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
+	$CC -G${allow_undefined_flag}  ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
 
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
 	_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no
 	case $host_os in
-	  solaris2.[0-5] | solaris2.[0-5].*) ;;
+	  solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
 	  *)
 	    # The C++ compiler is used as linker so we must use $wl
 	    # flag to pass the commands to the underlying system
-	    # linker.
+	    # linker. We must also pass each convience library through
+	    # to the system linker between allextract/defaultextract.
+	    # The C++ compiler will combine linker options so we
+	    # cannot just pass the convience library names through
+	    # without $wl.
 	    # Supported since Solaris 2.6 (maybe 2.5.1?)
-	    _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+	    _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract'
 	    ;;
 	esac
 	_LT_AC_TAGVAR(link_all_deplibs, $1)=yes
 
-	# Commands to make compiler produce verbose output that lists
-	# what "hidden" libraries, object files and flags are used when
-	# linking a shared library.
-	#
-	# There doesn't appear to be a way to prevent this compiler from
-	# explicitly linking system object files so we need to strip them
-	# from the output so that they don't get included in the library
-	# dependencies.
-	output_verbose_link_cmd='templist=`$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep "\-[[LR]]"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
+	output_verbose_link_cmd='echo'
 
 	# Archives containing C++ object files must be created using
 	# "CC -xar", where "CC" is the Sun C++ compiler.  This is
@@ -3424,7 +3514,7 @@ case $host_os in
 	# in the archive.
 	_LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
 	;;
-      gcx)
+      gcx*)
 	# Green Hills C++ Compiler
 	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
 
@@ -3467,7 +3557,7 @@ case $host_os in
     ;;
   tandem*)
     case $cc_basename in
-      NCC)
+      NCC*)
 	# NonStop-UX NCC 3.20
 	# FIXME: insert proper C++ library support
 	_LT_AC_TAGVAR(ld_shlibs, $1)=no
@@ -3577,7 +3667,7 @@ if AC_TRY_EVAL(ac_compile); then
   # The `*' in the case matches for architectures that use `case' in
   # $output_verbose_cmd can trigger glob expansion during the loop
   # eval without this substitution.
-  output_verbose_link_cmd="`$echo \"X$output_verbose_link_cmd\" | $Xsed -e \"$no_glob_subst\"`"
+  output_verbose_link_cmd=`$echo "X$output_verbose_link_cmd" | $Xsed -e "$no_glob_subst"`
 
   for p in `eval $output_verbose_link_cmd`; do
     case $p in
@@ -3653,6 +3743,21 @@ fi
 
 $rm -f confest.$objext
 
+# PORTME: override above test on systems where it is broken
+ifelse([$1],[CXX],
+[case $host_os in
+solaris*)
+  case $cc_basename in
+  CC*)
+    # Adding this requires a known-good setup of shared libraries for
+    # Sun compiler versions before 5.6, else PIC objects from an old
+    # archive will be linked into the output, leading to subtle bugs.
+    _LT_AC_TAGVAR(postdeps,$1)='-lCstd -lCrun'
+    ;;
+  esac
+esac
+])
+
 case " $_LT_AC_TAGVAR(postdeps, $1) " in
 *" -lc "*) _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no ;;
 esac
@@ -3703,12 +3808,16 @@ lt_simple_link_test_code="      program t\n      end\n"
 # ltmain only uses $CC for tagged configurations so make sure $CC is set.
 _LT_AC_SYS_COMPILER
 
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
 # Allow CC to be a program name with arguments.
 lt_save_CC="$CC"
 CC=${F77-"f77"}
 compiler=$CC
 _LT_AC_TAGVAR(compiler, $1)=$CC
-cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'`
+_LT_CC_BASENAME([$compiler])
 
 AC_MSG_CHECKING([if libtool supports shared libraries])
 AC_MSG_RESULT([$can_build_shared])
@@ -3718,7 +3827,7 @@ test "$can_build_shared" = "no" && enable_shared=no
 
 # On AIX, shared libraries and static libraries use the same namespace, and
 # are all built from PIC.
-case "$host_os" in
+case $host_os in
 aix3*)
   test "$enable_shared" = yes && enable_static=no
   if test -n "$RANLIB"; then
@@ -3727,7 +3836,9 @@ aix3*)
   fi
   ;;
 aix4* | aix5*)
-  test "$enable_shared" = yes && enable_static=no
+  if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+    test "$enable_shared" = yes && enable_static=no
+  fi
   ;;
 esac
 AC_MSG_RESULT([$enable_shared])
@@ -3778,20 +3889,27 @@ _LT_AC_TAGVAR(objext, $1)=$objext
 lt_simple_compile_test_code="class foo {}\n"
 
 # Code to be used in simple link tests
-lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }\n'
+lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }\n'
 
 # ltmain only uses $CC for tagged configurations so make sure $CC is set.
 _LT_AC_SYS_COMPILER
 
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
 # Allow CC to be a program name with arguments.
 lt_save_CC="$CC"
 CC=${GCJ-"gcj"}
 compiler=$CC
 _LT_AC_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
 
 # GCJ did not exist at the time GCC didn't implicitly link libc in.
 _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no
 
+_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+
 ## CAVEAT EMPTOR:
 ## There is no encapsulation within the following macros, do not change
 ## the running order or otherwise move them around unless you know exactly
@@ -3838,11 +3956,16 @@ lt_simple_link_test_code="$lt_simple_compile_test_code"
 # ltmain only uses $CC for tagged configurations so make sure $CC is set.
 _LT_AC_SYS_COMPILER
 
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
 # Allow CC to be a program name with arguments.
 lt_save_CC="$CC"
 CC=${RC-"windres"}
 compiler=$CC
 _LT_AC_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
 _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
 
 AC_LIBTOOL_CONFIG($1)
@@ -3978,7 +4101,7 @@ ifelse([$1], [],
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 #
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
@@ -3989,7 +4112,7 @@ ifelse([$1], [],
 SED=$lt_SED
 
 # Sed that helps us avoid accidentally triggering echo(1) options like -n.
-Xsed="$SED -e s/^X//"
+Xsed="$SED -e 1s/^X//"
 
 # The HP-UX ksh and POSIX shell print the target directory to stdout
 # if CDPATH is set.
@@ -4024,6 +4147,12 @@ fast_install=$enable_fast_install
 # The host system.
 host_alias=$host_alias
 host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
 
 # An echo program that does not interpret backslashes.
 echo=$lt_echo
@@ -4100,7 +4229,7 @@ max_cmd_len=$lt_cv_sys_max_cmd_len
 # Does compiler simultaneously support -c and -o options?
 compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)
 
-# Must we lock files when doing compilation ?
+# Must we lock files when doing compilation?
 need_locks=$lt_need_locks
 
 # Do we need the lib prefix for modules?
@@ -4374,9 +4503,6 @@ symcode='[[BCDEGRST]]'
 # Regexp to match symbols that can be accessed directly from C.
 sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
 
-# Transform the above into a raw symbol and a C symbol.
-symxfrm='\1 \2\3 \3'
-
 # Transform an extracted symbol line into a proper C declaration
 lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'"
 
@@ -4398,6 +4524,13 @@ hpux*) # Its linker distinguishes data from code symbols
   lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
   lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/  {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/  {\"\2\", (lt_ptr) \&\2},/p'"
   ;;
+linux*)
+  if test "$host_cpu" = ia64; then
+    symcode='[[ABCDGIRSTW]]'
+    lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+    lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/  {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/  {\"\2\", (lt_ptr) \&\2},/p'"
+  fi
+  ;;
 irix* | nonstopux*)
   symcode='[[BCDEGRST]]'
   ;;
@@ -4429,8 +4562,11 @@ esac
 # Try without a prefix undercore, then with it.
 for ac_symprfx in "" "_"; do
 
+  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+  symxfrm="\\1 $ac_symprfx\\2 \\2"
+
   # Write the raw and C identifiers.
-  lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ 	]]\($symcode$symcode*\)[[ 	]][[ 	]]*\($ac_symprfx\)$sympat$opt_cr$/$symxfrm/p'"
+  lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ 	]]\($symcode$symcode*\)[[ 	]][[ 	]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
 
   # Check to see that the pipe works correctly.
   pipe_works=no
@@ -4594,7 +4730,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
     hpux*)
       # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
       # not for PA HP-UX.
-      case "$host_cpu" in
+      case $host_cpu in
       hppa*64*|ia64*)
 	;;
       *)
@@ -4619,7 +4755,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       chorus*)
 	case $cc_basename in
-	cxch68)
+	cxch68*)
 	  # Green Hills C++ Compiler
 	  # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
 	  ;;
@@ -4628,7 +4764,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
        darwin*)
          # PIC is the default on this platform
          # Common symbols not allowed in MH_DYLIB files
-         case "$cc_basename" in
+         case $cc_basename in
            xlc*)
            _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon'
            _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
@@ -4637,10 +4773,10 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
        ;;
       dgux*)
 	case $cc_basename in
-	  ec++)
+	  ec++*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
 	    ;;
-	  ghcx)
+	  ghcx*)
 	    # Green Hills C++ Compiler
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
 	    ;;
@@ -4648,22 +4784,22 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	    ;;
 	esac
 	;;
-      freebsd* | kfreebsd*-gnu)
+      freebsd* | kfreebsd*-gnu | dragonfly*)
 	# FreeBSD uses GNU C++
 	;;
       hpux9* | hpux10* | hpux11*)
 	case $cc_basename in
-	  CC)
+	  CC*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive"
 	    if test "$host_cpu" != ia64; then
 	      _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
 	    fi
 	    ;;
-	  aCC)
+	  aCC*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive"
-	    case "$host_cpu" in
+	    case $host_cpu in
 	    hppa*64*|ia64*)
 	      # +Z the default
 	      ;;
@@ -4678,7 +4814,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       irix5* | irix6* | nonstopux*)
 	case $cc_basename in
-	  CC)
+	  CC*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
 	    # CC pic flag -KPIC is the default.
@@ -4689,18 +4825,24 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       linux*)
 	case $cc_basename in
-	  KCC)
+	  KCC*)
 	    # KAI C++ Compiler
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
 	    ;;
-	  icpc)
+	  icpc* | ecpc*)
 	    # Intel C++
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static'
 	    ;;
-	  cxx)
+	  pgCC*)
+	    # Portland Group C++ compiler.
+	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	  cxx*)
 	    # Compaq C++
 	    # Make sure the PIC flag is empty.  It appears that all Alpha
 	    # Linux and Compaq Tru64 Unix objects are PIC.
@@ -4717,7 +4859,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       mvs*)
 	case $cc_basename in
-	  cxx)
+	  cxx*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
 	    ;;
 	  *)
@@ -4728,14 +4870,14 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       osf3* | osf4* | osf5*)
 	case $cc_basename in
-	  KCC)
+	  KCC*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
 	    ;;
-	  RCC)
+	  RCC*)
 	    # Rational C++ 2.4.1
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
 	    ;;
-	  cxx)
+	  cxx*)
 	    # Digital/Compaq C++
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	    # Make sure the PIC flag is empty.  It appears that all Alpha
@@ -4751,7 +4893,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       sco*)
 	case $cc_basename in
-	  CC)
+	  CC*)
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
 	    ;;
 	  *)
@@ -4760,13 +4902,13 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       solaris*)
 	case $cc_basename in
-	  CC)
+	  CC*)
 	    # Sun C++ 4.2, 5.x and Centerline C++
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
 	    _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
 	    ;;
-	  gcx)
+	  gcx*)
 	    # Green Hills C++ Compiler
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
 	    ;;
@@ -4776,12 +4918,12 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       sunos4*)
 	case $cc_basename in
-	  CC)
+	  CC*)
 	    # Sun C++ 4.x
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
 	    _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
 	    ;;
-	  lcc)
+	  lcc*)
 	    # Lucid
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
 	    ;;
@@ -4791,7 +4933,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	;;
       tandem*)
 	case $cc_basename in
-	  NCC)
+	  NCC*)
 	    # NonStop-UX NCC 3.20
 	    _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
 	    ;;
@@ -4862,7 +5004,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
     hpux*)
       # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
       # not for PA HP-UX.
-      case "$host_cpu" in
+      case $host_cpu in
       hppa*64*|ia64*)
 	# +Z the default
 	;;
@@ -4891,7 +5033,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
       darwin*)
         # PIC is the default on this platform
         # Common symbols not allowed in MH_DYLIB files
-       case "$cc_basename" in
+       case $cc_basename in
          xlc*)
          _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon'
          _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
@@ -4909,7 +5051,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
       _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
       # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
       # not for PA HP-UX.
-      case "$host_cpu" in
+      case $host_cpu in
       hppa*64*|ia64*)
 	# +Z the default
 	;;
@@ -4933,12 +5075,19 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
       ;;
 
     linux*)
-      case $CC in
+      case $cc_basename in
       icc* | ecc*)
 	_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
 	_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
 	_LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static'
         ;;
+      pgcc* | pgf77* | pgf90* | pgf95*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	_LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+        ;;
       ccc*)
         _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
         # All Alpha code is PIC.
@@ -4959,9 +5108,14 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
       ;;
 
     solaris*)
-      _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
       _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
       _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95*)
+	_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+      *)
+	_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+      esac
       ;;
 
     sunos4*)
@@ -4983,6 +5137,11 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
       fi
       ;;
 
+    unicos*)
+      _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      ;;
+
     uts4*)
       _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
       _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
@@ -5010,7 +5169,7 @@ if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then
     [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=
      _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
 fi
-case "$host_os" in
+case $host_os in
   # For platforms which do not support PIC, -DPIC is meaningless:
   *djgpp*)
     _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=
@@ -5043,7 +5202,7 @@ ifelse([$1],[CXX],[
     _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds"
   ;;
   cygwin* | mingw*)
-    _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols'
+    _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/;/^.* __nm__/s/^.* __nm__\([[^ ]]*\) [[^ ]]*/\1 DATA/;/^I /d;/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols'
   ;;
   *)
     _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
@@ -5086,7 +5245,8 @@ ifelse([$1],[CXX],[
   # rely on this symbol name, it's probably fine to never include it in
   # preloaded symbol tables.
   extract_expsyms_cmds=
-
+  # Just being paranoid about ensuring that cc_basename is set.
+  _LT_CC_BASENAME([$compiler])
   case $host_os in
   cygwin* | mingw* | pw32*)
     # FIXME: the MSVC++ port hasn't been tested in a loooong time
@@ -5106,6 +5266,27 @@ ifelse([$1],[CXX],[
     # If archive_cmds runs LD, not CC, wlarc should be empty
     wlarc='${wl}'
 
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+    _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et. al.
+    if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then
+	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+      else
+  	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v 2>/dev/null` in
+      *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
+
     # See if GNU ld supports shared libraries.
     case $host_os in
     aix3* | aix4* | aix5*)
@@ -5156,7 +5337,7 @@ EOF
       _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported
       _LT_AC_TAGVAR(always_export_symbols, $1)=no
       _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
-      _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols'
+      _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]] /s/.* \([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]] /s/.* //'\'' | sort | uniq > $export_symbols'
 
       if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then
         _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000 ${wl}--out-implib,$lib'
@@ -5170,7 +5351,38 @@ EOF
 	fi~
 	$CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--image-base=0x10000000  ${wl}--out-implib,$lib'
       else
-	ld_shlibs=no
+	_LT_AC_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    linux*)
+      if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+	tmp_addflag=
+	case $cc_basename,$host_cpu in
+	pgcc*)				# Portland Group C compiler
+	  _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95*)	# Portland Group f77 and f90 compilers
+	  _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)		# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	esac
+	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+	if test $supports_anon_versioning = yes; then
+	  _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~
+  cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+  $echo "local: *; };" >> $output_objdir/$libname.ver~
+	  $CC -shared'"$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+	fi
+      else
+	_LT_AC_TAGVAR(ld_shlibs, $1)=no
       fi
       ;;
 
@@ -5212,31 +5424,6 @@ EOF
       _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no
       ;;
 
-  linux*)
-    if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
-        tmp_archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
-	_LT_AC_TAGVAR(archive_cmds, $1)="$tmp_archive_cmds"
-      supports_anon_versioning=no
-      case `$LD -v 2>/dev/null` in
-        *\ [01].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
-        *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
-        *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
-        *\ 2.11.*) ;; # other 2.11 versions
-        *) supports_anon_versioning=yes ;;
-      esac
-      if test $supports_anon_versioning = yes; then
-        _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~
-cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
-$echo "local: *; };" >> $output_objdir/$libname.ver~
-        $CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
-      else
-        _LT_AC_TAGVAR(archive_expsym_cmds, $1)="$tmp_archive_cmds"
-      fi
-    else
-      _LT_AC_TAGVAR(ld_shlibs, $1)=no
-    fi
-    ;;
-
     *)
       if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
 	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
@@ -5247,16 +5434,11 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       ;;
     esac
 
-    if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = yes; then
-      runpath_var=LD_RUN_PATH
-      _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
-      _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
-      # ancient GNU ld didn't support --whole-archive et. al.
-      if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then
- 	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
-      else
-  	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)=
-      fi
+    if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then
+      runpath_var=
+      _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=
+      _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=
+      _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=
     fi
   else
     # PORTME fill in a description of your system's linker (not GNU ld)
@@ -5320,7 +5502,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       _LT_AC_TAGVAR(link_all_deplibs, $1)=yes
 
       if test "$GCC" = yes; then
-	case $host_os in aix4.[012]|aix4.[012].*)
+	case $host_os in aix4.[[012]]|aix4.[[012]].*)
 	# We only want to do this on AIX 4.2 and lower, the check
 	# below for broken collect2 doesn't work under 4.3+
 	  collect2name=`${CC} -print-prog-name=collect2`
@@ -5341,6 +5523,9 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 	  fi
 	esac
 	shared_flag='-shared'
+	if test "$aix_use_runtimelinking" = yes; then
+	  shared_flag="$shared_flag "'${wl}-G'
+	fi
       else
 	# not using gcc
 	if test "$host_cpu" = ia64; then
@@ -5385,7 +5570,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 	  # Exported symbols can be pulled into shared objects from archives
 	  _LT_AC_TAGVAR(whole_archive_flag_spec, $1)=' '
 	  _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes
-	  # This is similar to how AIX traditionally builds it's shared libraries.
+	  # This is similar to how AIX traditionally builds its shared libraries.
 	  _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
 	fi
       fi
@@ -5420,12 +5605,12 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true'
       # FIXME: Should let the user specify the lib program.
       _LT_AC_TAGVAR(old_archive_cmds, $1)='lib /OUT:$oldlib$oldobjs$old_deplibs'
-      fix_srcfile_path='`cygpath -w "$srcfile"`'
+      _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`'
       _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
       ;;
 
     darwin* | rhapsody*)
-      case "$host_os" in
+      case $host_os in
         rhapsody* | darwin1.[[012]])
          _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress'
          ;;
@@ -5454,16 +5639,16 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
     	output_verbose_link_cmd='echo'
         _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring'
       _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags'
-      # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's
+      # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds
       _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
       _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag  -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
     else
-      case "$cc_basename" in
+      case $cc_basename in
         xlc*)
          output_verbose_link_cmd='echo'
          _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $verstring'
          _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags'
-          # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin ld's
+          # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds
          _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
           _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[    ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag  -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}'
           ;;
@@ -5504,7 +5689,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       ;;
 
     # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
-    freebsd* | kfreebsd*-gnu)
+    freebsd* | kfreebsd*-gnu | dragonfly*)
       _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
       _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
       _LT_AC_TAGVAR(hardcode_direct, $1)=yes
@@ -5529,7 +5714,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 
     hpux10* | hpux11*)
       if test "$GCC" = yes -a "$with_gnu_ld" = no; then
-	case "$host_cpu" in
+	case $host_cpu in
 	hppa*64*|ia64*)
 	  _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
 	  ;;
@@ -5538,7 +5723,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 	  ;;
 	esac
       else
-	case "$host_cpu" in
+	case $host_cpu in
 	hppa*64*|ia64*)
 	  _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags'
 	  ;;
@@ -5548,7 +5733,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 	esac
       fi
       if test "$with_gnu_ld" = no; then
-	case "$host_cpu" in
+	case $host_cpu in
 	hppa*64*)
 	  _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
 	  _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir'
@@ -5661,7 +5846,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
 	_LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
 	_LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~
-	$LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp'
+	$LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp'
 
 	# Both c and cxx compiler support -rpath directly
 	_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
@@ -5680,10 +5865,12 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
     solaris*)
       _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text'
       if test "$GCC" = yes; then
+	wlarc='${wl}'
 	_LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
 	  $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp'
       else
+	wlarc=''
 	_LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
 	_LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
   	$LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
@@ -5692,8 +5879,18 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no
       case $host_os in
       solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
-      *) # Supported since Solaris 2.6 (maybe 2.5.1?)
-	_LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;;
+      *)
+ 	# The compiler driver will combine linker options so we
+ 	# cannot just pass the convience library names through
+ 	# without $wl, iff we do not link with $LD.
+ 	# Luckily, gcc supports the same syntax we need for Sun Studio.
+ 	# Supported since Solaris 2.6 (maybe 2.5.1?)
+ 	case $wlarc in
+ 	'')
+ 	  _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;;
+ 	*)
+ 	  _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}-z ${wl}defaultextract' ;;
+ 	esac ;;
       esac
       _LT_AC_TAGVAR(link_all_deplibs, $1)=yes
       ;;
@@ -5956,7 +6153,7 @@ lt_ac_count=0
 # Add /usr/xpg4/bin/sed as it is typically found on Solaris
 # along with /bin/sed that truncates output.
 for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
-  test ! -f $lt_ac_sed && break
+  test ! -f $lt_ac_sed && continue
   cat /dev/null > conftest.in
   lt_ac_count=0
   echo $ECHO_N "0123456789$ECHO_C" >conftest.in
diff --git a/storage/bdb/dist/aclocal/mutex.ac b/storage/bdb/dist/aclocal/mutex.ac
index 959ed4ebe56..149bda737b2 100644
--- a/storage/bdb/dist/aclocal/mutex.ac
+++ b/storage/bdb/dist/aclocal/mutex.ac
@@ -1,4 +1,4 @@
-# $Id: mutex.ac,v 11.46 2004/07/09 16:23:19 bostic Exp $
+# $Id: mutex.ac,v 12.6 2005/11/04 20:19:29 bostic Exp $
 
 # POSIX pthreads tests: inter-process safe and intra-process only.
 AC_DEFUN(AM_PTHREADS_SHARED, [
@@ -353,6 +353,28 @@ AC_TRY_COMPILE(,[
 ], [db_cv_mutex="ARM/gcc-assembly"])
 fi
 
+# MIPS/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__)
+	exit(0);
+#else
+	FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="MIPS/gcc-assembly"])
+fi
+
+# MIPS/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__mips) || defined(__mips__)) && defined(__GNUC__)
+	exit(0);
+#else
+	FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="MIPS/gcc-assembly"])
+fi
+
 # PaRisc/gcc: HP/UX
 if test "$db_cv_mutex" = no; then
 AC_TRY_COMPILE(,[
@@ -400,7 +422,7 @@ fi
 # x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
 if test "$db_cv_mutex" = no; then
 AC_TRY_COMPILE(,[
-#if (defined(i386) || defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+#if (defined(i386) || defined(__i386__)) && defined(__GNUC__)
 	exit(0);
 #else
 	FAIL TO COMPILE/LINK
@@ -408,6 +430,17 @@ AC_TRY_COMPILE(,[
 ], [db_cv_mutex="x86/gcc-assembly"])
 fi
 
+# x86_64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(x86_64) || defined(__x86_64__)) && defined(__GNUC__)
+	exit(0);
+#else
+	FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="x86_64/gcc-assembly"])
+fi
+
 # S390/cc: IBM OS/390 Unix
 if test "$db_cv_mutex" = no; then
 AC_TRY_COMPILE(,[
@@ -430,7 +463,7 @@ AC_TRY_COMPILE(,[
 ], [db_cv_mutex="S390/gcc-assembly"])
 fi
 
-# ia86/gcc: Linux
+# ia64/gcc: Linux
 if test "$db_cv_mutex" = no; then
 AC_TRY_COMPILE(,[
 #if defined(__ia64) && defined(__GNUC__)
@@ -458,6 +491,10 @@ if test "$db_cv_mutex" = no; then
 fi
 ])
 
+AC_SUBST(thread_h_decl)
+AC_SUBST(db_threadid_t_decl)
+db_threadid_t_decl=notset
+
 case "$db_cv_mutex" in
 68K/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 			AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY)
@@ -492,10 +529,14 @@ ia64/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 			AH_TEMPLATE(HAVE_MUTEX_IA64_GCC_ASSEMBLY,
 			    [Define to 1 to use the GCC compiler and IA64 assembly language mutexes.]);;
 POSIX/pthreads)		ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <pthread.h>"
+			db_threadid_t_decl="typedef pthread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_PTHREADS)
 			AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
 			    [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
 POSIX/pthreads/private)	ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <pthread.h>"
+			db_threadid_t_decl="typedef pthread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_PTHREADS)
 			AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
 			    [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
@@ -503,17 +544,19 @@ POSIX/pthreads/private)	ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
 			AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
 			    [Define to 1 to configure mutexes intra-process only.]);;
 POSIX/pthreads/library)	LIBS="$LIBS -lpthread"
-			LIBJSO_LIBS="$LIBJSO_LIBS -lpthread"
-			LIBTSO_LIBS="$LIBTSO_LIBS -lpthread"
+			LIBSO_LIBS="$LIBSO_LIBS -lpthread"
 			ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <pthread.h>"
+			db_threadid_t_decl="typedef pthread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_PTHREADS)
 			AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
 			    [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
 POSIX/pthreads/library/private)
 			LIBS="$LIBS -lpthread"
-			LIBJSO_LIBS="$LIBJSO_LIBS -lpthread"
-			LIBTSO_LIBS="$LIBTSO_LIBS -lpthread"
+			LIBSO_LIBS="$LIBSO_LIBS -lpthread"
 			ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <pthread.h>"
+			db_threadid_t_decl="typedef pthread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_PTHREADS)
 			AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
 			    [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
@@ -551,6 +594,8 @@ Solaris/_lock_try)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 			AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LOCK_TRY,
 			    [Define to 1 to use the Solaris _lock_XXX mutexes.]);;
 Solaris/lwp)		ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <pthread.h>"
+			db_threadid_t_decl="typedef pthread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP)
 			AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LWP,
 			    [Define to 1 to use the Solaris lwp threads mutexes.]);;
@@ -562,13 +607,17 @@ Tru64/cc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 			AC_DEFINE(HAVE_MUTEX_TRU64_CC_ASSEMBLY)
 			AH_TEMPLATE(HAVE_MUTEX_TRU64_CC_ASSEMBLY,
 			    [Define to 1 to use the CC compiler and Tru64 assembly language mutexes.]);;
-
 UI/threads)		ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <thread.h>"
+			db_threadid_t_decl="typedef thread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_UI_THREADS)
 			AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
 			    [Define to 1 to use the UNIX International mutexes.]);;
 UI/threads/library)	LIBS="$LIBS -lthread"
+			LIBSO_LIBS="$LIBSO_LIBS -lthread"
 			ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+			thread_h_decl="#include <thread.h>"
+			db_threadid_t_decl="typedef thread_t db_threadid_t;"
 			AC_DEFINE(HAVE_MUTEX_UI_THREADS)
 			AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
 			    [Define to 1 to use the UNIX International mutexes.]);;
@@ -590,10 +639,18 @@ win32)			ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS"
 win32/gcc)		ADDITIONAL_OBJS="mut_win32${o} $ADDITIONAL_OBJS"
 			AC_DEFINE(HAVE_MUTEX_WIN32_GCC)
 			AH_TEMPLATE(HAVE_MUTEX_WIN32_GCC, [Define to 1 to use the GCC compiler and Windows mutexes.]);;
+MIPS/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+			AC_DEFINE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY)
+			AH_TEMPLATE(HAVE_MUTEX_MIPS_GCC_ASSEMBLY,
+			    [Define to 1 to use the GCC compiler and MIPS assembly language mutexes.]);;
 x86/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
 			AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY)
 			AH_TEMPLATE(HAVE_MUTEX_X86_GCC_ASSEMBLY,
 			    [Define to 1 to use the GCC compiler and x86 assembly language mutexes.]);;
+x86_64/gcc-assembly)	ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+			AC_DEFINE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY)
+			AH_TEMPLATE(HAVE_MUTEX_X86_64_GCC_ASSEMBLY,
+			[Define to 1 to use the GCC compiler and amd64 assembly language mutexes.]);;
 UNIX/fcntl)		AC_MSG_WARN(
 			    [NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE.])
 			ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
@@ -603,10 +660,24 @@ UNIX/fcntl)		AC_MSG_WARN(
 *)			AC_MSG_ERROR([Unknown mutex interface: $db_cv_mutex]);;
 esac
 
-if test "$db_cv_mutex" != "UNIX/fcntl"; then
-	AC_DEFINE(HAVE_MUTEX_THREADS)
-	AH_TEMPLATE(HAVE_MUTEX_THREADS,
-	    [Define to 1 if fast mutexes are available.])
+# The mutex selection often tells us what kind of thread package we're using.
+# We need to know if the thread ID type will fit into an integral type and we
+# can compare it for equality and generally treat it like an int, or if it's a
+# non-integral type and we have to treat it like a structure or other untyped
+# block of bytes.  For example, MVS typedef's pthread_t to a structure.
+AH_TEMPLATE(HAVE_INTEGRAL_THREAD_TYPE,
+    [Define to 1 if thread identifier type db_threadid_t is integral.])
+if test "$db_threadid_t_decl" = "notset"; then
+	db_threadid_t_decl="typedef uintmax_t db_threadid_t;"
+	AC_DEFINE(HAVE_INTEGRAL_THREAD_TYPE)
+else
+	AC_TRY_COMPILE(
+	#include <sys/types.h>
+	$thread_h_decl, [
+	$db_threadid_t_decl
+	db_threadid_t a;
+	a = 0;
+	], AC_DEFINE(HAVE_INTEGRAL_THREAD_TYPE))
 fi
 
 # There are 3 classes of mutexes:
diff --git a/storage/bdb/dist/aclocal/options.ac b/storage/bdb/dist/aclocal/options.ac
index b770fc44478..2697c030b86 100644
--- a/storage/bdb/dist/aclocal/options.ac
+++ b/storage/bdb/dist/aclocal/options.ac
@@ -1,4 +1,4 @@
-# $Id: options.ac,v 11.37 2004/06/10 16:38:18 bostic Exp $
+# $Id: options.ac,v 12.2 2005/10/12 14:45:42 bostic Exp $
 
 # Process user-specified options.
 AC_DEFUN(AM_OPTIONS_SET, [
@@ -161,6 +161,13 @@ AC_ARG_ENABLE(posixmutexes,
 	[db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
 AC_MSG_RESULT($db_cv_posixmutexes)
 
+AC_MSG_CHECKING(if --enable-pthread_self option specified)
+AC_ARG_ENABLE(pthread_self,
+	[AC_HELP_STRING([--enable-pthread_self],
+			[Force use of pthread_self to identify threads.])],
+	[db_cv_pthread_self="$enable_pthread_self"], [db_cv_pthread_self="no"])
+AC_MSG_RESULT($db_cv_pthread_self)
+
 AC_MSG_CHECKING(if --enable-rpc option specified)
 AC_ARG_ENABLE(rpc,
 	[AC_HELP_STRING([--enable-rpc],
@@ -224,20 +231,13 @@ if test "$with_mutex" != "no"; then
 fi
 AC_MSG_RESULT($with_mutex)
 
-AH_TEMPLATE(MUTEX_ALIGN,
-    [Define to a value if using non-standard mutex alignment.])
-AC_MSG_CHECKING(if --with-mutexalign=ALIGNMENT option specified)
+# --with-mutexalign=ALIGNMENT was the configuration option that Berkeley DB
+# used before the DbEnv::mutex_set_align method was added.
 AC_ARG_WITH(mutexalign,
 	[AC_HELP_STRING([--with-mutexalign=ALIGNMENT],
-			[Selection of non-standard mutex alignment.])],
-	[with_mutexalign="$withval"], [with_mutexalign="no"])
-if test "$with_mutexalign" = "yes"; then
-	AC_MSG_ERROR([--with-mutexalign requires a mutex alignment argument])
-fi
-if test "$with_mutexalign" != "no"; then
-	AC_DEFINE_UNQUOTED(MUTEX_ALIGN, $with_mutexalign)
-fi
-AC_MSG_RESULT($with_mutexalign)
+			[Obsolete; use DbEnv::mutex_set_align instead.])],
+	[AC_MSG_ERROR(
+    [--with-mutexalign no longer supported, use DbEnv::mutex_set_align])])
 
 AC_MSG_CHECKING([if --with-tcl=DIR option specified])
 AC_ARG_WITH(tcl,
diff --git a/storage/bdb/dist/aclocal/programs.ac b/storage/bdb/dist/aclocal/programs.ac
index db6b4f03e84..76ce0ded66a 100644
--- a/storage/bdb/dist/aclocal/programs.ac
+++ b/storage/bdb/dist/aclocal/programs.ac
@@ -1,4 +1,4 @@
-# $Id: programs.ac,v 11.22 2004/06/10 16:38:18 bostic Exp $
+# $Id: programs.ac,v 12.1 2005/04/07 06:47:03 mjc Exp $
 
 # Check for programs used in building/installation.
 AC_DEFUN(AM_PROGRAMS_SET, [
@@ -53,6 +53,11 @@ if test "$db_cv_path_sh" = missing_sh; then
 	AC_MSG_ERROR([No sh utility found.])
 fi
 
+AC_CHECK_TOOL(db_cv_path_true, true, missing_true)
+if test "$db_cv_path_true" = missing_true; then
+	AC_MSG_ERROR([No true utility found.])
+fi
+
 # Don't strip the binaries if --enable-debug was specified.
 if test "$db_cv_debug" = yes; then
 	db_cv_path_strip=debug_build_no_strip
diff --git a/storage/bdb/dist/aclocal/rpc.ac b/storage/bdb/dist/aclocal/rpc.ac
index 7d7f4dabe80..7e7198bc0fe 100644
--- a/storage/bdb/dist/aclocal/rpc.ac
+++ b/storage/bdb/dist/aclocal/rpc.ac
@@ -1,4 +1,4 @@
-# $Id: rpc.ac,v 11.9 2004/09/27 21:33:48 mjc Exp $
+# $Id: rpc.ac,v 12.0 2004/11/17 03:43:37 bostic Exp $
 
 # Try and configure RPC support.
 AC_DEFUN(AM_RPC_CONFIGURE, [
diff --git a/storage/bdb/dist/aclocal/sequence.ac b/storage/bdb/dist/aclocal/sequence.ac
index ca320b23205..5c491eeb1cf 100644
--- a/storage/bdb/dist/aclocal/sequence.ac
+++ b/storage/bdb/dist/aclocal/sequence.ac
@@ -1,8 +1,9 @@
-# $Id: sequence.ac,v 1.3 2004/10/28 18:14:30 bostic Exp $
+# $Id: sequence.ac,v 12.2 2005/11/03 17:46:14 bostic Exp $
 
 # Try and configure sequence support.
 AC_DEFUN(AM_SEQUENCE_CONFIGURE, [
 	AC_MSG_CHECKING([for 64-bit integral type support for sequences])
+
 	db_cv_build_sequence="yes"
 
 	# Have to have found 64-bit types to support sequences.  If we don't
@@ -14,13 +15,27 @@ AC_DEFUN(AM_SEQUENCE_CONFIGURE, [
 		db_cv_build_sequence="no"
 	fi
 
-	# Have to be able to cast variables to the "unsigned long long" and
-	# "long long" types, that's our cast for the printf "%ll[du]" format.
-	if test "$ac_cv_type_long_long" = "no"; then
+	# Figure out what type is the right size, and set the format.
+	AC_SUBST(INT64_FMT)
+	AC_SUBST(UINT64_FMT)
+	db_cv_seq_type="no"
+	if test "$db_cv_build_sequence" = "yes" -a\
+	    "$ac_cv_sizeof_long" -eq "8"; then
+		db_cv_seq_type="long"
+		db_cv_seq_fmt='"%ld"'
+		db_cv_seq_ufmt='"%lu"'
+		INT64_FMT='#define	INT64_FMT	"%ld"'
+		UINT64_FMT='#define	UINT64_FMT	"%lu"'
+	else if test "$db_cv_build_sequence" = "yes" -a\
+	    "$ac_cv_sizeof_long_long" -eq "8"; then
+		db_cv_seq_type="long long"
+		db_cv_seq_fmt='"%lld"'
+		db_cv_seq_ufmt='"%llu"'
+		INT64_FMT='#define	INT64_FMT	"%lld"'
+		UINT64_FMT='#define	UINT64_FMT	"%llu"'
+	else
 		db_cv_build_sequence="no"
 	fi
-	if test "$ac_cv_type_unsigned_long_long" = "no"; then
-		db_cv_build_sequence="no"
 	fi
 
 	# Test to see if we can declare variables of the appropriate size
@@ -29,33 +44,33 @@ AC_DEFUN(AM_SEQUENCE_CONFIGURE, [
 	if test "$db_cv_build_sequence" = "yes"; then
 		AC_TRY_RUN([
 		main() {
-			long long l;
-			unsigned long long u;
-			char buf[100];
+			$db_cv_seq_type l;
+			unsigned $db_cv_seq_type u;
+			char buf@<:@100@:>@;
 
-			buf[0] = 'a';
+			buf@<:@0@:>@ = 'a';
 			l = 9223372036854775807LL;
-			(void)snprintf(buf, sizeof(buf), "%lld", l);
+			(void)snprintf(buf, sizeof(buf), $db_cv_seq_fmt, l);
 			if (strcmp(buf, "9223372036854775807"))
 				return (1);
 			u = 18446744073709551615ULL;
-			(void)snprintf(buf, sizeof(buf), "%llu", u);
+			(void)snprintf(buf, sizeof(buf), $db_cv_seq_ufmt, u);
 			if (strcmp(buf, "18446744073709551615"))
 				return (1);
 			return (0);
 		}],, [db_cv_build_sequence="no"],
 		AC_TRY_LINK(,[
-			long long l;
-			unsigned long long u;
-			char buf[100];
+			$db_cv_seq_type l;
+			unsigned $db_cv_seq_type u;
+			char buf@<:@100@:>@;
 
-			buf[0] = 'a';
+			buf@<:@0@:>@ = 'a';
 			l = 9223372036854775807LL;
-			(void)snprintf(buf, sizeof(buf), "%lld", l);
+			(void)snprintf(buf, sizeof(buf), $db_cv_seq_fmt, l);
 			if (strcmp(buf, "9223372036854775807"))
 				return (1);
 			u = 18446744073709551615ULL;
-			(void)snprintf(buf, sizeof(buf), "%llu", u);
+			(void)snprintf(buf, sizeof(buf), $db_cv_seq_ufmt, u);
 			if (strcmp(buf, "18446744073709551615"))
 				return (1);
 			return (0);
@@ -68,6 +83,10 @@ AC_DEFUN(AM_SEQUENCE_CONFIGURE, [
 
 		AC_SUBST(db_seq_decl)
 		db_seq_decl="typedef int64_t db_seq_t;";
+
+		AC_DEFINE(HAVE_64BIT_TYPES)
+		AH_TEMPLATE(HAVE_64BIT_TYPES,
+		    [Define to 1 if 64-bit types are available.])
 	else
 		# It still has to compile, but it won't run.
 		db_seq_decl="typedef int db_seq_t;";
diff --git a/storage/bdb/dist/aclocal/sosuffix.ac b/storage/bdb/dist/aclocal/sosuffix.ac
index 8864280f1ae..bd391e248a0 100644
--- a/storage/bdb/dist/aclocal/sosuffix.ac
+++ b/storage/bdb/dist/aclocal/sosuffix.ac
@@ -1,4 +1,4 @@
-# $Id: sosuffix.ac,v 1.4 2004/08/14 20:00:45 dda Exp $
+# $Id: sosuffix.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 # Determine shared object suffixes.
 #
 # Our method is to use the libtool variable $library_names_spec,
diff --git a/storage/bdb/dist/aclocal/tcl.ac b/storage/bdb/dist/aclocal/tcl.ac
index d28d360834c..360cf62b185 100644
--- a/storage/bdb/dist/aclocal/tcl.ac
+++ b/storage/bdb/dist/aclocal/tcl.ac
@@ -1,4 +1,4 @@
-# $Id: tcl.ac,v 11.18 2004/03/11 20:11:17 bostic Exp $
+# $Id: tcl.ac,v 12.2 2005/06/28 20:45:25 gmf Exp $
 
 # The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
 # 8.3.0 distribution, with some minor changes.  For this reason, license
@@ -93,12 +93,11 @@ AC_DEFUN(SC_LOAD_TCLCONFIG, [
 		AC_MSG_ERROR([Berkeley DB requires Tcl version 8.4 or better.])
 	fi
 
-	#
-	# The eval is required to do the TCL_DBGX substitution in the
-	# TCL_LIB_FILE variable
-	#
-	eval TCL_LIB_FILE="${TCL_LIB_FILE}"
-	eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+	# The eval is required to do substitution (for example, the TCL_DBGX
+	# substitution in the TCL_LIB_FILE variable.
+	eval "TCL_INCLUDE_SPEC=\"${TCL_INCLUDE_SPEC}\""
+	eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\""
+	eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\""
 	eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
 
 	#
@@ -115,8 +114,9 @@ AC_DEFUN(SC_LOAD_TCLCONFIG, [
 		LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";;
 	esac
 	AC_SUBST(TCL_BIN_DIR)
-	AC_SUBST(TCL_SRC_DIR)
+	AC_SUBST(TCL_INCLUDE_SPEC)
 	AC_SUBST(TCL_LIB_FILE)
+	AC_SUBST(TCL_SRC_DIR)
 
 	AC_SUBST(TCL_TCLSH)
 	TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
@@ -128,14 +128,8 @@ AC_DEFUN(AM_TCL_LOAD, [
 		AC_MSG_ERROR([Tcl requires shared libraries])
 	fi
 
-	AC_SUBST(TCFLAGS)
-
 	SC_PATH_TCLCONFIG
 	SC_LOAD_TCLCONFIG
 
-	if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
-		TCFLAGS="-I$TCL_PREFIX/include"
-	fi
-
 	INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)"
 ])
diff --git a/storage/bdb/dist/aclocal/types.ac b/storage/bdb/dist/aclocal/types.ac
index b5843385c1a..f9291386dc3 100644
--- a/storage/bdb/dist/aclocal/types.ac
+++ b/storage/bdb/dist/aclocal/types.ac
@@ -1,4 +1,4 @@
-# $Id: types.ac,v 11.18 2004/10/25 18:14:14 bostic Exp $
+# $Id: types.ac,v 12.3 2005/11/03 17:46:14 bostic Exp $
 
 # Check the sizes we know about, and see if any of them match what's needed.
 #
@@ -68,6 +68,11 @@ AC_CHECK_HEADER(stddef.h, [
 	db_includes="$db_includes
 #include <stddef.h>"
 	stddef_h_decl="#include <stddef.h>"])
+AC_SUBST(unistd_h_decl)
+AC_CHECK_HEADER(unistd.h, [
+	db_includes="$db_includes
+#include <unistd.h>"
+	unistd_h_decl="#include <unistd.h>"])
 db_includes="$db_includes
 #include <stdio.h>"
 
@@ -76,10 +81,6 @@ db_includes="$db_includes
 AC_CHECK_TYPE(off_t,, AC_MSG_ERROR([No off_t type.]), $db_includes)
 AC_CHECK_TYPE(size_t,, AC_MSG_ERROR([No size_t type.]), $db_includes)
 
-# Check for long long and unsigned long long, we only support sequences
-# if those types are available.
-AC_CHECK_TYPES([long long, unsigned long long],,, $db_includes)
-
 # We need to know the sizes of various objects on this system.
 AC_CHECK_SIZEOF(char,, $db_includes)
 AC_CHECK_SIZEOF(unsigned char,, $db_includes)
@@ -148,6 +149,9 @@ AC_CHECK_TYPE(ssize_t,,
     [AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)],
     $db_includes)
 
+# So far, no autoconf'd systems lack pid_t.
+AC_SUBST(pid_t_decl)
+
 # Check for uintmax_t -- if none exists, first the largest unsigned integral
 # type available.
 AC_SUBST(uintmax_t_decl)
diff --git a/storage/bdb/dist/aclocal_java/ac_check_class.ac b/storage/bdb/dist/aclocal_java/ac_check_class.ac
index 915198af567..b12e7f02f9a 100644
--- a/storage/bdb/dist/aclocal_java/ac_check_class.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_class.ac
@@ -15,7 +15,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
-dnl @version $Id: ac_check_class.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl @version $Id: ac_check_class.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_CHECK_CLASS],[
 AC_REQUIRE([AC_PROG_JAVA])
diff --git a/storage/bdb/dist/aclocal_java/ac_check_classpath.ac b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
index 4a78d0f8785..b18d479b3f1 100644
--- a/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
@@ -12,7 +12,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
-dnl @version $Id: ac_check_classpath.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl @version $Id: ac_check_classpath.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_CHECK_CLASSPATH],[
 if test "x$CLASSPATH" = x; then
diff --git a/storage/bdb/dist/aclocal_java/ac_check_junit.ac b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
index 3b81d1dc3fc..cc02e327662 100644
--- a/storage/bdb/dist/aclocal_java/ac_check_junit.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
@@ -30,7 +30,7 @@ dnl     echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@
 dnl     chmod +x $@
 dnl
 dnl @author Luc Maisonobe
-dnl @version $Id: ac_check_junit.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl @version $Id: ac_check_junit.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_CHECK_JUNIT],[
 AC_CACHE_VAL(ac_cv_prog_JUNIT,[
diff --git a/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
index ab62e33c887..c7c26b87741 100644
--- a/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
+++ b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
@@ -14,7 +14,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
-dnl @version $Id: ac_check_rqrd_class.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl @version $Id: ac_check_rqrd_class.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 
 AC_DEFUN([AC_CHECK_RQRD_CLASS],[
diff --git a/storage/bdb/dist/aclocal_java/ac_java_options.ac b/storage/bdb/dist/aclocal_java/ac_java_options.ac
index 567afca7fa5..e71adfe68b5 100644
--- a/storage/bdb/dist/aclocal_java/ac_java_options.ac
+++ b/storage/bdb/dist/aclocal_java/ac_java_options.ac
@@ -12,7 +12,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Devin Weaver <ktohg@tritarget.com>
-dnl @version $Id: ac_java_options.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl @version $Id: ac_java_options.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_JAVA_OPTIONS],[
 AC_ARG_WITH(java-prefix,
diff --git a/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
index b70d108d6cf..35cdda383c3 100644
--- a/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
+++ b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
@@ -28,7 +28,7 @@ dnl Note: This macro can work with the autoconf M4 macros for Java programs.
 dnl This particular macro is not part of the original set of macros.
 dnl
 dnl @author Don Anderson <dda@sleepycat.com>
-dnl @version $Id: ac_jni_include_dirs.ac,v 1.12 2003/10/05 18:10:06 dda Exp $
+dnl @version $Id: ac_jni_include_dirs.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN(AC_JNI_INCLUDE_DIR,[
 
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_jar.ac b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
index 9dfa1be6dad..c60a79a859d 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
@@ -22,7 +22,7 @@ dnl The general documentation of those macros, as well as the sample
 dnl configure.in, is included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Egon Willighagen 
-dnl @version $Id: ac_prog_jar.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl @version $Id: ac_prog_jar.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAR],[
 AC_REQUIRE([AC_EXEEXT])dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java.ac b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
index 67a879ffcf6..a011b0a9f5a 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_java.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
@@ -62,7 +62,7 @@ dnl
 dnl    AC_OUTPUT(Makefile)
 dnl
 dnl @author Stephane Bortzmeyer 
-dnl @version $Id: ac_prog_java.ac,v 1.2 2003/05/10 17:46:09 dda Exp $
+dnl @version $Id: ac_prog_java.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 dnl Note: Modified by dda@sleepycat.com to prefer java over kaffe. [#8059]
 dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
index 36acd2676fa..f0ff8c57f2f 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
@@ -11,7 +11,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer 
-dnl @version $Id: ac_prog_java_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl @version $Id: ac_prog_java_works.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAVA_WORKS], [
 AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes)
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
index 5ded7d1b7e6..b3607dcf842 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
@@ -28,7 +28,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer 
-dnl @version $Id: ac_prog_javac.ac,v 1.3 2001/08/23 17:08:22 dda Exp $
+dnl @version $Id: ac_prog_javac.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAVAC],[
 AC_REQUIRE([AC_EXEEXT])dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
index 139a99f989b..0cfd1f2137f 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
@@ -11,7 +11,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Stephane Bortzmeyer 
-dnl @version $Id: ac_prog_javac_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl @version $Id: ac_prog_javac_works.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAVAC_WORKS],[
 AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
index 5154d3f1f3b..36b95bd00a3 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
@@ -22,7 +22,7 @@ dnl The general documentation of those macros, as well as the sample
 dnl configure.in, is included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Egon Willighagen 
-dnl @version $Id: ac_prog_javadoc.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl @version $Id: ac_prog_javadoc.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAVADOC],[
 AC_REQUIRE([AC_EXEEXT])dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javah.ac b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
index 1b16d9e24e5..7563036c091 100644
--- a/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
@@ -5,7 +5,7 @@ dnl and looks for the jni.h header file. If available, JAVAH is set to
 dnl the full path of javah and CPPFLAGS is updated accordingly.
 dnl
 dnl @author Luc Maisonobe
-dnl @version $Id: ac_prog_javah.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl @version $Id: ac_prog_javah.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_PROG_JAVAH],[
 AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
index 775569ba054..d22aeab42f1 100644
--- a/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
+++ b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
@@ -14,7 +14,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Devin Weaver 
-dnl @version $Id: ac_try_compile_java.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl @version $Id: ac_try_compile_java.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_TRY_COMPILE_JAVA],[
 AC_REQUIRE([AC_PROG_JAVAC])dnl
diff --git a/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
index cf91306aff6..01249358883 100644
--- a/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
+++ b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
@@ -14,7 +14,7 @@ dnl The general documentation, as well as the sample configure.in, is
 dnl included in the AC_PROG_JAVA macro.
 dnl
 dnl @author Devin Weaver 
-dnl @version $Id: ac_try_run_javac.ac,v 1.1 2001/08/23 16:58:45 dda Exp $
+dnl @version $Id: ac_try_run_javac.ac,v 12.0 2004/11/17 03:43:38 bostic Exp $
 dnl
 AC_DEFUN([AC_TRY_RUN_JAVA],[
 AC_REQUIRE([AC_PROG_JAVAC])dnl
diff --git a/storage/bdb/dist/buildrel b/storage/bdb/dist/buildrel
index 2537376a87a..3d4121f902a 100644
--- a/storage/bdb/dist/buildrel
+++ b/storage/bdb/dist/buildrel
@@ -1,4 +1,4 @@
-# $Id: buildrel,v 1.65 2004/11/09 01:26:29 bostic Exp $
+# $Id: buildrel,v 12.1 2005/10/25 00:27:35 bostic Exp $
 #
 # Build the distribution package.
 #
@@ -41,7 +41,7 @@ rm -rf $R/docs && cp -r $D/docs $R/docs
 # Remove source directories we don't distribute.
 cd $R && rm -rf docs_src docs/api_java
 cd $R && rm -rf test/TODO test/upgrade test_perf test_purify
-cd $R && rm -rf test_server test_thread test_vxworks test_xa
+cd $R && rm -rf test_rep test_server test_thread test_vxworks test_xa
 cd $R && rm -rf java/src/com/sleepycat/xa
 
 # Fix symbolic links and permissions.
diff --git a/storage/bdb/dist/config.guess b/storage/bdb/dist/config.guess
index 7d0185e019e..d0d57f6945f 100755
--- a/storage/bdb/dist/config.guess
+++ b/storage/bdb/dist/config.guess
@@ -1,9 +1,9 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
 #   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-#   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+#   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
 
-timestamp='2004-09-07'
+timestamp='2005-09-19'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -17,13 +17,15 @@ timestamp='2004-09-07'
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
 #
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
 # configuration script generated by Autoconf, you may include it under
 # the same distribution terms that you use for the rest of that program.
 
+
 # Originally written by Per Bothner .
 # Please send patches to .  Submit a context
 # diff and a properly formatted ChangeLog entry.
@@ -53,7 +55,7 @@ version="\
 GNU config.guess ($timestamp)
 
 Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
@@ -66,11 +68,11 @@ Try \`$me --help' for more information."
 while test $# -gt 0 ; do
   case $1 in
     --time-stamp | --time* | -t )
-       echo "$timestamp" ; exit 0 ;;
+       echo "$timestamp" ; exit ;;
     --version | -v )
-       echo "$version" ; exit 0 ;;
+       echo "$version" ; exit ;;
     --help | --h* | -h )
-       echo "$usage"; exit 0 ;;
+       echo "$usage"; exit ;;
     -- )     # Stop option processing
        shift; break ;;
     - )	# Use stdin as input.
@@ -123,7 +125,7 @@ case $CC_FOR_BUILD,$HOST_CC,$CC in
 	;;
  ,,*)   CC_FOR_BUILD=$CC ;;
  ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
-esac ;'
+esac ; set_cc_for_build= ;'
 
 # This is needed to find uname on a Pyramid OSx when run in the BSD universe.
 # (ghazi@noc.rutgers.edu 1994-08-24)
@@ -196,55 +198,20 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	# contains redundant information, the shorter form:
 	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
 	echo "${machine}-${os}${release}"
-	exit 0 ;;
-    amd64:OpenBSD:*:*)
-	echo x86_64-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    amiga:OpenBSD:*:*)
-	echo m68k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    cats:OpenBSD:*:*)
-	echo arm-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    hp300:OpenBSD:*:*)
-	echo m68k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    luna88k:OpenBSD:*:*)
-    	echo m88k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    mac68k:OpenBSD:*:*)
-	echo m68k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    macppc:OpenBSD:*:*)
-	echo powerpc-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    mvme68k:OpenBSD:*:*)
-	echo m68k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    mvme88k:OpenBSD:*:*)
-	echo m88k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    mvmeppc:OpenBSD:*:*)
-	echo powerpc-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    sgi:OpenBSD:*:*)
-	echo mips64-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
-    sun3:OpenBSD:*:*)
-	echo m68k-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:OpenBSD:*:*)
-	echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE}
-	exit 0 ;;
+	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+	exit ;;
     *:ekkoBSD:*:*)
 	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     macppc:MirBSD:*:*)
 	echo powerppc-unknown-mirbsd${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:MirBSD:*:*)
 	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     alpha:OSF1:*:*)
 	case $UNAME_RELEASE in
 	*4.0)
@@ -297,37 +264,43 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	# A Xn.n version is an unreleased experimental baselevel.
 	# 1.2 uses "1.2" for uname -r.
 	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-	exit 0 ;;
+	exit ;;
     Alpha\ *:Windows_NT*:*)
 	# How do we know it's Interix rather than the generic POSIX subsystem?
 	# Should we change UNAME_MACHINE based on the output of uname instead
 	# of the specific Alpha model?
 	echo alpha-pc-interix
-	exit 0 ;;
+	exit ;;
     21064:Windows_NT:50:3)
 	echo alpha-dec-winnt3.5
-	exit 0 ;;
+	exit ;;
     Amiga*:UNIX_System_V:4.0:*)
 	echo m68k-unknown-sysv4
-	exit 0;;
+	exit ;;
     *:[Aa]miga[Oo][Ss]:*:*)
 	echo ${UNAME_MACHINE}-unknown-amigaos
-	exit 0 ;;
+	exit ;;
     *:[Mm]orph[Oo][Ss]:*:*)
 	echo ${UNAME_MACHINE}-unknown-morphos
-	exit 0 ;;
+	exit ;;
     *:OS/390:*:*)
 	echo i370-ibm-openedition
-	exit 0 ;;
+	exit ;;
+    *:z/VM:*:*)
+	echo s390-ibm-zvmoe
+	exit ;;
     *:OS400:*:*)
         echo powerpc-ibm-os400
-	exit 0 ;;
+	exit ;;
     arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
 	echo arm-acorn-riscix${UNAME_RELEASE}
-	exit 0;;
+	exit ;;
+    arm:riscos:*:*|arm:RISCOS:*:*)
+	echo arm-unknown-riscos
+	exit ;;
     SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
 	echo hppa1.1-hitachi-hiuxmpp
-	exit 0;;
+	exit ;;
     Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
 	# akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
 	if test "`(/bin/universe) 2>/dev/null`" = att ; then
@@ -335,32 +308,32 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	else
 		echo pyramid-pyramid-bsd
 	fi
-	exit 0 ;;
+	exit ;;
     NILE*:*:*:dcosx)
 	echo pyramid-pyramid-svr4
-	exit 0 ;;
+	exit ;;
     DRS?6000:unix:4.0:6*)
 	echo sparc-icl-nx6
-	exit 0 ;;
-    DRS?6000:UNIX_SV:4.2*:7*)
+	exit ;;
+    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
 	case `/usr/bin/uname -p` in
-	    sparc) echo sparc-icl-nx7 && exit 0 ;;
+	    sparc) echo sparc-icl-nx7; exit ;;
 	esac ;;
     sun4H:SunOS:5.*:*)
 	echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-	exit 0 ;;
+	exit ;;
     sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
 	echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-	exit 0 ;;
+	exit ;;
     i86pc:SunOS:5.*:*)
 	echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-	exit 0 ;;
+	exit ;;
     sun4*:SunOS:6*:*)
 	# According to config.sub, this is the proper way to canonicalize
 	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
 	# it's likely to be more like Solaris than SunOS4.
 	echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-	exit 0 ;;
+	exit ;;
     sun4*:SunOS:*:*)
 	case "`/usr/bin/arch -k`" in
 	    Series*|S4*)
@@ -369,10 +342,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	esac
 	# Japanese Language versions have a version number like `4.1.3-JL'.
 	echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
-	exit 0 ;;
+	exit ;;
     sun3*:SunOS:*:*)
 	echo m68k-sun-sunos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     sun*:*:4.2BSD:*)
 	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
 	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
@@ -384,10 +357,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 		echo sparc-sun-sunos${UNAME_RELEASE}
 		;;
 	esac
-	exit 0 ;;
+	exit ;;
     aushp:SunOS:*:*)
 	echo sparc-auspex-sunos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     # The situation for MiNT is a little confusing.  The machine name
     # can be virtually everything (everything which is not
     # "atarist" or "atariste" at least should have a processor
@@ -398,40 +371,40 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
     # be no problem.
     atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
         echo m68k-atari-mint${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
 	echo m68k-atari-mint${UNAME_RELEASE}
-        exit 0 ;;
+        exit ;;
     *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
         echo m68k-atari-mint${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
         echo m68k-milan-mint${UNAME_RELEASE}
-        exit 0 ;;
+        exit ;;
     hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
         echo m68k-hades-mint${UNAME_RELEASE}
-        exit 0 ;;
+        exit ;;
     *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
         echo m68k-unknown-mint${UNAME_RELEASE}
-        exit 0 ;;
+        exit ;;
     m68k:machten:*:*)
 	echo m68k-apple-machten${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     powerpc:machten:*:*)
 	echo powerpc-apple-machten${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     RISC*:Mach:*:*)
 	echo mips-dec-mach_bsd4.3
-	exit 0 ;;
+	exit ;;
     RISC*:ULTRIX:*:*)
 	echo mips-dec-ultrix${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     VAX*:ULTRIX*:*:*)
 	echo vax-dec-ultrix${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     2020:CLIX:*:* | 2430:CLIX:*:*)
 	echo clipper-intergraph-clix${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     mips:*:*:UMIPS | mips:*:*:RISCos)
 	eval $set_cc_for_build
 	sed 's/^	//' << EOF >$dummy.c
@@ -455,32 +428,33 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	  exit (-1);
 	}
 EOF
-	$CC_FOR_BUILD -o $dummy $dummy.c \
-	  && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
-	  && exit 0
+	$CC_FOR_BUILD -o $dummy $dummy.c &&
+	  dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+	  SYSTEM_NAME=`$dummy $dummyarg` &&
+	    { echo "$SYSTEM_NAME"; exit; }
 	echo mips-mips-riscos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     Motorola:PowerMAX_OS:*:*)
 	echo powerpc-motorola-powermax
-	exit 0 ;;
+	exit ;;
     Motorola:*:4.3:PL8-*)
 	echo powerpc-harris-powermax
-	exit 0 ;;
+	exit ;;
     Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
 	echo powerpc-harris-powermax
-	exit 0 ;;
+	exit ;;
     Night_Hawk:Power_UNIX:*:*)
 	echo powerpc-harris-powerunix
-	exit 0 ;;
+	exit ;;
     m88k:CX/UX:7*:*)
 	echo m88k-harris-cxux7
-	exit 0 ;;
+	exit ;;
     m88k:*:4*:R4*)
 	echo m88k-motorola-sysv4
-	exit 0 ;;
+	exit ;;
     m88k:*:3*:R3*)
 	echo m88k-motorola-sysv3
-	exit 0 ;;
+	exit ;;
     AViiON:dgux:*:*)
         # DG/UX returns AViiON for all architectures
         UNAME_PROCESSOR=`/usr/bin/uname -p`
@@ -496,29 +470,29 @@ EOF
 	else
 	    echo i586-dg-dgux${UNAME_RELEASE}
 	fi
- 	exit 0 ;;
+ 	exit ;;
     M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
 	echo m88k-dolphin-sysv3
-	exit 0 ;;
+	exit ;;
     M88*:*:R3*:*)
 	# Delta 88k system running SVR3
 	echo m88k-motorola-sysv3
-	exit 0 ;;
+	exit ;;
     XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
 	echo m88k-tektronix-sysv3
-	exit 0 ;;
+	exit ;;
     Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
 	echo m68k-tektronix-bsd
-	exit 0 ;;
+	exit ;;
     *:IRIX*:*:*)
 	echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
-	exit 0 ;;
+	exit ;;
     ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
-	echo romp-ibm-aix      # uname -m gives an 8 hex-code CPU id
-	exit 0 ;;              # Note that: echo "'`uname -s`'" gives 'AIX '
+	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
+	exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
     i*86:AIX:*:*)
 	echo i386-ibm-aix
-	exit 0 ;;
+	exit ;;
     ia64:AIX:*:*)
 	if [ -x /usr/bin/oslevel ] ; then
 		IBM_REV=`/usr/bin/oslevel`
@@ -526,7 +500,7 @@ EOF
 		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
 	fi
 	echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
-	exit 0 ;;
+	exit ;;
     *:AIX:2:3)
 	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
 		eval $set_cc_for_build
@@ -541,14 +515,18 @@ EOF
 			exit(0);
 			}
 EOF
-		$CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0
-		echo rs6000-ibm-aix3.2.5
+		if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+		then
+			echo "$SYSTEM_NAME"
+		else
+			echo rs6000-ibm-aix3.2.5
+		fi
 	elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
 		echo rs6000-ibm-aix3.2.4
 	else
 		echo rs6000-ibm-aix3.2
 	fi
-	exit 0 ;;
+	exit ;;
     *:AIX:*:[45])
 	IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
 	if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
@@ -562,28 +540,28 @@ EOF
 		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
 	fi
 	echo ${IBM_ARCH}-ibm-aix${IBM_REV}
-	exit 0 ;;
+	exit ;;
     *:AIX:*:*)
 	echo rs6000-ibm-aix
-	exit 0 ;;
+	exit ;;
     ibmrt:4.4BSD:*|romp-ibm:BSD:*)
 	echo romp-ibm-bsd4.4
-	exit 0 ;;
+	exit ;;
     ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
 	echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
-	exit 0 ;;                           # report: romp-ibm BSD 4.3
+	exit ;;                             # report: romp-ibm BSD 4.3
     *:BOSX:*:*)
 	echo rs6000-bull-bosx
-	exit 0 ;;
+	exit ;;
     DPX/2?00:B.O.S.:*:*)
 	echo m68k-bull-sysv3
-	exit 0 ;;
+	exit ;;
     9000/[34]??:4.3bsd:1.*:*)
 	echo m68k-hp-bsd
-	exit 0 ;;
+	exit ;;
     hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
 	echo m68k-hp-bsd4.4
-	exit 0 ;;
+	exit ;;
     9000/[34678]??:HP-UX:*:*)
 	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
 	case "${UNAME_MACHINE}" in
@@ -645,9 +623,19 @@ EOF
 	esac
 	if [ ${HP_ARCH} = "hppa2.0w" ]
 	then
-	    # avoid double evaluation of $set_cc_for_build
-	    test -n "$CC_FOR_BUILD" || eval $set_cc_for_build
-	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E -) | grep __LP64__ >/dev/null
+	    eval $set_cc_for_build
+
+	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
+	    # generating 64-bit code.  GNU and HP use different nomenclature:
+	    #
+	    # $ CC_FOR_BUILD=cc ./config.guess
+	    # => hppa2.0w-hp-hpux11.23
+	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+	    # => hppa64-hp-hpux11.23
+
+	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+		grep __LP64__ >/dev/null
 	    then
 		HP_ARCH="hppa2.0w"
 	    else
@@ -655,11 +643,11 @@ EOF
 	    fi
 	fi
 	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
-	exit 0 ;;
+	exit ;;
     ia64:HP-UX:*:*)
 	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
 	echo ia64-hp-hpux${HPUX_REV}
-	exit 0 ;;
+	exit ;;
     3050*:HI-UX:*:*)
 	eval $set_cc_for_build
 	sed 's/^	//' << EOF >$dummy.c
@@ -687,158 +675,166 @@ EOF
 	  exit (0);
 	}
 EOF
-	$CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0
+	$CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+		{ echo "$SYSTEM_NAME"; exit; }
 	echo unknown-hitachi-hiuxwe2
-	exit 0 ;;
+	exit ;;
     9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
 	echo hppa1.1-hp-bsd
-	exit 0 ;;
+	exit ;;
     9000/8??:4.3bsd:*:*)
 	echo hppa1.0-hp-bsd
-	exit 0 ;;
+	exit ;;
     *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
 	echo hppa1.0-hp-mpeix
-	exit 0 ;;
+	exit ;;
     hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
 	echo hppa1.1-hp-osf
-	exit 0 ;;
+	exit ;;
     hp8??:OSF1:*:*)
 	echo hppa1.0-hp-osf
-	exit 0 ;;
+	exit ;;
     i*86:OSF1:*:*)
 	if [ -x /usr/sbin/sysversion ] ; then
 	    echo ${UNAME_MACHINE}-unknown-osf1mk
 	else
 	    echo ${UNAME_MACHINE}-unknown-osf1
 	fi
-	exit 0 ;;
+	exit ;;
     parisc*:Lites*:*:*)
 	echo hppa1.1-hp-lites
-	exit 0 ;;
+	exit ;;
     C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
 	echo c1-convex-bsd
-        exit 0 ;;
+        exit ;;
     C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
 	if getsysinfo -f scalar_acc
 	then echo c32-convex-bsd
 	else echo c2-convex-bsd
 	fi
-        exit 0 ;;
+        exit ;;
     C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
 	echo c34-convex-bsd
-        exit 0 ;;
+        exit ;;
     C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
 	echo c38-convex-bsd
-        exit 0 ;;
+        exit ;;
     C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
 	echo c4-convex-bsd
-        exit 0 ;;
+        exit ;;
     CRAY*Y-MP:*:*:*)
 	echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     CRAY*[A-Z]90:*:*:*)
 	echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
 	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
 	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
 	      -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     CRAY*TS:*:*:*)
 	echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     CRAY*T3E:*:*:*)
 	echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     CRAY*SV1:*:*:*)
 	echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     *:UNICOS/mp:*:*)
 	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
-	exit 0 ;;
+	exit ;;
     F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
 	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
         FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
         FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
         echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
-        exit 0 ;;
+        exit ;;
     5000:UNIX_System_V:4.*:*)
         FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
         FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
         echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
-	exit 0 ;;
+	exit ;;
     i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
 	echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     sparc*:BSD/OS:*:*)
 	echo sparc-unknown-bsdi${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:BSD/OS:*:*)
 	echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:FreeBSD:*:*)
 	echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
-	exit 0 ;;
+	exit ;;
     i*:CYGWIN*:*)
 	echo ${UNAME_MACHINE}-pc-cygwin
-	exit 0 ;;
+	exit ;;
     i*:MINGW*:*)
 	echo ${UNAME_MACHINE}-pc-mingw32
-	exit 0 ;;
+	exit ;;
+    i*:windows32*:*)
+    	# uname -m includes "-pc" on this system.
+    	echo ${UNAME_MACHINE}-mingw32
+	exit ;;
     i*:PW*:*)
 	echo ${UNAME_MACHINE}-pc-pw32
-	exit 0 ;;
+	exit ;;
     x86:Interix*:[34]*)
 	echo i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//'
-	exit 0 ;;
+	exit ;;
     [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
 	echo i${UNAME_MACHINE}-pc-mks
-	exit 0 ;;
+	exit ;;
     i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
 	# How do we know it's Interix rather than the generic POSIX subsystem?
 	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
 	# UNAME_MACHINE based on the output of uname instead of i386?
 	echo i586-pc-interix
-	exit 0 ;;
+	exit ;;
     i*:UWIN*:*)
 	echo ${UNAME_MACHINE}-pc-uwin
-	exit 0 ;;
+	exit ;;
+    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+	echo x86_64-unknown-cygwin
+	exit ;;
     p*:CYGWIN*:*)
 	echo powerpcle-unknown-cygwin
-	exit 0 ;;
+	exit ;;
     prep*:SunOS:5.*:*)
 	echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
-	exit 0 ;;
+	exit ;;
     *:GNU:*:*)
 	# the GNU system
 	echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
-	exit 0 ;;
+	exit ;;
     *:GNU/*:*:*)
 	# other systems with GNU libc and userland
 	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
-	exit 0 ;;
+	exit ;;
     i*86:Minix:*:*)
 	echo ${UNAME_MACHINE}-pc-minix
-	exit 0 ;;
+	exit ;;
     arm*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     cris:Linux:*:*)
 	echo cris-axis-linux-gnu
-	exit 0 ;;
+	exit ;;
     crisv32:Linux:*:*)
 	echo crisv32-axis-linux-gnu
-	exit 0 ;;
+	exit ;;
     frv:Linux:*:*)
     	echo frv-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     ia64:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     m32r*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     m68*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     mips:Linux:*:*)
 	eval $set_cc_for_build
 	sed 's/^	//' << EOF >$dummy.c
@@ -856,7 +852,7 @@ EOF
 	#endif
 EOF
 	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
-	test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0
+	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
 	;;
     mips64:Linux:*:*)
 	eval $set_cc_for_build
@@ -875,14 +871,17 @@ EOF
 	#endif
 EOF
 	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
-	test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0
+	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
 	;;
+    or32:Linux:*:*)
+	echo or32-unknown-linux-gnu
+	exit ;;
     ppc:Linux:*:*)
 	echo powerpc-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     ppc64:Linux:*:*)
 	echo powerpc64-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     alpha:Linux:*:*)
 	case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
 	  EV5)   UNAME_MACHINE=alphaev5 ;;
@@ -896,7 +895,7 @@ EOF
 	objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
 	if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
 	echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
-	exit 0 ;;
+	exit ;;
     parisc:Linux:*:* | hppa:Linux:*:*)
 	# Look for CPU level
 	case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
@@ -904,25 +903,25 @@ EOF
 	  PA8*) echo hppa2.0-unknown-linux-gnu ;;
 	  *)    echo hppa-unknown-linux-gnu ;;
 	esac
-	exit 0 ;;
+	exit ;;
     parisc64:Linux:*:* | hppa64:Linux:*:*)
 	echo hppa64-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
 	echo ${UNAME_MACHINE}-ibm-linux
-	exit 0 ;;
+	exit ;;
     sh64*:Linux:*:*)
     	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     sh*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     sparc:Linux:*:* | sparc64:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     x86_64:Linux:*:*)
 	echo x86_64-unknown-linux-gnu
-	exit 0 ;;
+	exit ;;
     i*86:Linux:*:*)
 	# The BFD linker knows what the default object file format is, so
 	# first see if it will tell us. cd to the root directory to prevent
@@ -940,15 +939,15 @@ EOF
 		;;
 	  a.out-i386-linux)
 		echo "${UNAME_MACHINE}-pc-linux-gnuaout"
-		exit 0 ;;
+		exit ;;
 	  coff-i386)
 		echo "${UNAME_MACHINE}-pc-linux-gnucoff"
-		exit 0 ;;
+		exit ;;
 	  "")
 		# Either a pre-BFD a.out linker (linux-gnuoldld) or
 		# one that does not give us useful --help.
 		echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
-		exit 0 ;;
+		exit ;;
 	esac
 	# Determine whether the default compiler is a.out or elf
 	eval $set_cc_for_build
@@ -976,15 +975,18 @@ EOF
 	#endif
 EOF
 	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
-	test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0
-	test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
+	test x"${LIBC}" != x && {
+		echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+		exit
+	}
+	test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
 	;;
     i*86:DYNIX/ptx:4*:*)
 	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
 	# earlier versions are messed up and put the nodename in both
 	# sysname and nodename.
 	echo i386-sequent-sysv4
-	exit 0 ;;
+	exit ;;
     i*86:UNIX_SV:4.2MP:2.*)
         # Unixware is an offshoot of SVR4, but it has its own version
         # number series starting with 2...
@@ -992,27 +994,27 @@ EOF
 	# I just have to hope.  -- rms.
         # Use sysv4.2uw... so that sysv4* matches it.
 	echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
-	exit 0 ;;
+	exit ;;
     i*86:OS/2:*:*)
 	# If we were able to find `uname', then EMX Unix compatibility
 	# is probably installed.
 	echo ${UNAME_MACHINE}-pc-os2-emx
-	exit 0 ;;
+	exit ;;
     i*86:XTS-300:*:STOP)
 	echo ${UNAME_MACHINE}-unknown-stop
-	exit 0 ;;
+	exit ;;
     i*86:atheos:*:*)
 	echo ${UNAME_MACHINE}-unknown-atheos
-	exit 0 ;;
-	i*86:syllable:*:*)
+	exit ;;
+    i*86:syllable:*:*)
 	echo ${UNAME_MACHINE}-pc-syllable
-	exit 0 ;;
+	exit ;;
     i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
 	echo i386-unknown-lynxos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     i*86:*DOS:*:*)
 	echo ${UNAME_MACHINE}-pc-msdosdjgpp
-	exit 0 ;;
+	exit ;;
     i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
 	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
 	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
@@ -1020,15 +1022,16 @@ EOF
 	else
 		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
 	fi
-	exit 0 ;;
-    i*86:*:5:[78]*)
+	exit ;;
+    i*86:*:5:[678]*)
+    	# UnixWare 7.x, OpenUNIX and OpenServer 6.
 	case `/bin/uname -X | grep "^Machine"` in
 	    *486*)	     UNAME_MACHINE=i486 ;;
 	    *Pentium)	     UNAME_MACHINE=i586 ;;
 	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
 	esac
 	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
-	exit 0 ;;
+	exit ;;
     i*86:*:3.2:*)
 	if test -f /usr/options/cb.name; then
 		UNAME_REL=`sed -n 's/.*Version //p' /dev/null 2>&1 ; then
 	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
 	else # Add other i860-SVR4 vendors below as they are discovered.
 	  echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
 	fi
-	exit 0 ;;
+	exit ;;
     mini*:CTIX:SYS*5:*)
 	# "miniframe"
 	echo m68010-convergent-sysv
-	exit 0 ;;
+	exit ;;
     mc68k:UNIX:SYSTEM5:3.51m)
 	echo m68k-convergent-sysv
-	exit 0 ;;
+	exit ;;
     M680?0:D-NIX:5.3:*)
 	echo m68k-diab-dnix
-	exit 0 ;;
+	exit ;;
     M68*:*:R3V[5678]*:*)
-	test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+	test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
     3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
 	OS_REL=''
 	test -r /etc/.relid \
 	&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
 	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-	  && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+	  && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
 	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
-	  && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+	  && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
     3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
         /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-          && echo i486-ncr-sysv4 && exit 0 ;;
+          && { echo i486-ncr-sysv4; exit; } ;;
     m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
 	echo m68k-unknown-lynxos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     mc68030:UNIX_System_V:4.*:*)
 	echo m68k-atari-sysv4
-	exit 0 ;;
+	exit ;;
     TSUNAMI:LynxOS:2.*:*)
 	echo sparc-unknown-lynxos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     rs6000:LynxOS:2.*:*)
 	echo rs6000-unknown-lynxos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
 	echo powerpc-unknown-lynxos${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     SM[BE]S:UNIX_SV:*:*)
 	echo mips-dde-sysv${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     RM*:ReliantUNIX-*:*:*)
 	echo mips-sni-sysv4
-	exit 0 ;;
+	exit ;;
     RM*:SINIX-*:*:*)
 	echo mips-sni-sysv4
-	exit 0 ;;
+	exit ;;
     *:SINIX-*:*:*)
 	if uname -p 2>/dev/null >/dev/null ; then
 		UNAME_MACHINE=`(uname -p) 2>/dev/null`
@@ -1120,69 +1123,72 @@ EOF
 	else
 		echo ns32k-sni-sysv
 	fi
-	exit 0 ;;
+	exit ;;
     PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
                       # says 
         echo i586-unisys-sysv4
-        exit 0 ;;
+        exit ;;
     *:UNIX_System_V:4*:FTX*)
 	# From Gerald Hewes .
 	# How about differentiating between stratus architectures? -djm
 	echo hppa1.1-stratus-sysv4
-	exit 0 ;;
+	exit ;;
     *:*:*:FTX*)
 	# From seanf@swdc.stratus.com.
 	echo i860-stratus-sysv4
-	exit 0 ;;
+	exit ;;
+    i*86:VOS:*:*)
+	# From Paul.Green@stratus.com.
+	echo ${UNAME_MACHINE}-stratus-vos
+	exit ;;
     *:VOS:*:*)
 	# From Paul.Green@stratus.com.
 	echo hppa1.1-stratus-vos
-	exit 0 ;;
+	exit ;;
     mc68*:A/UX:*:*)
 	echo m68k-apple-aux${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     news*:NEWS-OS:6*:*)
 	echo mips-sony-newsos6
-	exit 0 ;;
+	exit ;;
     R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
 	if [ -d /usr/nec ]; then
 	        echo mips-nec-sysv${UNAME_RELEASE}
 	else
 	        echo mips-unknown-sysv${UNAME_RELEASE}
 	fi
-        exit 0 ;;
+        exit ;;
     BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
 	echo powerpc-be-beos
-	exit 0 ;;
+	exit ;;
     BeMac:BeOS:*:*)	# BeOS running on Mac or Mac clone, PPC only.
 	echo powerpc-apple-beos
-	exit 0 ;;
+	exit ;;
     BePC:BeOS:*:*)	# BeOS running on Intel PC compatible.
 	echo i586-pc-beos
-	exit 0 ;;
+	exit ;;
     SX-4:SUPER-UX:*:*)
 	echo sx4-nec-superux${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     SX-5:SUPER-UX:*:*)
 	echo sx5-nec-superux${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     SX-6:SUPER-UX:*:*)
 	echo sx6-nec-superux${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     Power*:Rhapsody:*:*)
 	echo powerpc-apple-rhapsody${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:Rhapsody:*:*)
 	echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:Darwin:*:*)
 	UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
 	case $UNAME_PROCESSOR in
-	    *86) UNAME_PROCESSOR=i686 ;;
 	    unknown) UNAME_PROCESSOR=powerpc ;;
 	esac
 	echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:procnto*:*:* | *:QNX:[0123456789]*:*)
 	UNAME_PROCESSOR=`uname -p`
 	if test "$UNAME_PROCESSOR" = "x86"; then
@@ -1190,22 +1196,25 @@ EOF
 		UNAME_MACHINE=pc
 	fi
 	echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:QNX:*:4*)
 	echo i386-pc-qnx
-	exit 0 ;;
+	exit ;;
+    NSE-?:NONSTOP_KERNEL:*:*)
+	echo nse-tandem-nsk${UNAME_RELEASE}
+	exit ;;
     NSR-?:NONSTOP_KERNEL:*:*)
 	echo nsr-tandem-nsk${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:NonStop-UX:*:*)
 	echo mips-compaq-nonstopux
-	exit 0 ;;
+	exit ;;
     BS2000:POSIX*:*:*)
 	echo bs2000-siemens-sysv
-	exit 0 ;;
+	exit ;;
     DS/*:UNIX_System_V:*:*)
 	echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:Plan9:*:*)
 	# "uname -m" is not consistent, so use $cputype instead. 386
 	# is converted to i386 for consistency with other x86
@@ -1216,38 +1225,44 @@ EOF
 	    UNAME_MACHINE="$cputype"
 	fi
 	echo ${UNAME_MACHINE}-unknown-plan9
-	exit 0 ;;
+	exit ;;
     *:TOPS-10:*:*)
 	echo pdp10-unknown-tops10
-	exit 0 ;;
+	exit ;;
     *:TENEX:*:*)
 	echo pdp10-unknown-tenex
-	exit 0 ;;
+	exit ;;
     KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
 	echo pdp10-dec-tops20
-	exit 0 ;;
+	exit ;;
     XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
 	echo pdp10-xkl-tops20
-	exit 0 ;;
+	exit ;;
     *:TOPS-20:*:*)
 	echo pdp10-unknown-tops20
-	exit 0 ;;
+	exit ;;
     *:ITS:*:*)
 	echo pdp10-unknown-its
-	exit 0 ;;
+	exit ;;
     SEI:*:*:SEIUX)
         echo mips-sei-seiux${UNAME_RELEASE}
-	exit 0 ;;
+	exit ;;
     *:DragonFly:*:*)
 	echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
-	exit 0 ;;
+	exit ;;
     *:*VMS:*:*)
     	UNAME_MACHINE=`(uname -p) 2>/dev/null`
 	case "${UNAME_MACHINE}" in
-	    A*) echo alpha-dec-vms && exit 0 ;;
-	    I*) echo ia64-dec-vms && exit 0 ;;
-	    V*) echo vax-dec-vms && exit 0 ;;
-	esac
+	    A*) echo alpha-dec-vms ; exit ;;
+	    I*) echo ia64-dec-vms ; exit ;;
+	    V*) echo vax-dec-vms ; exit ;;
+	esac ;;
+    *:XENIX:*:SysV)
+	echo i386-pc-xenix
+	exit ;;
+    i*86:skyos:*:*)
+	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	exit ;;
 esac
 
 #echo '(No uname command or uname output not recognized.)' 1>&2
@@ -1279,7 +1294,7 @@ main ()
 #endif
 
 #if defined (__arm) && defined (__acorn) && defined (__unix)
-  printf ("arm-acorn-riscix"); exit (0);
+  printf ("arm-acorn-riscix\n"); exit (0);
 #endif
 
 #if defined (hp300) && !defined (hpux)
@@ -1368,11 +1383,12 @@ main ()
 }
 EOF
 
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy && exit 0
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+	{ echo "$SYSTEM_NAME"; exit; }
 
 # Apollos put the system type in the environment.
 
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
 
 # Convex versions that predate uname can use getsysinfo(1)
 
@@ -1381,22 +1397,22 @@ then
     case `getsysinfo -f cpu_type` in
     c1*)
 	echo c1-convex-bsd
-	exit 0 ;;
+	exit ;;
     c2*)
 	if getsysinfo -f scalar_acc
 	then echo c32-convex-bsd
 	else echo c2-convex-bsd
 	fi
-	exit 0 ;;
+	exit ;;
     c34*)
 	echo c34-convex-bsd
-	exit 0 ;;
+	exit ;;
     c38*)
 	echo c38-convex-bsd
-	exit 0 ;;
+	exit ;;
     c4*)
 	echo c4-convex-bsd
-	exit 0 ;;
+	exit ;;
     esac
 fi
 
@@ -1407,7 +1423,9 @@ This script, last modified $timestamp, has failed to recognize
 the operating system you are using. It is advised that you
 download the most up to date version of the config scripts from
 
-    ftp://ftp.gnu.org/pub/gnu/config/
+  http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
+and
+  http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
 
 If the version you run ($0) is already up to date, please
 send the following data and any information you think might be
diff --git a/storage/bdb/dist/config.sub b/storage/bdb/dist/config.sub
index edb6b663ca2..1c366dfde9a 100755
--- a/storage/bdb/dist/config.sub
+++ b/storage/bdb/dist/config.sub
@@ -1,9 +1,9 @@
 #! /bin/sh
 # Configuration validation subroutine script.
 #   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-#   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+#   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
 
-timestamp='2004-08-29'
+timestamp='2005-07-08'
 
 # This file is (in principle) common to ALL GNU software.
 # The presence of a machine in this file suggests that SOME GNU software
@@ -21,14 +21,15 @@ timestamp='2004-08-29'
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330,
-# Boston, MA 02111-1307, USA.
-
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
 # configuration script generated by Autoconf, you may include it under
 # the same distribution terms that you use for the rest of that program.
 
+
 # Please send patches to .  Submit a context
 # diff and a properly formatted ChangeLog entry.
 #
@@ -70,7 +71,7 @@ Report bugs and patches to ."
 version="\
 GNU config.sub ($timestamp)
 
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
@@ -83,11 +84,11 @@ Try \`$me --help' for more information."
 while test $# -gt 0 ; do
   case $1 in
     --time-stamp | --time* | -t )
-       echo "$timestamp" ; exit 0 ;;
+       echo "$timestamp" ; exit ;;
     --version | -v )
-       echo "$version" ; exit 0 ;;
+       echo "$version" ; exit ;;
     --help | --h* | -h )
-       echo "$usage"; exit 0 ;;
+       echo "$usage"; exit ;;
     -- )     # Stop option processing
        shift; break ;;
     - )	# Use stdin as input.
@@ -99,7 +100,7 @@ while test $# -gt 0 ; do
     *local*)
        # First pass through any local machine types.
        echo $1
-       exit 0;;
+       exit ;;
 
     * )
        break ;;
@@ -231,13 +232,14 @@ case $basic_machine in
 	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
 	| am33_2.0 \
 	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
+	| bfin \
 	| c4x | clipper \
 	| d10v | d30v | dlx | dsp16xx \
 	| fr30 | frv \
 	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
 	| i370 | i860 | i960 | ia64 \
 	| ip2k | iq2000 \
-	| m32r | m32rle | m68000 | m68k | m88k | mcore \
+	| m32r | m32rle | m68000 | m68k | m88k | maxq | mcore \
 	| mips | mipsbe | mipseb | mipsel | mipsle \
 	| mips16 \
 	| mips64 | mips64el \
@@ -246,6 +248,7 @@ case $basic_machine in
 	| mips64vr4100 | mips64vr4100el \
 	| mips64vr4300 | mips64vr4300el \
 	| mips64vr5000 | mips64vr5000el \
+	| mips64vr5900 | mips64vr5900el \
 	| mipsisa32 | mipsisa32el \
 	| mipsisa32r2 | mipsisa32r2el \
 	| mipsisa64 | mipsisa64el \
@@ -254,23 +257,28 @@ case $basic_machine in
 	| mipsisa64sr71k | mipsisa64sr71kel \
 	| mipstx39 | mipstx39el \
 	| mn10200 | mn10300 \
+	| ms1 \
 	| msp430 \
 	| ns16k | ns32k \
-	| openrisc | or32 \
+	| or32 \
 	| pdp10 | pdp11 | pj | pjl \
 	| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
 	| pyramid \
-	| sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
+	| sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
 	| sh64 | sh64le \
-	| sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv8 | sparcv9 | sparcv9b \
+	| sparc | sparc64 | sparc64b | sparc86x | sparclet | sparclite \
+	| sparcv8 | sparcv9 | sparcv9b \
 	| strongarm \
 	| tahoe | thumb | tic4x | tic80 | tron \
 	| v850 | v850e \
 	| we32k \
-	| x86 | xscale | xstormy16 | xtensa \
+	| x86 | xscale | xscalee[bl] | xstormy16 | xtensa \
 	| z8k)
 		basic_machine=$basic_machine-unknown
 		;;
+	m32c)
+		basic_machine=$basic_machine-unknown
+		;;
 	m6811 | m68hc11 | m6812 | m68hc12)
 		# Motorola 68HC11/12.
 		basic_machine=$basic_machine-unknown
@@ -298,7 +306,7 @@ case $basic_machine in
 	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
 	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
 	| avr-* \
-	| bs2000-* \
+	| bfin-* | bs2000-* \
 	| c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
 	| clipper-* | craynv-* | cydra-* \
 	| d10v-* | d30v-* | dlx-* \
@@ -310,7 +318,7 @@ case $basic_machine in
 	| ip2k-* | iq2000-* \
 	| m32r-* | m32rle-* \
 	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
-	| m88110-* | m88k-* | mcore-* \
+	| m88110-* | m88k-* | maxq-* | mcore-* \
 	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
 	| mips16-* \
 	| mips64-* | mips64el-* \
@@ -319,6 +327,7 @@ case $basic_machine in
 	| mips64vr4100-* | mips64vr4100el-* \
 	| mips64vr4300-* | mips64vr4300el-* \
 	| mips64vr5000-* | mips64vr5000el-* \
+	| mips64vr5900-* | mips64vr5900el-* \
 	| mipsisa32-* | mipsisa32el-* \
 	| mipsisa32r2-* | mipsisa32r2el-* \
 	| mipsisa64-* | mipsisa64el-* \
@@ -327,6 +336,7 @@ case $basic_machine in
 	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
 	| mipstx39-* | mipstx39el-* \
 	| mmix-* \
+	| ms1-* \
 	| msp430-* \
 	| none-* | np1-* | ns16k-* | ns32k-* \
 	| orion-* \
@@ -334,20 +344,23 @@ case $basic_machine in
 	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
 	| pyramid-* \
 	| romp-* | rs6000-* \
-	| sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | shbe-* \
 	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
-	| sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
+	| sparc-* | sparc64-* | sparc64b-* | sparc86x-* | sparclet-* \
+	| sparclite-* \
 	| sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
 	| tahoe-* | thumb-* \
 	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
 	| tron-* \
 	| v850-* | v850e-* | vax-* \
 	| we32k-* \
-	| x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
-	| xtensa-* \
+	| x86-* | x86_64-* | xps100-* | xscale-* | xscalee[bl]-* \
+	| xstormy16-* | xtensa-* \
 	| ymp-* \
 	| z8k-*)
 		;;
+	m32c-*)
+		;;
 	# Recognize the various machine names and aliases which stand
 	# for a CPU type and a company and sometimes even an OS.
 	386bsd)
@@ -489,6 +502,10 @@ case $basic_machine in
 		basic_machine=m88k-motorola
 		os=-sysv3
 		;;
+	djgpp)
+		basic_machine=i586-pc
+		os=-msdosdjgpp
+		;;
 	dpx20 | dpx20-*)
 		basic_machine=rs6000-bull
 		os=-bosx
@@ -754,9 +771,8 @@ case $basic_machine in
 		basic_machine=hppa1.1-oki
 		os=-proelf
 		;;
-	or32 | or32-*)
+	openrisc | openrisc-*)
 		basic_machine=or32-unknown
-		os=-coff
 		;;
 	os400)
 		basic_machine=powerpc-ibm
@@ -1029,6 +1045,10 @@ case $basic_machine in
 		basic_machine=hppa1.1-winbond
 		os=-proelf
 		;;
+	xbox)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
 	xps | xps100)
 		basic_machine=xps100-honeywell
 		;;
@@ -1078,12 +1098,9 @@ case $basic_machine in
 	we32k)
 		basic_machine=we32k-att
 		;;
-	sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele)
+	sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
 		basic_machine=sh-unknown
 		;;
-	sh64)
-		basic_machine=sh64-unknown
-		;;
 	sparc | sparcv8 | sparcv9 | sparcv9b)
 		basic_machine=sparc-sun
 		;;
@@ -1170,7 +1187,8 @@ case $os in
 	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
 	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
 	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
-	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*)
+	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+	      | -skyos* | -haiku*)
 	# Remember, each alternative MUST END IN *, to match a version number.
 		;;
 	-qnx*)
@@ -1188,7 +1206,7 @@ case $os in
 		os=`echo $os | sed -e 's|nto|nto-qnx|'`
 		;;
 	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
-	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
 	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
 		;;
 	-mac*)
@@ -1297,6 +1315,9 @@ case $os in
 	-kaos*)
 		os=-kaos
 		;;
+	-zvmoe)
+		os=-zvmoe
+		;;
 	-none)
 		;;
 	*)
@@ -1374,6 +1395,9 @@ case $basic_machine in
 	*-be)
 		os=-beos
 		;;
+	*-haiku)
+		os=-haiku
+		;;
 	*-ibm)
 		os=-aix
 		;;
@@ -1545,7 +1569,7 @@ case $basic_machine in
 esac
 
 echo $basic_machine$os
-exit 0
+exit
 
 # Local variables:
 # eval: (add-hook 'write-file-hooks 'time-stamp)
diff --git a/storage/bdb/dist/configure.ac b/storage/bdb/dist/configure.ac
index 8106d51d0ca..6f06d1eb3c8 100644
--- a/storage/bdb/dist/configure.ac
+++ b/storage/bdb/dist/configure.ac
@@ -1,4 +1,4 @@
-# $Id: configure.ac,v 11.221 2004/11/16 01:07:57 mjc Exp $
+# $Id: configure.ac,v 12.9 2005/10/14 20:52:29 bostic Exp $
 # Process this file with autoconf to produce a configure script.
 
 PACKAGE=db
@@ -43,6 +43,7 @@ AC_SUBST(INSTALL_TARGET)
 AC_SUBST(JAR)
 AC_SUBST(JAVACFLAGS)
 AC_SUBST(LDFLAGS)
+AC_SUBST(LIBCSO_LIBS)
 AC_SUBST(LIBJSO_LIBS)
 AC_SUBST(LIBS)
 AC_SUBST(LIBSO_LIBS)
@@ -65,6 +66,7 @@ AC_SUBST(RPC_CLIENT_OBJS)
 AC_SUBST(RPC_SERVER_H)
 AC_SUBST(SOFLAGS)
 AC_SUBST(TEST_LIBS)
+AC_SUBST(db_cv_build_type)
 AC_SUBST(db_int_def)
 AC_SUBST(o)
 
@@ -134,10 +136,10 @@ INSTALL_TARGET="library_install"
 # because the makefile CC may be set to use $(LIBTOOL).
 #
 # Don't override anything if it's already set from the environment.
-optimize_def="-O"
+optimize_debug="-O"
 case "$host_os" in
 aix4.3.*|aix5*)
-	optimize_def="-O2"
+	optimize_debug="-O2"
 	CC=${CC-"xlc_r"}
 	CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
 	LDFLAGS="$LDFLAGS -Wl,-brtl";;
@@ -151,7 +153,7 @@ freebsd*)
 gnu*|k*bsd*-gnu|linux*)
 	CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";;
 hpux*)	CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
-irix*)	optimize_def="-O2"
+irix*)	optimize_debug="-O2"
 	CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";;
 mpeix*)	CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE"
 	LIBS="$LIBS -lsocket -lsvipc";;
@@ -162,35 +164,30 @@ solaris*)
 	CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
 esac
 
-# Set CFLAGS/CXXFLAGS.  We MUST set the flags before we call autoconf
-# compiler configuration macros, because if we don't, they set CFLAGS
-# to no optimization and -g, which isn't what we want.
-CFLAGS=${CFLAGS-$optimize_def}
-CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
-
-# If the user wants a debugging environment, add -g to the CFLAGS value.
-#
-# XXX
-# Some compilers can't mix optimizing and debug flags.  The only way to
-# handle this is to specify CFLAGS in the environment before configuring.
+# If the user wants a debugging environment, change any compiler optimization
+# flags to -g.  We used to add -g to the -O compiler flags, but compilers are
+# good enough at code re-organization that debugging with -O no longer works.
+# If you want to compile with a different set of flags, specify CFLAGS in the
+# environment before configuring.
 if test "$db_cv_debug" = "yes"; then
 	AC_DEFINE(DEBUG)
 	AH_TEMPLATE(DEBUG, [Define to 1 if you want a debugging version.])
 
-	CFLAGS="$CFLAGS -g"
-	CXXFLAGS="$CXXFLAGS -g"
-	db_cv_build_type=debug
-else
-	db_cv_build_type=release
+	optimize_debug="-g"
 fi
-AC_SUBST(db_cv_build_type)
+
+# Set CFLAGS/CXXFLAGS.  We MUST set the flags before we call autoconf
+# compiler configuration macros, because if we don't, they set CFLAGS
+# to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_debug}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
 
 # The default compiler is cc (NOT gcc), the default CFLAGS is as specified
 # above, NOT what is set by AC_PROG_CC, as it won't set optimization flags
 # for any compiler other than gcc.
 AC_PROG_CC(cc gcc)
 
-# Set specific per-compiler flags.
+# We know what compiler we're going to use, now.  Set per-compiler flags.
 if test "$GCC" = "yes"; then
 	# We want -O2 if we're using gcc.
 	CFLAGS="$CFLAGS "
@@ -363,6 +360,14 @@ if test "$db_cv_cxx" = "yes"; then
 	fi
 fi
 
+# We split DbConstants.java into debug and release versions so Windows
+# developers don't need to do anything special to use the Debug DLL.
+if test "$db_cv_debug" = "yes"; then
+	db_cv_build_type=debug
+else
+	db_cv_build_type=release
+fi
+
 # Optional Java API.
 if test "$db_cv_java" = "yes"; then
 	# Java requires shared libraries.
@@ -438,13 +443,10 @@ fi
 #
 # The Berkeley DB library calls fdatasync, and it's only available in -lrt on
 # Solaris.  See if we can find it either without additional libraries or in
-# -lrt.  If fdatasync is found in -lrt, add -lrt to the Java and Tcl shared
-# library link lines.
+# -lrt.  If fdatasync is found in -lrt, add -lrt to the shared library links.
 AC_SEARCH_LIBS(fdatasync, rt, [dnl
     if test "$ac_cv_search_fdatasync" != "none required" ; then
-        LIBJSO_LIBS="$LIBJSO_LIBS -lrt";
-        LIBSO_LIBS="$LIBSO_LIBS -lrt";
-        LIBTSO_LIBS="$LIBTSO_LIBS -lrt";
+	LIBSO_LIBS="$LIBSO_LIBS -lrt";
     fi])
 
 # The test and example programs use the sched_yield function, taken from -lrt
@@ -482,9 +484,16 @@ AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise)
 AC_REPLACE_FUNCS(strcasecmp strdup strerror strtol strtoul)
 
 # Check for system functions we optionally use.
-AC_CHECK_FUNCS(_fstati64 clock_gettime directio fdatasync ftruncate getrusage)
-AC_CHECK_FUNCS(gettimeofday getuid pstat_getdynamic rand sched_yield)
-AC_CHECK_FUNCS(select snprintf srand sysconf vsnprintf yield)
+AC_CHECK_FUNCS(\
+   _fstati64 clock_gettime directio fchmod fcntl fdatasync ftruncate\
+   getrusage gettimeofday getuid pstat_getdynamic rand sched_yield\
+   select snprintf srand sysconf vsnprintf yield)
+
+# Pthread_self.
+# The use of pthread_self to identify threads can be forced.
+if test "$db_cv_pthread_self" = "yes"; then
+	AC_CHECK_FUNCS(pthread_self)
+fi
 
 # Pread/pwrite.
 # HP-UX has pread/pwrite, but it doesn't work with largefile support.
@@ -672,6 +681,12 @@ else
 	CRYPTO_OBJS="crypto_stub${o}"
 fi
 
+# If DIAGNOSTIC is defined, include the log print routines in the library
+# itself, various diagnostic modes use them.
+if test "$db_cv_diagnostic" = "yes"; then
+	ADDITIONAL_OBJS="$ADDITIONAL_OBJS \$(PRINT_OBJS)"
+fi
+
 # We need to add the additional object files into the Makefile with the correct
 # suffix.  We can't use $LTLIBOBJS itself, because that variable has $U encoded
 # in it for automake, and that's not what we want.  See SR #7227 for additional
diff --git a/storage/bdb/dist/gen_rec.awk b/storage/bdb/dist/gen_rec.awk
index 001ec65f0c5..b73a2bbf873 100644
--- a/storage/bdb/dist/gen_rec.awk
+++ b/storage/bdb/dist/gen_rec.awk
@@ -2,10 +2,10 @@
 #
 # See the file LICENSE for redistribution information.
 #
-# Copyright (c) 1996-2004
+# Copyright (c) 1996-2005
 #	Sleepycat Software.  All rights reserved.
 #
-# $Id: gen_rec.awk,v 11.110 2004/10/20 20:40:58 bostic Exp $
+# $Id: gen_rec.awk,v 12.6 2005/10/12 18:48:44 ubell Exp $
 #
 
 # This awk script generates all the log, print, and read routines for the DB
@@ -445,11 +445,10 @@ function log_function()
 		printf("DbEnv->log_put call,\n\t\t * ") >> CFILE;
 		printf("so pass in the appropriate memory location to be ") \
 		    >> CFILE;
-		printf("filled\n\t\t * in by the log_put code.\n\t\t*/\n") \
+		printf("filled\n\t\t * in by the log_put code.\n\t\t */\n") \
 		    >> CFILE;
-		printf("\t\tDB_SET_BEGIN_LSNP(txnid, &rlsnp);\n") >> CFILE;
+		printf("\t\tDB_SET_TXN_LSNP(txnid, &rlsnp, &lsnp);\n") >> CFILE;
 		printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE;
-		printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
 		printf("\t}\n\n") >> CFILE;
 
 		# If we're logging a DB handle, make sure we have a log
@@ -608,7 +607,7 @@ function log_function()
 			printf("(DBT *)&logrec,\n") >> CFILE;
 			printf("\t\t    flags | DB_LOG_NOCOPY)) == 0") >> CFILE;
 			printf(" && txnid != NULL) {\n") >> CFILE;
-			printf("\t\t\ttxnid->last_lsn = *rlsnp;\n") >> CFILE;
+			printf("\t\t\t*lsnp = *rlsnp;\n") >> CFILE;
 
 			printf("\t\t\tif (rlsnp != ret_lsnp)\n") >> CFILE;
 			printf("\t\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE;
@@ -639,6 +638,8 @@ function log_function()
 			# Add a ND record to the txn list.
 			printf("\t\tSTAILQ_INSERT_HEAD(&txnid") >> CFILE;
 			printf("->logs, lr, links);\n") >> CFILE;
+			printf("\t\tF_SET((TXN_DETAIL *)") >> CFILE;
+			printf("txnid->td, TXN_DTL_INMEMORY);\n") >> CFILE;
 			# Update the return LSN.
 			printf("\t\tLSN_NOT_LOGGED(*ret_lsnp);\n") >> CFILE;
 			printf("\t}\n\n") >> CFILE;
@@ -649,7 +650,7 @@ function log_function()
 			printf(" && txnid != NULL) {\n") >> CFILE;
 
                 	# Update the transactions last_lsn.
-			printf("\t\ttxnid->last_lsn = *rlsnp;\n") >> CFILE;
+			printf("\t\t*lsnp = *rlsnp;\n") >> CFILE;
 			printf("\t\tif (rlsnp != ret_lsnp)\n") >> CFILE;
 			printf("\t\t\t *ret_lsnp = *rlsnp;\n") >> CFILE;
 			printf("\t}\n") >> CFILE;
@@ -660,9 +661,8 @@ function log_function()
 		printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE
 		printf("\tif (ret != 0)\n") >> CFILE;
 		printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE;
-		printf("\t\t    (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") \
-		    >> CFILE
-		printf("#endif\n\n") >> CFILE
+		printf("\t\t    (DBT *)&logrec, ret_lsnp, ") >> CFILE
+		printf("DB_TXN_PRINT, NULL);\n#endif\n\n") >> CFILE
 		# Free and return
 		if (dbprivate) {
 			printf("#ifdef DIAGNOSTIC\n") >> CFILE
@@ -738,7 +738,7 @@ function print_function()
 	printf("\tint ret;\n\n") >> PFILE;
 
 	# Get rid of complaints about unused parameters.
-	printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> PFILE;
+	printf("\tnotused2 = DB_TXN_PRINT;\n\tnotused3 = NULL;\n\n") >> PFILE;
 
 	# Call read routine to initialize structure
 	printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
diff --git a/storage/bdb/dist/gen_rpc.awk b/storage/bdb/dist/gen_rpc.awk
index 7eee77b3df2..ac29648ea6a 100644
--- a/storage/bdb/dist/gen_rpc.awk
+++ b/storage/bdb/dist/gen_rpc.awk
@@ -1,5 +1,5 @@
 #
-# $Id: gen_rpc.awk,v 11.58 2004/08/19 20:28:37 mjc Exp $
+# $Id: gen_rpc.awk,v 12.4 2005/07/21 18:21:20 bostic Exp $
 # Awk script for generating client/server RPC code.
 #
 # This awk script generates most of the RPC routines for DB client/server
@@ -54,6 +54,12 @@ BEGIN {
 	printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
 	    > XFILE
 	nendlist = 1;
+
+	# Output headers
+	general_headers()
+
+	# Put out the actual illegal and no-server functions.
+	illegal_functions(CFILE)
 }
 END {
 	if (error == 0) {
@@ -65,29 +71,60 @@ END {
 
 		printf("\t} = %d%03d;\n", major, minor) >> XFILE
 		printf("} = 351457;\n") >> XFILE
+
+		obj_init("DB", "dbp", obj_db, CFILE)
+		obj_init("DBC", "dbc", obj_dbc, CFILE)
+		obj_init("DB_ENV", "dbenv", obj_dbenv, CFILE)
+		obj_init("DB_TXN", "txn", obj_txn, CFILE)
 	}
 }
 
-/^[	 ]*BEGIN/ {
-	name = $2;
-	nofunc_code = 0;
-	funcvars = 0;
-	ret_code = 0;
-	if ($3 == "NOFUNC")
-		nofunc_code = 1;
-	if ($3 == "RETCODE")
-		ret_code = 1;
+/^[	 ]*LOCAL/ {
+	# LOCAL methods are ones where we don't override the handle
+	# method for RPC, nor is it illegal -- it's just satisfied
+	# locally.
+	next;
+}
+/^[	 ]*NOFUNC/ {
+	++obj_indx;
 
+	# NOFUNC methods are illegal on the RPC client.
+	if ($2 ~ "^db_")
+		obj_illegal(obj_db, "dbp", $2, $3)
+	else if ($2 ~ "^dbc_")
+		obj_illegal(obj_dbc, "dbc", $2, $3)
+	else if ($2 ~ "^env_")
+		obj_illegal(obj_dbenv, "dbenv", $2, $3)
+	else if ($2 ~ "^txn_")
+		obj_illegal(obj_txn, "txn", $2, $3)
+	else {
+		print "unexpected handle prefix: " $2
+		error = 1; exit
+	}
+	next;
+}
+/^[	 ]*BEGIN/ {
+	++obj_indx;
+
+	name = $2;
+	link_only = ret_code = 0
+	if ($3 == "LINKONLY")
+		link_only = 1
+	else if ($3 == "RETCODE")
+		ret_code = 1
+
+	funcvars = 0;
+	newvars = 0;
 	nvars = 0;
 	rvars = 0;
-	newvars = 0;
-	db_handle = 0;
-	env_handle = 0;
-	dbc_handle = 0;
-	txn_handle = 0;
-	mp_handle = 0;
-	dbt_handle = 0;
 	xdr_free = 0;
+
+	db_handle = 0;
+	dbc_handle = 0;
+	dbt_handle = 0;
+	env_handle = 0;
+	mp_handle = 0;
+	txn_handle = 0;
 }
 /^[	 ]*ARG/ {
 	rpc_type[nvars] = $2;
@@ -102,36 +139,36 @@ END {
 
 	if (c_type[nvars] == "DBT *")
 		dbt_handle = 1;
-
-	if (c_type[nvars] == "DB_ENV *") {
+	else if (c_type[nvars] == "DB_ENV *") {
 		ctp_type[nvars] = "CT_ENV";
 		env_handle = 1;
 		env_idx = nvars;
-	}
 
-	if (c_type[nvars] == "DB *") {
+		if (nvars == 0)
+			obj_func("dbenv", obj_dbenv);
+	} else if (c_type[nvars] == "DB *") {
 		ctp_type[nvars] = "CT_DB";
 		if (db_handle != 1) {
 			db_handle = 1;
 			db_idx = nvars;
 		}
-	}
 
-	if (c_type[nvars] == "DBC *") {
+		if (nvars == 0)
+			obj_func("dbp", obj_db);
+	} else if (c_type[nvars] == "DBC *") {
 		ctp_type[nvars] = "CT_CURSOR";
 		dbc_handle = 1;
 		dbc_idx = nvars;
-	}
 
-	if (c_type[nvars] == "DB_TXN *") {
+		if (nvars == 0)
+			obj_func("dbc", obj_dbc);
+	} else if (c_type[nvars] == "DB_TXN *") {
 		ctp_type[nvars] = "CT_TXN";
 		txn_handle = 1;
 		txn_idx = nvars;
-	}
 
-	if (c_type[nvars] == "DB_MPOOLFILE *") {
-		mp_handle = 1;
-		mp_idx = nvars;
+		if (nvars == 0)
+			obj_func("txn", obj_txn);
 	}
 
 	++nvars;
@@ -189,198 +226,10 @@ END {
 /^[	 ]*END/ {
 	#
 	# =====================================================
-	# File headers, if necessary.
+	# LINKONLY -- just reference the function, that's all.
 	#
-	if (first == 0) {
-		printf("#include \"db_config.h\"\n") >> CFILE
-		printf("\n") >> CFILE
-		printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
-		printf("#include \n") >> CFILE
-		printf("\n") >> CFILE
-		printf("#include \n") >> CFILE
-		printf("\n") >> CFILE
-		printf("#include \n") >> CFILE
-		printf("#endif\n") >> CFILE
-		printf("\n") >> CFILE
-		printf("#include \"db_server.h\"\n") >> CFILE
-		printf("\n") >> CFILE
-		printf("#include \"db_int.h\"\n") >> CFILE
-		printf("#include \"dbinc/txn.h\"\n") >> CFILE
-		printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE
-		printf("\n") >> CFILE
-
-		printf("#include \"db_config.h\"\n") >> TFILE
-		printf("\n") >> TFILE
-		printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
-		printf("#include \n") >> TFILE
-		printf("\n") >> TFILE
-		printf("#include \n") >> TFILE
-		printf("#endif\n") >> TFILE
-		printf("#include \"db_int.h\"\n") >> TFILE
-		printf("#include \"dbinc/txn.h\"\n") >> TFILE
-		printf("\n") >> TFILE
-
-		printf("#include \"db_config.h\"\n") >> SFILE
-		printf("\n") >> SFILE
-		printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
-		printf("#include \n") >> SFILE
-		printf("\n") >> SFILE
-		printf("#include \n") >> SFILE
-		printf("\n") >> SFILE
-		printf("#include \n") >> SFILE
-		printf("#endif\n") >> SFILE
-		printf("\n") >> SFILE
-		printf("#include \"db_server.h\"\n") >> SFILE
-		printf("\n") >> SFILE
-		printf("#include \"db_int.h\"\n") >> SFILE
-		printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE
-		printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE
-		printf("\n") >> SFILE
-
-		printf("#include \"db_config.h\"\n") >> PFILE
-		printf("\n") >> PFILE
-		printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
-		printf("#include \n") >> PFILE
-		printf("\n") >> PFILE
-		printf("#include \n") >> PFILE
-		printf("\n") >> PFILE
-		printf("#include \n") >> PFILE
-		printf("#endif\n") >> PFILE
-		printf("\n") >> PFILE
-		printf("#include \"db_server.h\"\n") >> PFILE
-		printf("\n") >> PFILE
-		printf("#include \"db_int.h\"\n") >> PFILE
-		printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE
-		printf("\n") >> PFILE
-
-		first = 1;
-	}
-	#
-	# =====================================================
-	# Generate Client Nofunc code first if necessary
-	# NOTE:  This code must be first, because we don't want any
-	# other code other than this function, so before we write
-	# out to the XDR and server files, we just generate this
-	# and move on if this is all we are doing.
-	#
-	if (nofunc_code == 1) {
-		#
-		# First time through, put out the general no server and
-		# illegal functions.
-		#
-		if (first_nofunc == 0) {
-			printf("static int __dbcl_noserver ") >> CFILE
-			printf("__P((DB_ENV *));\n\n") >> CFILE
-			printf("static int\n") >> CFILE
-			printf("__dbcl_noserver(dbenv)\n") >> CFILE
-			printf("\tDB_ENV *dbenv;\n") >> CFILE
-			printf("{\n\t__db_err(dbenv,") >> CFILE
-			printf(" \"No server environment\");\n") >> CFILE
-			printf("\treturn (DB_NOSERVER);\n") >> CFILE
-			printf("}\n\n") >> CFILE
-
-			printf("static int __dbcl_rpc_illegal ") >> CFILE
-			printf("__P((DB_ENV *, char *));\n\n") >> CFILE
-			printf("static int\n") >> CFILE
-			printf("__dbcl_rpc_illegal(dbenv, name)\n") >> CFILE
-			printf("\tDB_ENV *dbenv;\n\tchar *name;\n") >> CFILE
-			printf("{\n\t__db_err(dbenv,") >> CFILE
-			printf(" \"%%s method unsupported in RPC") >> CFILE
-			printf(" environments\", name);\n") >> CFILE
-			printf("\treturn (DB_OPNOTSUP);\n") >> CFILE
-			printf("}\n\n") >> CFILE
-
-			first_nofunc = 1
-		}
-		#
-		# Spit out PUBLIC prototypes.
-		#
-		delete p;
-		pi = 1;
-		p[pi++] = sprintf("int __dbcl_%s __P((", name);
-		p[pi++] = "";
-		for (i = 0; i < nvars; ++i) {
-			p[pi++] = pr_type[i];
-			p[pi++] = ", ";
-		}
-		p[pi - 1] = "";
-		p[pi] = "));";
-		proto_format(p, CFILE);
-
-		#
-		# Spit out function name/args.
-		#
-		printf("int\n") >> CFILE
-		printf("__dbcl_%s(", name) >> CFILE
-		sep = "";
-		for (i = 0; i < nvars; ++i) {
-			printf("%s%s", sep, args[i]) >> CFILE
-			sep = ", ";
-		}
-		printf(")\n") >> CFILE
-
-		for (i = 0; i < nvars; ++i)
-			if (func_arg[i] == 0)
-				printf("\t%s %s;\n", c_type[i], args[i]) \
-				    >> CFILE
-			else
-				printf("\t%s;\n", c_type[i]) >> CFILE
-
-		#
-		# Call error function and return EINVAL
-		#
-		printf("{\n") >> CFILE
-
-		#
-		# If we don't have a local env, set one.
-		#
-		if (env_handle == 0) {
-			printf("\tDB_ENV *dbenv;\n\n") >> CFILE
-			if (db_handle)
-				printf("\tdbenv = %s->dbenv;\n", \
-				    args[db_idx]) >> CFILE
-			else if (dbc_handle)
-				printf("\tdbenv = %s->dbp->dbenv;\n", \
-				    args[dbc_idx]) >> CFILE
-			else if (txn_handle)
-				printf("\tdbenv = %s->mgrp->dbenv;\n", \
-				    args[txn_idx]) >> CFILE
-			else if (mp_handle)
-				printf("\tdbenv = %s->dbenv;\n", \
-				    args[mp_idx]) >> CFILE
-			else
-				printf("\tdbenv = NULL;\n") >> CFILE
-		}
-		#
-		# Quiet the compiler for all variables.
-		#
-		# NOTE:  Index 'i' starts at 1, not 0.  Our first arg is
-		# the handle we need to get to the env, and we do not want
-		# to COMPQUIET that one.
-		for (i = 1; i < nvars; ++i) {
-			if (rpc_type[i] == "CONST" || rpc_type[i] == "DBT" ||
-			    rpc_type[i] == "LIST" || rpc_type[i] == "STRING" ||
-			    rpc_type[i] == "GID") {
-				printf("\tCOMPQUIET(%s, NULL);\n", args[i]) \
-				    >> CFILE
-			}
-			if (rpc_type[i] == "INT" || rpc_type[i] == "IGNORE" ||
-			    rpc_type[i] == "ID") {
-				printf("\tCOMPQUIET(%s, 0);\n", args[i]) \
-				    >> CFILE
-			}
-		}
-
-		if (!env_handle) {
-			printf("\treturn (__dbcl_rpc_illegal(dbenv, ") >> CFILE
-			printf("\"%s\"));\n", name) >> CFILE
-		} else
-			printf("\treturn (__dbcl_rpc_illegal(%s, \"%s\"));\n", \
-			    args[env_idx], name) >> CFILE
-		printf("}\n\n") >> CFILE
-
+	if (link_only)
 		next;
-	}
 
 	#
 	# =====================================================
@@ -716,7 +565,7 @@ END {
 	#
 	for (i = 0; i < nvars; ++i) {
 		if (rpc_type[i] == "ID") {
-			printf("\tlong %scl_id;\n", args[i]) >> PFILE
+			printf("\tunsigned int %scl_id;\n", args[i]) >> PFILE
 		}
 		if (rpc_type[i] == "STRING") {
 			printf("\tchar *%s;\n", args[i]) >> PFILE
@@ -851,8 +700,7 @@ END {
 			    args[txn_idx]) >> CFILE
 		else
 			printf("\tdbenv = NULL;\n") >> CFILE
-		printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") \
-		    >> CFILE
+		printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") >> CFILE
 		printf("\t\treturn (__dbcl_noserver(NULL));\n") >> CFILE
 	} else {
 		printf("\tif (%s == NULL || !RPC_ON(%s))\n", \
@@ -862,13 +710,8 @@ END {
 	}
 	printf("\n") >> CFILE
 
-	if (!env_handle)
-		printf("\tcl = (CLIENT *)dbenv->cl_handle;\n") >> CFILE
-	else
-		printf("\tcl = (CLIENT *)%s->cl_handle;\n", \
-		    args[env_idx]) >> CFILE
-
-	printf("\n") >> CFILE
+	printf("\tcl = (CLIENT *)%s->cl_handle;\n\n", \
+	    env_handle ? args[env_idx] : "dbenv") >> CFILE
 
 	#
 	# If there is a function arg, check that it is NULL
@@ -891,15 +734,22 @@ END {
 	#
 	for (i = 0; i < nvars; ++i) {
 		if (rpc_type[i] == "ID") {
-			printf("\tif (%s == NULL)\n", args[i]) >> CFILE
-			printf("\t\tmsg.%scl_id = 0;\n\telse\n", \
-			    args[i]) >> CFILE
+			# We don't need to check for a NULL DB_ENV *, because
+			# we already checked for it.  I frankly couldn't care
+			# less, but lint gets all upset at the wasted cycles.
+			if (c_type[i] != "DB_ENV *") {
+				printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+				printf("\t\tmsg.%scl_id = 0;\n\telse\n", \
+				    args[i]) >> CFILE
+				indent = "\t\t";
+			} else
+				indent = "\t";
 			if (c_type[i] == "DB_TXN *") {
-				printf("\t\tmsg.%scl_id = %s->txnid;\n", \
-				    args[i], args[i]) >> CFILE
+				printf("%smsg.%scl_id = %s->txnid;\n", \
+				    indent, args[i], args[i]) >> CFILE
 			} else {
-				printf("\t\tmsg.%scl_id = %s->cl_id;\n", \
-				    args[i], args[i]) >> CFILE
+				printf("%smsg.%scl_id = %s->cl_id;\n", \
+				    indent, args[i], args[i]) >> CFILE
 			}
 		}
 		if (rpc_type[i] == "GID") {
@@ -1129,6 +979,177 @@ END {
 	}
 }
 
+function general_headers()
+{
+	printf("#include \"db_config.h\"\n") >> CFILE
+	printf("\n") >> CFILE
+	printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
+	printf("#include \n") >> CFILE
+	printf("\n") >> CFILE
+	printf("#include \n") >> CFILE
+	printf("\n") >> CFILE
+	printf("#include \n") >> CFILE
+	printf("#endif\n") >> CFILE
+	printf("\n") >> CFILE
+	printf("#include \"db_server.h\"\n") >> CFILE
+	printf("\n") >> CFILE
+	printf("#include \"db_int.h\"\n") >> CFILE
+	printf("#include \"dbinc/txn.h\"\n") >> CFILE
+	printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE
+	printf("\n") >> CFILE
+
+	printf("#include \"db_config.h\"\n") >> TFILE
+	printf("\n") >> TFILE
+	printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+	printf("#include \n") >> TFILE
+	printf("\n") >> TFILE
+	printf("#include \n") >> TFILE
+	printf("#endif\n") >> TFILE
+	printf("#include \"db_int.h\"\n") >> TFILE
+	printf("#include \"dbinc/txn.h\"\n") >> TFILE
+	printf("\n") >> TFILE
+
+	printf("#include \"db_config.h\"\n") >> SFILE
+	printf("\n") >> SFILE
+	printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
+	printf("#include \n") >> SFILE
+	printf("\n") >> SFILE
+	printf("#include \n") >> SFILE
+	printf("\n") >> SFILE
+	printf("#include \n") >> SFILE
+	printf("#endif\n") >> SFILE
+	printf("\n") >> SFILE
+	printf("#include \"db_server.h\"\n") >> SFILE
+	printf("\n") >> SFILE
+	printf("#include \"db_int.h\"\n") >> SFILE
+	printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE
+	printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE
+	printf("\n") >> SFILE
+
+	printf("#include \"db_config.h\"\n") >> PFILE
+	printf("\n") >> PFILE
+	printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
+	printf("#include \n") >> PFILE
+	printf("\n") >> PFILE
+	printf("#include \n") >> PFILE
+	printf("\n") >> PFILE
+	printf("#include \n") >> PFILE
+	printf("#endif\n") >> PFILE
+	printf("\n") >> PFILE
+	printf("#include \"db_server.h\"\n") >> PFILE
+	printf("\n") >> PFILE
+	printf("#include \"db_int.h\"\n") >> PFILE
+	printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE
+	printf("\n") >> PFILE
+}
+
+#
+# illegal_functions --
+#	Output general illegal-call functions
+function illegal_functions(OUTPUT)
+{
+	printf("static int __dbcl_dbp_illegal __P((DB *));\n") >> OUTPUT
+	printf("static int __dbcl_noserver __P((DB_ENV *));\n") >> OUTPUT
+	printf("static int __dbcl_txn_illegal __P((DB_TXN *));\n") >> OUTPUT
+	printf("\n") >> OUTPUT
+
+	printf("static int\n") >> OUTPUT
+	printf("__dbcl_noserver(dbenv)\n") >> OUTPUT
+	printf("\tDB_ENV *dbenv;\n") >> OUTPUT
+	printf("{\n\t__db_err(dbenv,") >> OUTPUT
+	printf(" \"No Berkeley DB RPC server environment\");\n") >> OUTPUT
+	printf("\treturn (DB_NOSERVER);\n") >> OUTPUT
+	printf("}\n\n") >> OUTPUT
+
+	printf("/*\n") >> OUTPUT
+	printf(" * __dbcl_dbenv_illegal --\n") >> OUTPUT
+	printf(" *	DB_ENV method not supported under RPC.\n") >> OUTPUT
+	printf(" *\n") >> OUTPUT
+	printf(" * PUBLIC: int __dbcl_dbenv_illegal __P((DB_ENV *));\n")\
+	    >> OUTPUT
+	printf(" */\n") >> OUTPUT
+	printf("int\n") >> OUTPUT
+	printf("__dbcl_dbenv_illegal(dbenv)\n") >> OUTPUT
+	printf("\tDB_ENV *dbenv;\n") >> OUTPUT
+	printf("{\n\t__db_err(dbenv,") >> OUTPUT
+	printf("\n\t    \"Interface not supported by ") >> OUTPUT
+	printf("Berkeley DB RPC client environments\");\n") >> OUTPUT
+	printf("\treturn (DB_OPNOTSUP);\n") >> OUTPUT
+	printf("}\n\n") >> OUTPUT
+	printf("/*\n") >> OUTPUT
+	printf(" * __dbcl_dbp_illegal --\n") >> OUTPUT
+	printf(" *	DB method not supported under RPC.\n") >> OUTPUT
+	printf(" */\n") >> OUTPUT
+	printf("static int\n") >> OUTPUT
+	printf("__dbcl_dbp_illegal(dbp)\n") >> OUTPUT
+	printf("\tDB *dbp;\n") >> OUTPUT
+	printf("{\n\treturn (__dbcl_dbenv_illegal(dbp->dbenv));\n") >> OUTPUT
+	printf("}\n\n") >> OUTPUT
+	printf("/*\n") >> OUTPUT
+	printf(" * __dbcl_txn_illegal --\n") >> OUTPUT
+	printf(" *	DB_TXN method not supported under RPC.\n") >> OUTPUT
+	printf(" */\n") >> OUTPUT
+	printf("static int\n__dbcl_txn_illegal(txn)\n") >> OUTPUT
+	printf("\tDB_TXN *txn;\n") >> OUTPUT
+	printf("{\n\treturn (__dbcl_dbenv_illegal(txn->mgrp->dbenv));\n")\
+	    >> OUTPUT
+	printf("}\n\n") >> OUTPUT
+}
+
+function obj_func(v, l)
+{
+	# Ignore db_create -- there's got to be something cleaner, but I
+	# don't want to rewrite rpc.src right now.
+	if (name == "db_create")
+		return;
+	if (name == "env_create")
+		return;
+
+	# Strip off the leading prefix for the method name -- there's got to
+	# be something cleaner, but I don't want to rewrite rpc.src right now.
+	len = length(name);
+	i = index(name, "_");
+	l[obj_indx] = sprintf("\t%s->%s = __dbcl_%s;",
+	    v, substr(name, i + 1, len - i), name);
+}
+
+function obj_illegal(l, handle, method, proto)
+{
+	# All of the functions return an int, with one exception.  Hack
+	# to make that work.
+	type = method == "db_get_mpf" ? "DB_MPOOLFILE *" : "int"
+
+	# Strip off the leading prefix for the method name -- there's got to
+	# be something cleaner, but I don't want to rewrite rpc.src right now.
+	len = length(method);
+	i = index(method, "_");
+
+	l[obj_indx] =\
+	    sprintf("\t%s->%s =\n\t    (%s (*)(",\
+	    handle, substr(method, i + 1, len - i), type)\
+	    proto\
+	    sprintf("))\n\t    __dbcl_%s_illegal;", handle);
+}
+
+function obj_init(obj, v, list, OUTPUT) {
+	printf("/*\n") >> OUTPUT
+	printf(" * __dbcl_%s_init --\n", v) >> OUTPUT
+	printf(" *\tInitialize %s handle methods.\n", obj) >> OUTPUT
+	printf(" *\n") >> OUTPUT
+	printf(\
+	    " * PUBLIC: void __dbcl_%s_init __P((%s *));\n", v, obj) >> OUTPUT
+	printf(" */\n") >> OUTPUT
+	printf("void\n") >> OUTPUT
+	printf("__dbcl_%s_init(%s)\n", v, v) >> OUTPUT
+	printf("\t%s *%s;\n", obj, v) >> OUTPUT
+	printf("{\n") >> OUTPUT
+	for (i = 1; i < obj_indx; ++i) {
+		if (i in list)
+			print list[i] >> OUTPUT
+	}
+	printf("\treturn;\n}\n\n") >> OUTPUT
+}
+
 #
 # split_lines --
 #	Add line separators to pretty-print the output.
diff --git a/storage/bdb/dist/ltmain.sh b/storage/bdb/dist/ltmain.sh
index c96a96ddd38..8915481a41b 100644
--- a/storage/bdb/dist/ltmain.sh
+++ b/storage/bdb/dist/ltmain.sh
@@ -1,7 +1,7 @@
 # ltmain.sh - Provide generalized library-building support services.
 # NOTE: Changing this file will not affect anything until you rerun configure.
 #
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005
 # Free Software Foundation, Inc.
 # Originally by Gordon Matzigkeit , 1996
 #
@@ -17,7 +17,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 #
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
@@ -43,8 +43,8 @@ EXIT_FAILURE=1
 
 PROGRAM=ltmain.sh
 PACKAGE=libtool
-VERSION=1.5.8
-TIMESTAMP=" (1.1220.2.117 2004/08/04 14:12:05)"
+VERSION=1.5.20
+TIMESTAMP=" (1.1220.2.287 2005/08/31 18:54:15)"
 
 # See if we are running on zsh, and set the options which allow our
 # commands through without removal of \ escapes.
@@ -88,14 +88,15 @@ rm="rm -f"
 Xsed="${SED}"' -e 1s/^X//'
 sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
 # test EBCDIC or ASCII
-case `echo A|tr A '\301'` in
- A) # EBCDIC based system
-  SP2NL="tr '\100' '\n'"
-  NL2SP="tr '\r\n' '\100\100'"
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+  SP2NL='tr \040 \012'
+  NL2SP='tr \015\012 \040\040'
   ;;
- *) # Assume ASCII based system
-  SP2NL="tr '\040' '\012'"
-  NL2SP="tr '\015\012' '\040\040'"
+ *) # EBCDIC based system
+  SP2NL='tr \100 \n'
+  NL2SP='tr \r\n \100\100'
   ;;
 esac
 
@@ -112,8 +113,9 @@ if test "${LANG+set}" = set; then
 fi
 
 # Make sure IFS has a sensible default
-: ${IFS=" 	
-"}
+lt_nl='
+'
+IFS=" 	$lt_nl"
 
 if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
   $echo "$modename: not configured to build any kind of library" 1>&2
@@ -143,7 +145,8 @@ o2lo="s/\\.${objext}\$/.lo/"
 # Need a lot of goo to handle *both* DLLs and import libs
 # Has to be a shell function in order to 'eat' the argument
 # that is supplied when $file_magic_command is called.
-func_win32_libid () {
+func_win32_libid ()
+{
   win32_libid_type="unknown"
   win32_fileres=`file -L $1 2>/dev/null`
   case $win32_fileres in
@@ -183,7 +186,8 @@ func_win32_libid () {
 # Only attempt this if the compiler in the base compile
 # command doesn't match the default compiler.
 # arg is usually of the form 'gcc ...'
-func_infer_tag () {
+func_infer_tag ()
+{
     if test -n "$available_tags" && test -z "$tagname"; then
       CC_quoted=
       for arg in $CC; do
@@ -242,8 +246,25 @@ func_infer_tag () {
 }
 
 
+# func_extract_an_archive dir oldlib
+func_extract_an_archive ()
+{
+    f_ex_an_ar_dir="$1"; shift
+    f_ex_an_ar_oldlib="$1"
+
+    $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)"
+    $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $?
+    if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+     :
+    else
+      $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2
+      exit $EXIT_FAILURE
+    fi
+}
+
 # func_extract_archives gentop oldlib ...
-func_extract_archives () {
+func_extract_archives ()
+{
     my_gentop="$1"; shift
     my_oldlibs=${1+"$@"}
     my_oldobjs=""
@@ -287,7 +308,7 @@ func_extract_archives () {
 	  cd $my_xdir || exit $?
 	  darwin_archive=$my_xabs
 	  darwin_curdir=`pwd`
-	  darwin_base_archive=`basename $darwin_archive`
+	  darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'`
 	  darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null`
 	  if test -n "$darwin_arches"; then 
 	    darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'`
@@ -296,64 +317,33 @@ func_extract_archives () {
 	    for darwin_arch in  $darwin_arches ; do
 	      mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
 	      lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
-	      # Remove the table of contents from the thin files.
-	      $AR -d "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" __.SYMDEF 2>/dev/null || true
-	      $AR -d "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" __.SYMDEF\ SORTED 2>/dev/null || true
 	      cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
-	      $AR -xo "${darwin_base_archive}"
-	      rm "${darwin_base_archive}"
+	      func_extract_an_archive "`pwd`" "${darwin_base_archive}"
 	      cd "$darwin_curdir"
+	      $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
 	    done # $darwin_arches
       ## Okay now we have a bunch of thin objects, gotta fatten them up :)
-	    darwin_filelist=`find unfat-$$ -type f | xargs basename | sort -u | $NL2SP`
+	    darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP`
 	    darwin_file=
 	    darwin_files=
 	    for darwin_file in $darwin_filelist; do
 	      darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP`
 	      lipo -create -output "$darwin_file" $darwin_files
 	    done # $darwin_filelist
-	    rm -rf unfat-$$
+	    ${rm}r unfat-$$
 	    cd "$darwin_orig_dir"
 	  else
-	    cd $darwin_orig_dir
-	    (cd $my_xdir && $AR x $my_xabs) || exit $?
+	    cd "$darwin_orig_dir"
+ 	    func_extract_an_archive "$my_xdir" "$my_xabs"
 	  fi # $darwin_arches
 	fi # $run
-      ;;
-      *)
-	# We will extract separately just the conflicting names and we will
-	# no longer touch any unique names. It is faster to leave these
-	# extract automatically by $AR in one run.
-	$show "(cd $my_xdir && $AR x $my_xabs)"
-	$run eval "(cd \$my_xdir && $AR x \$my_xabs)" || exit $?
-	if ($AR t "$my_xabs" | sort | sort -uc >/dev/null 2>&1); then
-	  :
-	else
-	  $echo "$modename: warning: object name conflicts; renaming object files" 1>&2
-	  $echo "$modename: warning: to ensure that they will not overwrite" 1>&2
-	  $AR t "$my_xabs" | sort | uniq -cd | while read -r count name
-	  do
-	    i=1
-	    while test "$i" -le "$count"
-	    do
-	      # Put our $i before any first dot (extension)
-	      # Never overwrite any file
-	      name_to="$name"
-	      while test "X$name_to" = "X$name" || test -f "$my_xdir/$name_to"
-	      do
-		name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
-	      done
-	      $show "(cd $my_xdir && $AR xN $i $my_xabs '$name' && $mv '$name' '$name_to')"
-	      $run eval "(cd \$my_xdir && $AR xN $i \$my_xabs '$name' && $mv '$name' '$name_to')" || exit $?
-	      i=`expr $i + 1`
-	    done
-	  done
-	fi
 	;;
+      *)
+        func_extract_an_archive "$my_xdir" "$my_xabs"
+        ;;
       esac
       my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
     done
-
     func_extract_archives_result="$my_oldobjs"
 }
 # End of Shell function definitions
@@ -426,10 +416,10 @@ do
   --version)
     $echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
     $echo
-    $echo "Copyright (C) 2003  Free Software Foundation, Inc."
+    $echo "Copyright (C) 2005  Free Software Foundation, Inc."
     $echo "This is free software; see the source for copying conditions.  There is NO"
     $echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-    exit $EXIT_SUCCESS
+    exit $?
     ;;
 
   --config)
@@ -438,7 +428,7 @@ do
     for tagname in $taglist; do
       ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath"
     done
-    exit $EXIT_SUCCESS
+    exit $?
     ;;
 
   --debug)
@@ -463,7 +453,7 @@ do
     else
       $echo "disable static libraries"
     fi
-    exit $EXIT_SUCCESS
+    exit $?
     ;;
 
   --finish) mode="finish" ;;
@@ -520,7 +510,7 @@ if test -z "$show_help"; then
   # Infer the operation mode.
   if test -z "$mode"; then
     $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2
-    $echo "*** Future versions of Libtool will require -mode=MODE be specified." 1>&2
+    $echo "*** Future versions of Libtool will require --mode=MODE be specified." 1>&2
     case $nonopt in
     *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*)
       mode=link
@@ -586,7 +576,7 @@ if test -z "$show_help"; then
 
     for arg
     do
-      case "$arg_mode" in
+      case $arg_mode in
       arg  )
 	# do not "continue".  Instead, add this to base_compile
 	lastarg="$arg"
@@ -668,7 +658,10 @@ if test -z "$show_help"; then
       case $lastarg in
       # Double-quote args containing other shell metacharacters.
       # Many Bourne shells cannot handle close brackets correctly
-      # in scan sets, so we specify it separately.
+      # in scan sets, and some SunOS ksh mistreat backslash-escaping
+      # in scan sets (worked around with variable expansion),
+      # and furthermore cannot handle '|' '&' '(' ')' in scan sets 
+      # at all, so we specify them separately.
       *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
 	lastarg="\"$lastarg\""
 	;;
@@ -742,6 +735,14 @@ if test -z "$show_help"; then
       esac
     done
 
+    qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"`
+    case $qlibobj in
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+	qlibobj="\"$qlibobj\"" ;;
+    esac
+    test "X$libobj" != "X$qlibobj" \
+	&& $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' 	&()|`$[]' \
+	&& $echo "$modename: libobj name \`$libobj' may not contain shell special characters."
     objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
     xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
     if test "X$xdir" = "X$obj"; then
@@ -814,12 +815,17 @@ compiler."
 	$run $rm $removelist
 	exit $EXIT_FAILURE
       fi
-      $echo $srcfile > "$lockfile"
+      $echo "$srcfile" > "$lockfile"
     fi
 
     if test -n "$fix_srcfile_path"; then
       eval srcfile=\"$fix_srcfile_path\"
     fi
+    qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"`
+    case $qsrcfile in
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+      qsrcfile="\"$qsrcfile\"" ;;
+    esac
 
     $run $rm "$libobj" "${libobj}T"
 
@@ -841,10 +847,10 @@ EOF
       fbsd_hideous_sh_bug=$base_compile
 
       if test "$pic_mode" != no; then
-	command="$base_compile $srcfile $pic_flag"
+	command="$base_compile $qsrcfile $pic_flag"
       else
 	# Don't build PIC code
-	command="$base_compile $srcfile"
+	command="$base_compile $qsrcfile"
       fi
 
       if test ! -d "${xdir}$objdir"; then
@@ -924,9 +930,9 @@ EOF
     if test "$build_old_libs" = yes; then
       if test "$pic_mode" != yes; then
 	# Don't build PIC code
-	command="$base_compile $srcfile"
+	command="$base_compile $qsrcfile"
       else
-	command="$base_compile $srcfile $pic_flag"
+	command="$base_compile $qsrcfile $pic_flag"
       fi
       if test "$compiler_c_o" = yes; then
 	command="$command -o $obj"
@@ -1348,6 +1354,13 @@ EOF
 	  prev=
 	  continue
 	  ;;
+        darwin_framework)
+	  compiler_flags="$compiler_flags $arg"
+	  compile_command="$compile_command $arg"
+	  finalize_command="$finalize_command $arg"
+	  prev=
+	  continue
+	  ;;
 	*)
 	  eval "$prev=\"\$arg\""
 	  prev=
@@ -1406,6 +1419,14 @@ EOF
 	continue
 	;;
 
+      -framework|-arch)
+        prev=darwin_framework
+        compiler_flags="$compiler_flags $arg"
+	compile_command="$compile_command $arg"
+	finalize_command="$finalize_command $arg"
+        continue
+        ;;
+
       -inst-prefix-dir)
 	prev=inst_prefix
 	continue
@@ -1466,7 +1487,7 @@ EOF
 	    # These systems don't actually have a C library (as such)
 	    test "X$arg" = "X-lc" && continue
 	    ;;
-	  *-*-openbsd* | *-*-freebsd*)
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
 	    # Do not include libc due to us having libc/libc_r.
 	    test "X$arg" = "X-lc" && continue
 	    ;;
@@ -1477,7 +1498,7 @@ EOF
 	  esac
 	elif test "X$arg" = "X-lc_r"; then
 	 case $host in
-	 *-*-openbsd* | *-*-freebsd*)
+	 *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
 	   # Do not include libc_r directly, use -pthread flag.
 	   continue
 	   ;;
@@ -1487,8 +1508,20 @@ EOF
 	continue
 	;;
 
+      # Tru64 UNIX uses -model [arg] to determine the layout of C++
+      # classes, name mangling, and exception handling.
+      -model)
+	compile_command="$compile_command $arg"
+	compiler_flags="$compiler_flags $arg"
+	finalize_command="$finalize_command $arg"
+	prev=xcompiler
+	continue
+	;;
+
      -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
-	deplibs="$deplibs $arg"
+	compiler_flags="$compiler_flags $arg"
+	compile_command="$compile_command $arg"
+	finalize_command="$finalize_command $arg"
 	continue
 	;;
 
@@ -1497,13 +1530,18 @@ EOF
 	continue
 	;;
 
-      # gcc -m* arguments should be passed to the linker via $compiler_flags
-      # in order to pass architecture information to the linker
-      # (e.g. 32 vs 64-bit).  This may also be accomplished via -Wl,-mfoo
-      # but this is not reliable with gcc because gcc may use -mfoo to
-      # select a different linker, different libraries, etc, while
-      # -Wl,-mfoo simply passes -mfoo to the linker.
-      -m*)
+      ################################################################
+      #### Local edit for Sleepycat SR #8705
+      #### Some cases separated below.
+      ################################################################
+      # -64, -mips[0-9] enable 64-bit mode on the SGI compiler
+      # -r[0-9][0-9]* specifies the processor on the SGI compiler
+      # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler
+      # +DA*, +DD* enable 64-bit mode on the HP compiler
+      # -q* pass through compiler args for the IBM compiler
+      # -m* pass through architecture-specific compiler args for GCC
+      -r[0-9][0-9]*|-xtarget=*|+DA*|+DD*|-q*|-m*)
+
 	# Unknown arguments in both finalize_command and compile_command need
 	# to be aesthetically quoted because they are evaled later.
 	arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
@@ -1525,6 +1563,8 @@ EOF
       #### This case was given to us by Albert Chin, and we expect
       #### this to be included in future versions of libtool,
       #### though we must verify that before upgrading.
+      #### Note that libtool 1.5.20 at least, incorporates similar
+      #### code, but it got refactored incorrectly.
       ################################################################
       # Flags for IRIX and Solaris compiler
       -64|-mips[0-9]|-xarch=*)
@@ -1998,7 +2038,7 @@ EOF
 	    compile_deplibs="$deplib $compile_deplibs"
 	    finalize_deplibs="$deplib $finalize_deplibs"
 	  else
-	    deplibs="$deplib $deplibs"
+	    compiler_flags="$compiler_flags $deplib"
 	  fi
 	  continue
 	  ;;
@@ -2007,10 +2047,6 @@ EOF
 	    $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2
 	    continue
 	  fi
-	  if test "$pass" = conv; then
-	    deplibs="$deplib $deplibs"
-	    continue
-	  fi
 	  name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
 	  for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
 	    for search_ext in .la $std_shrext .so .a; do
@@ -2210,6 +2246,8 @@ EOF
 	# it will not redefine variables installed, or shouldnotlink
 	installed=yes
 	shouldnotlink=no
+	avoidtemprpath=
+
 
 	# Read the .la file
 	case $lib in
@@ -2308,6 +2346,7 @@ EOF
 	    dir="$libdir"
 	    absdir="$libdir"
 	  fi
+	  test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
 	else
 	  if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
 	    dir="$ladir"
@@ -2392,12 +2431,12 @@ EOF
 	  if test -n "$library_names" &&
 	     { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
 	    # We need to hardcode the library path
-	    if test -n "$shlibpath_var"; then
+	    if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
 	      # Make sure the rpath contains only unique directories.
 	      case "$temp_rpath " in
 	      *" $dir "*) ;;
 	      *" $absdir "*) ;;
-	      *) temp_rpath="$temp_rpath $dir" ;;
+	      *) temp_rpath="$temp_rpath $absdir" ;;
 	      esac
 	    fi
 
@@ -2583,7 +2622,7 @@ EOF
 		add_dir="-L$dir"
 		# Try looking first in the location we're being installed to.
 		if test -n "$inst_prefix_dir"; then
-		  case "$libdir" in
+		  case $libdir in
 		    [\\/]*)
 		      add_dir="$add_dir -L$inst_prefix_dir$libdir"
 		      ;;
@@ -2656,7 +2695,7 @@ EOF
 	      add_dir="-L$libdir"
 	      # Try looking first in the location we're being installed to.
 	      if test -n "$inst_prefix_dir"; then
-		case "$libdir" in
+		case $libdir in
 		  [\\/]*)
 		    add_dir="$add_dir -L$inst_prefix_dir$libdir"
 		    ;;
@@ -2717,8 +2756,6 @@ EOF
 	      fi
 	    fi
 	  else
-	    convenience="$convenience $dir/$old_library"
-	    old_convenience="$old_convenience $dir/$old_library"
 	    deplibs="$dir/$old_library $deplibs"
 	    link_static=yes
 	  fi
@@ -2836,12 +2873,12 @@ EOF
 	      *) continue ;;
 	      esac
 	      case " $deplibs " in
-	      *" $depdepl "*) ;;
-	      *) deplibs="$depdepl $deplibs" ;;
+	      *" $path "*) ;;
+	      *) deplibs="$path $deplibs" ;;
 	      esac
 	      case " $deplibs " in
-	      *" $path "*) ;;
-	      *) deplibs="$deplibs $path" ;;
+	      *" $depdepl "*) ;;
+	      *) deplibs="$depdepl $deplibs" ;;
 	      esac
 	    done
 	  fi # link_all_deplibs != no
@@ -3106,27 +3143,27 @@ EOF
 
 	# Check that each of the things are valid numbers.
 	case $current in
-	0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
 	*)
-	  $echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
+	  $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2
 	  $echo "$modename: \`$vinfo' is not valid version information" 1>&2
 	  exit $EXIT_FAILURE
 	  ;;
 	esac
 
 	case $revision in
-	0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
 	*)
-	  $echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
+	  $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2
 	  $echo "$modename: \`$vinfo' is not valid version information" 1>&2
 	  exit $EXIT_FAILURE
 	  ;;
 	esac
 
 	case $age in
-	0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
 	*)
-	  $echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
+	  $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2
 	  $echo "$modename: \`$vinfo' is not valid version information" 1>&2
 	  exit $EXIT_FAILURE
 	  ;;
@@ -3358,7 +3395,7 @@ EOF
 	  *-*-netbsd*)
 	    # Don't link with libc until the a.out ld.so is fixed.
 	    ;;
-	  *-*-openbsd* | *-*-freebsd*)
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
 	    # Do not include libc due to us having libc/libc_r.
 	    test "X$arg" = "X-lc" && continue
 	    ;;
@@ -3408,7 +3445,7 @@ EOF
 	  if test "$?" -eq 0 ; then
 	    ldd_output=`ldd conftest`
 	    for i in $deplibs; do
-	      name="`expr $i : '-l\(.*\)'`"
+	      name=`expr $i : '-l\(.*\)'`
 	      # If $name is empty we are operating on a -L argument.
               if test "$name" != "" && test "$name" -ne "0"; then
 		if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
@@ -3445,7 +3482,7 @@ EOF
 	    # Error occurred in the first compile.  Let's try to salvage
 	    # the situation: Compile a separate program for each library.
 	    for i in $deplibs; do
-	      name="`expr $i : '-l\(.*\)'`"
+	      name=`expr $i : '-l\(.*\)'`
 	      # If $name is empty we are operating on a -L argument.
               if test "$name" != "" && test "$name" != "0"; then
 		$rm conftest
@@ -3497,7 +3534,7 @@ EOF
 	  set dummy $deplibs_check_method
 	  file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
 	  for a_deplib in $deplibs; do
-	    name="`expr $a_deplib : '-l\(.*\)'`"
+	    name=`expr $a_deplib : '-l\(.*\)'`
 	    # If $name is empty we are operating on a -L argument.
             if test "$name" != "" && test  "$name" != "0"; then
 	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
@@ -3566,7 +3603,7 @@ EOF
 	  set dummy $deplibs_check_method
 	  match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
 	  for a_deplib in $deplibs; do
-	    name="`expr $a_deplib : '-l\(.*\)'`"
+	    name=`expr $a_deplib : '-l\(.*\)'`
 	    # If $name is empty we are operating on a -L argument.
 	    if test -n "$name" && test "$name" != "0"; then
 	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
@@ -3807,6 +3844,9 @@ EOF
 	        # The command line is too long to execute in one step.
 	        $show "using reloadable object file for export list..."
 	        skipped_export=:
+		# Break out early, otherwise skipped_export may be
+		# set to false by a later but shorter cmd.
+		break
 	      fi
 	    done
 	    IFS="$save_ifs"
@@ -3876,7 +3916,8 @@ EOF
 	  fi
 	fi
 
-	if test "X$skipped_export" != "X:" && len=`expr "X$test_cmds" : ".*"` &&
+	if test "X$skipped_export" != "X:" &&
+	   len=`expr "X$test_cmds" : ".*" 2>/dev/null` &&
 	   test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
 	  :
 	else
@@ -3895,6 +3936,7 @@ EOF
 	    save_libobjs=$libobjs
 	  fi
 	  save_output=$output
+	  output_la=`$echo "X$output" | $Xsed -e "$basename"`
 
 	  # Clear the reloadable object creation command queue and
 	  # initialize k to one.
@@ -3904,13 +3946,13 @@ EOF
 	  delfiles=
 	  last_robj=
 	  k=1
-	  output=$output_objdir/$save_output-${k}.$objext
+	  output=$output_objdir/$output_la-${k}.$objext
 	  # Loop over the list of objects to be linked.
 	  for obj in $save_libobjs
 	  do
 	    eval test_cmds=\"$reload_cmds $objlist $last_robj\"
 	    if test "X$objlist" = X ||
-	       { len=`expr "X$test_cmds" : ".*"` &&
+	       { len=`expr "X$test_cmds" : ".*" 2>/dev/null` &&
 		 test "$len" -le "$max_cmd_len"; }; then
 	      objlist="$objlist $obj"
 	    else
@@ -3924,9 +3966,9 @@ EOF
 		# the last one created.
 		eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\"
 	      fi
-	      last_robj=$output_objdir/$save_output-${k}.$objext
+	      last_robj=$output_objdir/$output_la-${k}.$objext
 	      k=`expr $k + 1`
-	      output=$output_objdir/$save_output-${k}.$objext
+	      output=$output_objdir/$output_la-${k}.$objext
 	      objlist=$obj
 	      len=1
 	    fi
@@ -3946,13 +3988,13 @@ EOF
 	    eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\"
           fi
 
-	  # Set up a command to remove the reloadale object files
+	  # Set up a command to remove the reloadable object files
 	  # after they are used.
 	  i=0
 	  while test "$i" -lt "$k"
 	  do
 	    i=`expr $i + 1`
-	    delfiles="$delfiles $output_objdir/$save_output-${i}.$objext"
+	    delfiles="$delfiles $output_objdir/$output_la-${i}.$objext"
 	  done
 
 	  $echo "creating a temporary reloadable object file: $output"
@@ -4000,13 +4042,30 @@ EOF
 	  IFS="$save_ifs"
 	  eval cmd=\"$cmd\"
 	  $show "$cmd"
-	  $run eval "$cmd" || exit $?
+	  $run eval "$cmd" || {
+	    lt_exit=$?
+
+	    # Restore the uninstalled library and exit
+	    if test "$mode" = relink; then
+	      $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)'
+	    fi
+
+	    exit $lt_exit
+	  }
 	done
 	IFS="$save_ifs"
 
 	# Restore the uninstalled library and exit
 	if test "$mode" = relink; then
 	  $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
+
+	  if test -n "$convenience"; then
+	    if test -z "$whole_archive_flag_spec"; then
+	      $show "${rm}r $gentop"
+	      $run ${rm}r "$gentop"
+	    fi
+	  fi
+
 	  exit $EXIT_SUCCESS
 	fi
 
@@ -4349,12 +4408,12 @@ extern \"C\" {
 
 	    # Prepare the list of exported symbols
 	    if test -z "$export_symbols"; then
-	      export_symbols="$output_objdir/$output.exp"
+	      export_symbols="$output_objdir/$outputname.exp"
 	      $run $rm $export_symbols
-	      $run eval "${SED} -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+	      $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
 	    else
-	      $run eval "${SED} -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
-	      $run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
+	      $run eval "${SED} -e 's/\([ ][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+	      $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
 	      $run eval 'mv "$nlist"T "$nlist"'
 	    fi
 	  fi
@@ -4406,7 +4465,26 @@ extern \"C\" {
 #endif
 
 /* The mapping between symbol names and symbols. */
+"
+
+	    case $host in
+	    *cygwin* | *mingw* )
+	  $echo >> "$output_objdir/$dlsyms" "\
+/* DATA imports from DLLs on WIN32 can't be const, because
+   runtime relocations are performed -- see ld's documentation
+   on pseudo-relocs */
+struct {
+"
+	      ;;
+	    * )
+	  $echo >> "$output_objdir/$dlsyms" "\
 const struct {
+"
+	      ;;
+	    esac
+
+
+	  $echo >> "$output_objdir/$dlsyms" "\
   const char *name;
   lt_ptr address;
 }
@@ -4635,7 +4713,7 @@ static const void *lt_preloaded_setup() {
 	esac
 	case $host in
 	  *cygwin* | *mingw* )
-	    cwrappersource=`$echo ${objdir}/lt-${output}.c`
+	    cwrappersource=`$echo ${objdir}/lt-${outputname}.c`
 	    cwrapper=`$echo ${output}.exe`
 	    $rm $cwrappersource $cwrapper
 	    trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
@@ -4734,6 +4812,7 @@ EOF
 EOF
 
 	    cat >> $cwrappersource <<"EOF"
+  return 127;
 }
 
 void *
@@ -4997,13 +5076,13 @@ else
 	# Backslashes separate directories on plain windows
 	*-*-mingw | *-*-os2*)
 	  $echo >> $output "\
-      exec \$progdir\\\\\$program \${1+\"\$@\"}
+      exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
 "
 	  ;;
 
 	*)
 	  $echo >> $output "\
-      exec \$progdir/\$program \${1+\"\$@\"}
+      exec \"\$progdir/\$program\" \${1+\"\$@\"}
 "
 	  ;;
 	esac
@@ -5013,7 +5092,7 @@ else
     fi
   else
     # The program doesn't exist.
-    \$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
+    \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
     \$echo \"This script is just a wrapper for \$program.\" 1>&2
     $echo \"See the $PACKAGE documentation for more information.\" 1>&2
     exit $EXIT_FAILURE
@@ -5055,6 +5134,63 @@ fi\
       if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
        cmds=$old_archive_from_new_cmds
       else
+	# POSIX demands no paths to be encoded in archives.  We have
+	# to avoid creating archives with duplicate basenames if we
+	# might have to extract them afterwards, e.g., when creating a
+	# static archive out of a convenience library, or when linking
+	# the entirety of a libtool archive into another (currently
+	# not supported by libtool).
+	if (for obj in $oldobjs
+	    do
+	      $echo "X$obj" | $Xsed -e 's%^.*/%%'
+	    done | sort | sort -uc >/dev/null 2>&1); then
+	  :
+	else
+	  $echo "copying selected object files to avoid basename conflicts..."
+
+	  if test -z "$gentop"; then
+	    gentop="$output_objdir/${outputname}x"
+	    generated="$generated $gentop"
+
+	    $show "${rm}r $gentop"
+	    $run ${rm}r "$gentop"
+	    $show "$mkdir $gentop"
+	    $run $mkdir "$gentop"
+	    status=$?
+	    if test "$status" -ne 0 && test ! -d "$gentop"; then
+	      exit $status
+	    fi
+	  fi
+
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  counter=1
+	  for obj in $save_oldobjs
+	  do
+	    objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
+	    case " $oldobjs " in
+	    " ") oldobjs=$obj ;;
+	    *[\ /]"$objbase "*)
+	      while :; do
+		# Make sure we don't pick an alternate name that also
+		# overlaps.
+		newobj=lt$counter-$objbase
+		counter=`expr $counter + 1`
+		case " $oldobjs " in
+		*[\ /]"$newobj "*) ;;
+		*) if test ! -f "$gentop/$newobj"; then break; fi ;;
+		esac
+	      done
+	      $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+	      $run ln "$obj" "$gentop/$newobj" ||
+	      $run cp "$obj" "$gentop/$newobj"
+	      oldobjs="$oldobjs $gentop/$newobj"
+	      ;;
+	    *) oldobjs="$oldobjs $obj" ;;
+	    esac
+	  done
+	fi
+
 	eval cmds=\"$old_archive_cmds\"
 
 	if len=`expr "X$cmds" : ".*"` &&
@@ -5068,20 +5204,7 @@ fi\
 	  objlist=
 	  concat_cmds=
 	  save_oldobjs=$oldobjs
-	  # GNU ar 2.10+ was changed to match POSIX; thus no paths are
-	  # encoded into archives.  This makes 'ar r' malfunction in
-	  # this piecewise linking case whenever conflicting object
-	  # names appear in distinct ar calls; check, warn and compensate.
-	    if (for obj in $save_oldobjs
-	    do
-	      $echo "X$obj" | $Xsed -e 's%^.*/%%'
-	    done | sort | sort -uc >/dev/null 2>&1); then
-	    :
-	  else
-	    $echo "$modename: warning: object name conflicts; overriding AR_FLAGS to 'cq'" 1>&2
-	    $echo "$modename: warning: to ensure that POSIX-compatible ar will work" 1>&2
-	    AR_FLAGS=cq
-	  fi
+
 	  # Is there a better way of finding the last object in the list?
 	  for obj in $save_oldobjs
 	  do
@@ -5092,7 +5215,7 @@ fi\
 	    oldobjs="$objlist $obj"
 	    objlist="$objlist $obj"
 	    eval test_cmds=\"$old_archive_cmds\"
-	    if len=`expr "X$test_cmds" : ".*"` &&
+	    if len=`expr "X$test_cmds" : ".*" 2>/dev/null` &&
 	       test "$len" -le "$max_cmd_len"; then
 	      :
 	    else
@@ -5289,11 +5412,11 @@ relink_command=\"$relink_command\""
     # install_prog (especially on Windows NT).
     if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
        # Allow the use of GNU shtool's install command.
-       $echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
+       $echo "X$nonopt" | grep shtool > /dev/null; then
       # Aesthetically quote it.
       arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
       case $arg in
-      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*)
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
 	arg="\"$arg\""
 	;;
       esac
@@ -5302,14 +5425,14 @@ relink_command=\"$relink_command\""
       shift
     else
       install_prog=
-      arg="$nonopt"
+      arg=$nonopt
     fi
 
     # The real first argument should be the name of the installation program.
     # Aesthetically quote it.
     arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
     case $arg in
-    *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*)
+    *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
       arg="\"$arg\""
       ;;
     esac
@@ -5327,28 +5450,31 @@ relink_command=\"$relink_command\""
     do
       if test -n "$dest"; then
 	files="$files $dest"
-	dest="$arg"
+	dest=$arg
 	continue
       fi
 
       case $arg in
       -d) isdir=yes ;;
-      -f) prev="-f" ;;
-      -g) prev="-g" ;;
-      -m) prev="-m" ;;
-      -o) prev="-o" ;;
+      -f) 
+      	case " $install_prog " in
+	*[\\\ /]cp\ *) ;;
+	*) prev=$arg ;;
+	esac
+	;;
+      -g | -m | -o) prev=$arg ;;
       -s)
 	stripme=" -s"
 	continue
 	;;
-      -*) ;;
-
+      -*)
+	;;
       *)
 	# If the previous option needed an argument, then skip it.
 	if test -n "$prev"; then
 	  prev=
 	else
-	  dest="$arg"
+	  dest=$arg
 	  continue
 	fi
 	;;
@@ -5357,7 +5483,7 @@ relink_command=\"$relink_command\""
       # Aesthetically quote the argument.
       arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
       case $arg in
-      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*)
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
 	arg="\"$arg\""
 	;;
       esac
@@ -5526,11 +5652,14 @@ relink_command=\"$relink_command\""
 
 	  if test "$#" -gt 0; then
 	    # Delete the old symlinks, and create new ones.
+	    # Try `ln -sf' first, because the `ln' binary might depend on
+	    # the symlink we replace!  Solaris /bin/ln does not understand -f,
+	    # so we also need to try rm && ln -s.
 	    for linkname
 	    do
 	      if test "$linkname" != "$realname"; then
-		$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
-		$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+                $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })"
+                $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })"
 	      fi
 	    done
 	  fi
@@ -5543,7 +5672,16 @@ relink_command=\"$relink_command\""
 	    IFS="$save_ifs"
 	    eval cmd=\"$cmd\"
 	    $show "$cmd"
-	    $run eval "$cmd" || exit $?
+	    $run eval "$cmd" || {
+	      lt_exit=$?
+
+	      # Restore the uninstalled library and exit
+	      if test "$mode" = relink; then
+		$run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)'
+	      fi
+
+	      exit $lt_exit
+	    }
 	  done
 	  IFS="$save_ifs"
 	fi
@@ -5637,17 +5775,15 @@ relink_command=\"$relink_command\""
 	  notinst_deplibs=
 	  relink_command=
 
-	  # To insure that "foo" is sourced, and not "foo.exe",
-	  # finese the cygwin/MSYS system by explicitly sourcing "foo."
-	  # which disallows the automatic-append-.exe behavior.
-	  case $build in
-	  *cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
-	  *) wrapperdot=${wrapper} ;;
-	  esac
+	  # Note that it is not necessary on cygwin/mingw to append a dot to
+	  # foo even if both foo and FILE.exe exist: automatic-append-.exe
+	  # behavior happens only for exec(3), not for open(2)!  Also, sourcing
+	  # `FILE.' does not work on cygwin managed mounts.
+	  #
 	  # If there is no directory component, then add one.
-	  case $file in
-	  */* | *\\*) . ${wrapperdot} ;;
-	  *) . ./${wrapperdot} ;;
+	  case $wrapper in
+	  */* | *\\*) . ${wrapper} ;;
+	  *) . ./${wrapper} ;;
 	  esac
 
 	  # Check the variables that should have been set.
@@ -5675,17 +5811,15 @@ relink_command=\"$relink_command\""
 	  done
 
 	  relink_command=
-	  # To insure that "foo" is sourced, and not "foo.exe",
-	  # finese the cygwin/MSYS system by explicitly sourcing "foo."
-	  # which disallows the automatic-append-.exe behavior.
-	  case $build in
-	  *cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
-	  *) wrapperdot=${wrapper} ;;
-	  esac
+	  # Note that it is not necessary on cygwin/mingw to append a dot to
+	  # foo even if both foo and FILE.exe exist: automatic-append-.exe
+	  # behavior happens only for exec(3), not for open(2)!  Also, sourcing
+	  # `FILE.' does not work on cygwin managed mounts.
+	  #
 	  # If there is no directory component, then add one.
-	  case $file in
-	  */* | *\\*) . ${wrapperdot} ;;
-	  *) . ./${wrapperdot} ;;
+	  case $wrapper in
+	  */* | *\\*) . ${wrapper} ;;
+	  *) . ./${wrapper} ;;
 	  esac
 
 	  outputname=
@@ -5726,7 +5860,7 @@ relink_command=\"$relink_command\""
 	fi
 
 	# remove .exe since cygwin /usr/bin/install will append another
-	# one anyways
+	# one anyway 
 	case $install_prog,$host in
 	*/usr/bin/install*,*cygwin*)
 	  case $file:$destfile in
@@ -6396,7 +6530,7 @@ esac
 $echo
 $echo "Try \`$modename --help' for more information about other modes."
 
-exit $EXIT_SUCCESS
+exit $?
 
 # The TAGs below are defined such that we never get into a situation
 # in which we disable both kinds of libraries.  Given conflicting
diff --git a/storage/bdb/dist/pubdef.in b/storage/bdb/dist/pubdef.in
index 7f6ad2ebd69..b0873839d7d 100644
--- a/storage/bdb/dist/pubdef.in
+++ b/storage/bdb/dist/pubdef.in
@@ -1,386 +1,412 @@
+# $Id: pubdef.in,v 12.18 2005/11/08 03:25:00 bostic Exp $
+#
 # Name
 # D == documentation
 # I == include file
 # J == Java constant
 # N == wrapped by the Java native layer
-DB_AFTER		D I J
-DB_AGGRESSIVE		D I J
-DB_ALREADY_ABORTED	* I *
-DB_AM_CHKSUM		* I *
-DB_AM_CL_WRITER		* I *
-DB_AM_COMPENSATE	* I *
-DB_AM_CREATED		* I *
-DB_AM_CREATED_MSTR	* I *
-DB_AM_DBM_ERROR		* I *
-DB_AM_DELIMITER		* I *
-DB_AM_DIRTY		* I *
-DB_AM_DISCARD		* I *
-DB_AM_DUP		* I *
-DB_AM_DUPSORT		* I *
-DB_AM_ENCRYPT		* I *
-DB_AM_FIXEDLEN		* I *
-DB_AM_INMEM		* I *
-DB_AM_INORDER		* I *
-DB_AM_IN_RENAME		* I *
-DB_AM_NOT_DURABLE	* I *
-DB_AM_OPEN_CALLED	* I *
-DB_AM_PAD		* I *
-DB_AM_PGDEF		* I *
-DB_AM_RDONLY		* I *
-DB_AM_RECNUM		* I *
-DB_AM_RECOVER		* I *
-DB_AM_RENUMBER		* I *
-DB_AM_REPLICATION	* I *
-DB_AM_REVSPLITOFF	* I *
-DB_AM_SECONDARY		* I *
-DB_AM_SNAPSHOT		* I *
-DB_AM_SUBDB		* I *
-DB_AM_SWAP		* I *
-DB_AM_TXN		* I *
-DB_AM_VERIFYING		* I *
-DB_APPEND		D I J
-DB_ARCH_ABS		D I J
-DB_ARCH_DATA		D I J
-DB_ARCH_LOG		D I J
-DB_ARCH_REMOVE		D I J
-DB_AUTO_COMMIT		D I J
-DB_BEFORE		D I J
-DB_BTREE		D I J
-DB_BTREEMAGIC		* I *
-DB_BTREEOLDVER		* I *
-DB_BTREEVERSION		* I *
-DB_BUFFER_SMALL		D I N
-DB_CACHED_COUNTS	* I *
-DB_CDB_ALLDB		D I J
-DB_CHKSUM		D I J
-DB_CONFIG		D * *
-DB_CONSUME		D I J
-DB_CONSUME_WAIT		D I J
-DB_CREATE		D I J
-DB_CURRENT		D I J
-DB_CXX_NO_EXCEPTIONS	D I *
-DB_DBM_HSEARCH		* I *
-DB_DBT_APPMALLOC	D I N
-DB_DBT_DUPOK		* I *
-DB_DBT_ISSET		* I *
-DB_DBT_MALLOC		D I J
-DB_DBT_PARTIAL		D I J
-DB_DBT_REALLOC		D I N
-DB_DBT_USERMEM		D I J
-DB_DEGREE_2		D I J
-DB_DELETED		* I *
-DB_DIRECT		D I *
-DB_DIRECT_DB		D I J
-DB_DIRECT_LOG		D I J
-DB_DIRTY_READ		D I J
-DB_DONOTINDEX		D I J
-DB_DSYNC_LOG		D I J
-DB_DUP			D I J
-DB_DUPSORT		D I J
-DB_DURABLE_UNKNOWN	* I *
-DB_EID_BROADCAST	D I J
-DB_EID_INVALID		D I J
-DB_ENCRYPT		D I J
-DB_ENCRYPT_AES		D I J
-DB_ENV_AUTO_COMMIT	* I *
-DB_ENV_CDB		* I *
-DB_ENV_CDB_ALLDB	* I *
-DB_ENV_CREATE		* I *
-DB_ENV_DBLOCAL		* I *
-DB_ENV_DIRECT_DB	* I *
-DB_ENV_DIRECT_LOG	* I *
-DB_ENV_DSYNC_LOG	* I *
-DB_ENV_FATAL		* I *
-DB_ENV_LOCKDOWN		* I *
-DB_ENV_LOG_AUTOREMOVE	* I *
-DB_ENV_LOG_INMEMORY	* I *
-DB_ENV_NOLOCKING	* I *
-DB_ENV_NOMMAP		* I *
-DB_ENV_NOPANIC		* I *
-DB_ENV_OPEN_CALLED	* I *
-DB_ENV_OVERWRITE	* I *
-DB_ENV_PRIVATE		* I *
-DB_ENV_REGION_INIT	* I *
-DB_ENV_RPCCLIENT	* I *
-DB_ENV_RPCCLIENT_GIVEN	* I *
-DB_ENV_SYSTEM_MEM	* I *
-DB_ENV_THREAD		* I *
-DB_ENV_TIME_NOTGRANTED	* I *
-DB_ENV_TXN_NOSYNC	* I *
-DB_ENV_TXN_WRITE_NOSYNC	* I *
-DB_ENV_YIELDCPU		* I *
-DB_EXCL			D I J
-DB_EXTENT		* I *
-DB_FAST_STAT		D I J
-DB_FCNTL_LOCKING	* I *
-DB_FILE_ID_LEN		* I *
-DB_FIRST		D I J
-DB_FLUSH		D I J
-DB_FORCE		D I J
-DB_GET_BOTH		D I J
-DB_GET_BOTHC		* I *
-DB_GET_BOTH_RANGE	D I J
-DB_GET_RECNO		D I J
-DB_HANDLE_LOCK		* I *
-DB_HASH			D I J
-DB_HASHMAGIC		* I *
-DB_HASHOLDVER		* I *
-DB_HASHVERSION		* I *
-DB_HOME			D * *
-DB_INIT_CDB		D I J
-DB_INIT_LOCK		D I J
-DB_INIT_LOG		D I J
-DB_INIT_MPOOL		D I J
-DB_INIT_REP		D I J
-DB_INIT_TXN		D I J
-DB_INORDER		D I J
-DB_JOINENV		D I J
-DB_JOIN_ITEM		D I J
-DB_JOIN_NOSORT		D I J
-DB_KEYEMPTY		D I J
-DB_KEYEXIST		D I J
-DB_KEYFIRST		D I J
-DB_KEYLAST		D I J
-DB_LAST			D I J
-DB_LOCKDOWN		D I J
-DB_LOCKVERSION		* I *
-DB_LOCK_ABORT		* I *
-DB_LOCK_DEADLOCK	D I J
-DB_LOCK_DEFAULT		D I J
-DB_LOCK_DIRTY		* I *
-DB_LOCK_DUMP		* I *
-DB_LOCK_EXPIRE		D I J
-DB_LOCK_GET		D I J
-DB_LOCK_GET_TIMEOUT	D I J
-DB_LOCK_INHERIT		* I *
-DB_LOCK_IREAD		D I J
-DB_LOCK_IWR		D I J
-DB_LOCK_IWRITE		D I J
-DB_LOCK_MAXLOCKS	D I J
-DB_LOCK_MAXWRITE	D I J
-DB_LOCK_MINLOCKS	D I J
-DB_LOCK_MINWRITE	D I J
-DB_LOCK_NG		* I *
-DB_LOCK_NORUN		* I *
-DB_LOCK_NOTEXIST	* I *
-DB_LOCK_NOTGRANTED	D I J
-DB_LOCK_NOWAIT		D I J
-DB_LOCK_OLDEST		D I J
-DB_LOCK_PUT		D I J
-DB_LOCK_PUT_ALL		D I J
-DB_LOCK_PUT_OBJ		D I J
-DB_LOCK_PUT_READ	* I *
-DB_LOCK_RANDOM		D I J
-DB_LOCK_READ		D I J
-DB_LOCK_RECORD		* I *
-DB_LOCK_REMOVE		* I *
-DB_LOCK_SET_TIMEOUT	* I *
-DB_LOCK_SWITCH		* I *
-DB_LOCK_TIMEOUT		D I J
-DB_LOCK_TRADE		* I *
-DB_LOCK_UPGRADE		* I *
-DB_LOCK_UPGRADE_WRITE	* I *
-DB_LOCK_WAIT		* I *
-DB_LOCK_WRITE		D I J
-DB_LOCK_WWRITE		* I *
-DB_LOCK_YOUNGEST	D I J
-DB_LOGC_BUF_SIZE	* I *
-DB_LOGFILEID_INVALID	* I *
-DB_LOGMAGIC		* I *
-DB_LOGOLDVER		* I *
-DB_LOGVERSION		* I *
-DB_LOG_AUTOREMOVE	D I J
-DB_LOG_BUFFER_FULL	D I *
-DB_LOG_CHKPNT		* I *
-DB_LOG_COMMIT		* I *
-DB_LOG_DISK		* I *
-DB_LOG_INMEMORY		D I J
-DB_LOG_LOCKED		* I *
-DB_LOG_NOCOPY		* I *
-DB_LOG_NOT_DURABLE	* I *
-DB_LOG_PERM		* I *
-DB_LOG_RESEND		* I *
-DB_LOG_SILENT_ERR	* I *
-DB_LOG_WRNOSYNC		* I *
-DB_LSTAT_ABORTED	* I *
-DB_LSTAT_EXPIRED	* I *
-DB_LSTAT_FREE		* I *
-DB_LSTAT_HELD		* I *
-DB_LSTAT_NOTEXIST	* I *
-DB_LSTAT_PENDING	* I *
-DB_LSTAT_WAITING	* I *
-DB_MAX_PAGES		* I *
-DB_MAX_RECORDS		* I *
-DB_MPOOL_CLEAN		D I *
-DB_MPOOL_CREATE		D I *
-DB_MPOOL_DIRTY		D I *
-DB_MPOOL_DISCARD	D I *
-DB_MPOOL_FREE		* I *
-DB_MPOOL_LAST		D I *
-DB_MPOOL_NEW		D I *
-DB_MPOOL_NOFILE		D I J
-DB_MPOOL_UNLINK		D I J
-DB_MULTIPLE		D I J
-DB_MULTIPLE_INIT	D I *
-DB_MULTIPLE_KEY		D I J
-DB_MULTIPLE_KEY_NEXT	D I *
-DB_MULTIPLE_NEXT	D I *
-DB_MULTIPLE_RECNO_NEXT	D I *
-DB_NEEDSPLIT		* I *
-DB_NEXT			D I J
-DB_NEXT_DUP		D I J
-DB_NEXT_NODUP		D I J
-DB_NODUPDATA		D I J
-DB_NOLOCKING		D I J
-DB_NOMMAP		D I J
-DB_NOORDERCHK		D I J
-DB_NOOVERWRITE		D I J
-DB_NOPANIC		D I J
-DB_NOSERVER		D I *
-DB_NOSERVER_HOME	D I J
-DB_NOSERVER_ID		D I J
-DB_NOSYNC		D I J
-DB_NOTFOUND		D I J
-DB_NO_AUTO_COMMIT	* I *
-DB_ODDFILESIZE		D I *
-DB_OK_BTREE		* I *
-DB_OK_HASH		* I *
-DB_OK_QUEUE		* I *
-DB_OK_RECNO		* I *
-DB_OLD_VERSION		D I *
-DB_OPFLAGS_MASK		* I *
-DB_ORDERCHKONLY		D I J
-DB_OVERWRITE		D I J
-DB_PAGE_LOCK		* I *
-DB_PAGE_NOTFOUND	D I *
-DB_PANIC_ENVIRONMENT	D I J
-DB_POSITION		D I J
-DB_PREV			D I J
-DB_PREV_NODUP		D I J
-DB_PRINTABLE		D I J
-DB_PRIORITY_DEFAULT	D I J
-DB_PRIORITY_HIGH	D I J
-DB_PRIORITY_LOW		D I J
-DB_PRIORITY_VERY_HIGH 	D I J
-DB_PRIORITY_VERY_LOW	D I J
-DB_PRIVATE		D I J
-DB_PR_PAGE		* I *
-DB_PR_RECOVERYTEST	* I *
-DB_QAMMAGIC		* I *
-DB_QAMOLDVER		* I *
-DB_QAMVERSION		* I *
-DB_QUEUE		D I J
-DB_RDONLY		D I J
-DB_RDWRMASTER		* I *
-DB_RECNO		D I J
-DB_RECNUM		D I J
-DB_RECORDCOUNT		* I *
-DB_RECORD_LOCK		* I *
-DB_RECOVER		D I J
-DB_RECOVER_FATAL	D I J
-DB_REDO			* I *
-DB_REGION_INIT		D I J
-DB_REGION_MAGIC		* I *
-DB_RENAMEMAGIC		* I *
-DB_RENUMBER		D I J
-DB_REP_CLIENT		D I J
-DB_REP_CREATE		* I *
-DB_REP_DUPMASTER	D I J
-DB_REP_EGENCHG		* I *
-DB_REP_HANDLE_DEAD	D I N
-DB_REP_HOLDELECTION	D I J
-DB_REP_ISPERM		D I J
-DB_REP_LOGREADY		* I *
-DB_REP_MASTER		D I J
-DB_REP_NEWMASTER	D I J
-DB_REP_NEWSITE		D I J
-DB_REP_NOBUFFER		D I J
-DB_REP_NOTPERM		D I J
-DB_REP_PAGEDONE		* I *
-DB_REP_PERMANENT	D I J
-DB_REP_STARTUPDONE	D I J
-DB_REP_UNAVAIL		D I *
-DB_REVSPLITOFF		D I J
-DB_RMW			D I J
-DB_RPCCLIENT		D I J
-DB_RUNRECOVERY		D I N
-DB_SALVAGE		D I J
-DB_SECONDARY_BAD	D I *
-DB_SEQUENCE_VERSION	* I *
-DB_SEQ_DEC		D I J
-DB_SEQ_INC		D I J
-DB_SEQ_RANGE_SET	* I *
-DB_SEQ_WRAP		D I J
-DB_SET			D I J
-DB_SET_BEGIN_LSNP	* I *
-DB_SET_LOCK_TIMEOUT	D I J
-DB_SET_RANGE		D I J
-DB_SET_RECNO		D I J
-DB_SET_TXN_NOW		* I *
-DB_SET_TXN_TIMEOUT	D I J
-DB_SNAPSHOT		D I J
-DB_STAT_ALL		D I *
-DB_STAT_CLEAR		D I J
-DB_STAT_LOCK_CONF	D I *
-DB_STAT_LOCK_LOCKERS	D I *
-DB_STAT_LOCK_OBJECTS	D I *
-DB_STAT_LOCK_PARAMS	D I *
-DB_STAT_MEMP_HASH	D I *
-DB_STAT_SUBSYSTEM	D I *
-DB_SURPRISE_KID		* I *
-DB_SWAPBYTES		* I *
-DB_SYSTEM_MEM		D I J
-DB_TEST_ELECTINIT	* I *
-DB_TEST_ELECTVOTE1	* I *
-DB_TEST_POSTDESTROY	* I *
-DB_TEST_POSTLOG		* I *
-DB_TEST_POSTLOGMETA	* I *
-DB_TEST_POSTOPEN	* I *
-DB_TEST_POSTSYNC	* I *
-DB_TEST_PREDESTROY	* I *
-DB_TEST_PREOPEN		* I *
-DB_TEST_SUBDB_LOCKS	* I *
-DB_THREAD		D I J
-DB_TIMEOUT		* I *
-DB_TIME_NOTGRANTED	D I J
-DB_TRUNCATE		D I J
-DB_TXNVERSION		* I *
-DB_TXN_ABORT		D I J
-DB_TXN_APPLY		D I J
-DB_TXN_BACKWARD_ALLOC	* I *
-DB_TXN_BACKWARD_ROLL	D I J
-DB_TXN_CKP		* I *
-DB_TXN_FORWARD_ROLL	D I J
-DB_TXN_NOSYNC		D I J
-DB_TXN_NOT_DURABLE	D I J
-DB_TXN_NOWAIT		D I J
-DB_TXN_OPENFILES	* I *
-DB_TXN_POPENFILES	* I *
-DB_TXN_PRINT		D I J
-DB_TXN_SYNC		D I J
-DB_TXN_WRITE_NOSYNC	D I J
-DB_UNDO			* I *
-DB_UNKNOWN		D I J
-DB_UNREF		* I *
-DB_UPDATE_SECONDARY	* I *
-DB_UPGRADE		D I J
-DB_USE_ENVIRON		D I J
-DB_USE_ENVIRON_ROOT	D I J
-DB_VERB_DEADLOCK	D I J
-DB_VERB_RECOVERY	D I J
-DB_VERB_REPLICATION	D I J
-DB_VERB_WAITSFOR	D I J
-DB_VERIFY		D I J
-DB_VERIFY_BAD		D I N
-DB_VERIFY_FATAL		* I *
-DB_VERSION_MAJOR	* I J
-DB_VERSION_MINOR	* I J
-DB_VERSION_MISMATCH	D I *
-DB_VERSION_PATCH	* I J
-DB_VERSION_STRING	* I N
-DB_WRITECURSOR		D I J
-DB_WRITELOCK		* I *
-DB_WRITEOPEN		* I *
-DB_XA_CREATE		D I J
-DB_XIDDATASIZE		D I J
-DB_YIELDCPU		D I J
+DB_AFTER			D I J
+DB_AGGRESSIVE			D I J
+DB_ALREADY_ABORTED		* I *
+DB_AM_CHKSUM			* I *
+DB_AM_CL_WRITER			* I *
+DB_AM_COMPENSATE		* I *
+DB_AM_CREATED			* I *
+DB_AM_CREATED_MSTR		* I *
+DB_AM_DBM_ERROR			* I *
+DB_AM_DELIMITER			* I *
+DB_AM_DISCARD			* I *
+DB_AM_DUP			* I *
+DB_AM_DUPSORT			* I *
+DB_AM_ENCRYPT			* I *
+DB_AM_FIXEDLEN			* I *
+DB_AM_INMEM			* I *
+DB_AM_INORDER			* I *
+DB_AM_IN_RENAME			* I *
+DB_AM_NOT_DURABLE		* I *
+DB_AM_OPEN_CALLED		* I *
+DB_AM_PAD			* I *
+DB_AM_PGDEF			* I *
+DB_AM_RDONLY			* I *
+DB_AM_READ_UNCOMMITTED		* I *
+DB_AM_RECNUM			* I *
+DB_AM_RECOVER			* I *
+DB_AM_RENUMBER			* I *
+DB_AM_REVSPLITOFF		* I *
+DB_AM_SECONDARY			* I *
+DB_AM_SNAPSHOT			* I *
+DB_AM_SUBDB			* I *
+DB_AM_SWAP			* I *
+DB_AM_TXN			* I *
+DB_AM_VERIFYING			* I *
+DB_APPEND			D I J
+DB_ARCH_ABS			D I J
+DB_ARCH_DATA			D I J
+DB_ARCH_LOG			D I J
+DB_ARCH_REMOVE			D I J
+DB_ASSOC_IMMUTABLE_KEY		* I *
+DB_AUTO_COMMIT			D I J
+DB_BEFORE			D I J
+DB_BTREE			D I J
+DB_BTREEMAGIC			* I *
+DB_BTREEOLDVER			* I *
+DB_BTREEVERSION			* I *
+DB_BUFFER_SMALL			D I N
+DB_CACHED_COUNTS		* I *
+DB_CDB_ALLDB			D I J
+DB_CHKSUM			D I J
+DB_COMPACT_FLAGS		* I *
+DB_CONFIG			D * *
+DB_CONSUME			D I J
+DB_CONSUME_WAIT			D I J
+DB_CREATE			D I J
+DB_CURRENT			D I J
+DB_CXX_NO_EXCEPTIONS		D I *
+DB_DBM_HSEARCH			* I *
+DB_DBT_APPMALLOC		D I N
+DB_DBT_DUPOK			* I *
+DB_DBT_ISSET			* I *
+DB_DBT_MALLOC			D I J
+DB_DBT_PARTIAL			D I J
+DB_DBT_REALLOC			D I N
+DB_DBT_USERMEM			D I J
+DB_DEGREE_2			* I *
+DB_DELETED			* I *
+DB_DIRECT			D I *
+DB_DIRECT_DB			D I J
+DB_DIRECT_LOG			D I J
+DB_DIRTY_READ			* I *
+DB_DONOTINDEX			D I J
+DB_DSYNC_DB			D I J
+DB_DSYNC_LOG			D I J
+DB_DUP				D I J
+DB_DUPSORT			D I J
+DB_DURABLE_UNKNOWN		* I *
+DB_EID_BROADCAST		D I J
+DB_EID_INVALID			D I J
+DB_ENCRYPT			D I J
+DB_ENCRYPT_AES			D I J
+DB_ENV_AUTO_COMMIT		* I *
+DB_ENV_CDB			* I *
+DB_ENV_CDB_ALLDB		* I *
+DB_ENV_CREATE			* I *
+DB_ENV_DBLOCAL			* I *
+DB_ENV_DIRECT_DB		* I *
+DB_ENV_DIRECT_LOG		* I *
+DB_ENV_DSYNC_DB			* I *
+DB_ENV_DSYNC_LOG		* I *
+DB_ENV_FATAL			* I *
+DB_ENV_LOCKDOWN			* I *
+DB_ENV_LOG_AUTOREMOVE		* I *
+DB_ENV_LOG_INMEMORY		* I *
+DB_ENV_NOLOCKING		* I *
+DB_ENV_NOMMAP			* I *
+DB_ENV_NOPANIC			* I *
+DB_ENV_OPEN_CALLED		* I *
+DB_ENV_OVERWRITE		* I *
+DB_ENV_PRIVATE			* I *
+DB_ENV_REGION_INIT		* I *
+DB_ENV_RPCCLIENT		* I *
+DB_ENV_RPCCLIENT_GIVEN		* I *
+DB_ENV_SYSTEM_MEM		* I *
+DB_ENV_THREAD			* I *
+DB_ENV_TIME_NOTGRANTED		* I *
+DB_ENV_TXN_NOSYNC		* I *
+DB_ENV_TXN_WRITE_NOSYNC		* I *
+DB_ENV_YIELDCPU			* I *
+DB_EXCL				D I J
+DB_EXTENT			* I *
+DB_FAST_STAT			D I J
+DB_FCNTL_LOCKING		* I *
+DB_FILE_ID_LEN			* I *
+DB_FIRST			D I J
+DB_FLUSH			D I J
+DB_FORCE			D I J
+DB_FREELIST_ONLY		D I J
+DB_FREE_SPACE			D I J
+DB_GET_BOTH			D I J
+DB_GET_BOTHC			* I *
+DB_GET_BOTH_RANGE		D I J
+DB_GET_RECNO			D I J
+DB_HANDLE_LOCK			* I *
+DB_HASH				D I J
+DB_HASHMAGIC			* I *
+DB_HASHOLDVER			* I *
+DB_HASHVERSION			* I *
+DB_HOME				D * *
+DB_IMMUTABLE_KEY		D I J
+DB_INIT_CDB			D I J
+DB_INIT_LOCK			D I J
+DB_INIT_LOG			D I J
+DB_INIT_MPOOL			D I J
+DB_INIT_REP			D I J
+DB_INIT_TXN			D I J
+DB_INORDER			D I J
+DB_JOINENV			* I J
+DB_JOIN_ITEM			D I J
+DB_JOIN_NOSORT			D I J
+DB_KEYEMPTY			D I J
+DB_KEYEXIST			D I J
+DB_KEYFIRST			D I J
+DB_KEYLAST			D I J
+DB_LAST				D I J
+DB_LOCKDOWN			D I J
+DB_LOCKVERSION			* I *
+DB_LOCK_ABORT			* I *
+DB_LOCK_DEADLOCK		D I J
+DB_LOCK_DEFAULT			D I J
+DB_LOCK_DUMP			* I *
+DB_LOCK_EXPIRE			D I J
+DB_LOCK_GET			D I J
+DB_LOCK_GET_TIMEOUT		D I J
+DB_LOCK_INHERIT			* I *
+DB_LOCK_IREAD			D I J
+DB_LOCK_IWR			D I J
+DB_LOCK_IWRITE			D I J
+DB_LOCK_MAXLOCKS		D I J
+DB_LOCK_MAXWRITE		D I J
+DB_LOCK_MINLOCKS		D I J
+DB_LOCK_MINWRITE		D I J
+DB_LOCK_NG			* I *
+DB_LOCK_NORUN			* I *
+DB_LOCK_NOTGRANTED		D I J
+DB_LOCK_NOWAIT			D I J
+DB_LOCK_OLDEST			D I J
+DB_LOCK_PUT			D I J
+DB_LOCK_PUT_ALL			D I J
+DB_LOCK_PUT_OBJ			D I J
+DB_LOCK_PUT_READ		* I *
+DB_LOCK_RANDOM			D I J
+DB_LOCK_READ			D I J
+DB_LOCK_READ_UNCOMMITTED	* I *
+DB_LOCK_RECORD			* I *
+DB_LOCK_SET_TIMEOUT		* I *
+DB_LOCK_SWITCH			* I *
+DB_LOCK_TIMEOUT			D I J
+DB_LOCK_TRADE			* I *
+DB_LOCK_UPGRADE			* I *
+DB_LOCK_UPGRADE_WRITE		* I *
+DB_LOCK_WAIT			* I *
+DB_LOCK_WRITE			D I J
+DB_LOCK_WWRITE			* I *
+DB_LOCK_YOUNGEST		D I J
+DB_LOGC_BUF_SIZE		* I *
+DB_LOGFILEID_INVALID		* I *
+DB_LOGMAGIC			* I *
+DB_LOGOLDVER			* I *
+DB_LOGVERSION			* I *
+DB_LOG_AUTOREMOVE		D I J
+DB_LOG_BUFFER_FULL		D I *
+DB_LOG_CHKPNT			* I *
+DB_LOG_COMMIT			* I *
+DB_LOG_DISK			* I *
+DB_LOG_INMEMORY			D I J
+DB_LOG_LOCKED			* I *
+DB_LOG_NOCOPY			* I *
+DB_LOG_NOT_DURABLE		* I *
+DB_LOG_PERM			* I *
+DB_LOG_RESEND			* I *
+DB_LOG_SILENT_ERR		* I *
+DB_LOG_WRNOSYNC			* I *
+DB_LSTAT_ABORTED		* I *
+DB_LSTAT_EXPIRED		* I *
+DB_LSTAT_FREE			* I *
+DB_LSTAT_HELD			* I *
+DB_LSTAT_PENDING		* I *
+DB_LSTAT_WAITING		* I *
+DB_MAX_PAGES			* I *
+DB_MAX_RECORDS			* I *
+DB_MPOOL_CLEAN			D I *
+DB_MPOOL_CREATE			D I *
+DB_MPOOL_DIRTY			D I *
+DB_MPOOL_DISCARD		D I *
+DB_MPOOL_FREE			* I *
+DB_MPOOL_LAST			D I *
+DB_MPOOL_NEW			D I *
+DB_MPOOL_NOFILE			D I J
+DB_MPOOL_UNLINK			D I J
+DB_MULTIPLE			D I J
+DB_MULTIPLE_INIT		D I *
+DB_MULTIPLE_KEY			D I J
+DB_MULTIPLE_KEY_NEXT		D I *
+DB_MULTIPLE_NEXT		D I *
+DB_MULTIPLE_RECNO_NEXT		D I *
+DB_MUTEX_ALLOCATED		* I *
+DB_MUTEX_LOCKED			* I *
+DB_MUTEX_LOGICAL_LOCK		* I *
+DB_MUTEX_SELF_BLOCK		D I *
+DB_MUTEX_THREAD			* I *
+DB_NEEDSPLIT			* I *
+DB_NEXT				D I J
+DB_NEXT_DUP			D I J
+DB_NEXT_NODUP			D I J
+DB_NODUPDATA			D I J
+DB_NOLOCKING			D I J
+DB_NOMMAP			D I J
+DB_NOORDERCHK			D I J
+DB_NOOVERWRITE			D I J
+DB_NOPANIC			D I J
+DB_NOSERVER			D I *
+DB_NOSERVER_HOME		D I J
+DB_NOSERVER_ID			D I J
+DB_NOSYNC			D I J
+DB_NOTFOUND			D I J
+DB_NO_AUTO_COMMIT		* I *
+DB_ODDFILESIZE			D I *
+DB_OK_BTREE			* I *
+DB_OK_HASH			* I *
+DB_OK_QUEUE			* I *
+DB_OK_RECNO			* I *
+DB_OLD_VERSION			D I *
+DB_OPFLAGS_MASK			* I *
+DB_ORDERCHKONLY			D I J
+DB_OVERWRITE			D I J
+DB_PAGE_LOCK			* I *
+DB_PAGE_NOTFOUND		D I *
+DB_PANIC_ENVIRONMENT		D I J
+DB_POSITION			D I J
+DB_PREV				D I J
+DB_PREV_NODUP			D I J
+DB_PRINTABLE			D I J
+DB_PRIORITY_DEFAULT		D I J
+DB_PRIORITY_HIGH		D I J
+DB_PRIORITY_LOW			D I J
+DB_PRIORITY_VERY_HIGH 		D I J
+DB_PRIORITY_VERY_LOW		D I J
+DB_PRIVATE			D I J
+DB_PR_PAGE			* I *
+DB_PR_RECOVERYTEST		* I *
+DB_QAMMAGIC			* I *
+DB_QAMOLDVER			* I *
+DB_QAMVERSION			* I *
+DB_QUEUE			D I J
+DB_RDONLY			D I J
+DB_RDWRMASTER			* I *
+DB_READ_COMMITTED		D I J
+DB_READ_UNCOMMITTED		D I J
+DB_RECNO			D I J
+DB_RECNUM			D I J
+DB_RECORDCOUNT			* I *
+DB_RECORD_LOCK			* I *
+DB_RECOVER			D I J
+DB_RECOVER_FATAL		D I J
+DB_REDO				* I *
+DB_REGION_INIT			D I J
+DB_REGION_MAGIC			* I *
+DB_REGISTER			D I J
+DB_RENAMEMAGIC			* I *
+DB_RENUMBER			D I J
+DB_REP_ANYWHERE			D I J
+DB_REP_BULKOVF			* I *
+DB_REP_CLIENT			D I J
+DB_REP_CONF_BULK		D I J
+DB_REP_CONF_DELAYCLIENT		D I J
+DB_REP_CONF_NOAUTOINIT		D I J
+DB_REP_CONF_NOWAIT		D I J
+DB_REP_DUPMASTER		D I N
+DB_REP_EGENCHG			* I *
+DB_REP_HANDLE_DEAD		D I N
+DB_REP_HOLDELECTION		D I N
+DB_REP_IGNORE			D I J
+DB_REP_ISPERM			D I J
+DB_REP_JOIN_FAILURE		D I N
+DB_REP_LOCKOUT			D I N
+DB_REP_LOGREADY			* I *
+DB_REP_MASTER			D I J
+DB_REP_NEWMASTER		D I J
+DB_REP_NEWSITE			D I J
+DB_REP_NOBUFFER			D I J
+DB_REP_NOTPERM			D I J
+DB_REP_PAGEDONE			* I *
+DB_REP_PERMANENT		D I J
+DB_REP_REREQUEST		D I J
+DB_REP_STARTUPDONE		D I J
+DB_REP_UNAVAIL			D I N
+DB_REVSPLITOFF			D I J
+DB_RMW				D I J
+DB_RPCCLIENT			D I J
+DB_RUNRECOVERY			D I N
+DB_SALVAGE			D I J
+DB_SECONDARY_BAD		D I *
+DB_SEQUENCE_OLDVER		* I *
+DB_SEQUENCE_VERSION		* I *
+DB_SEQ_DEC			D I J
+DB_SEQ_INC			D I J
+DB_SEQ_RANGE_SET		* I *
+DB_SEQ_WRAP			D I J
+DB_SEQ_WRAPPED			* I *
+DB_SET				D I J
+DB_SET_LOCK_TIMEOUT		D I J
+DB_SET_RANGE			D I J
+DB_SET_RECNO			D I J
+DB_SET_TXN_LSNP			* I *
+DB_SET_TXN_NOW			* I *
+DB_SET_TXN_TIMEOUT		D I J
+DB_SNAPSHOT			D I J
+DB_STAT_ALL			D I *
+DB_STAT_CLEAR			D I J
+DB_STAT_LOCK_CONF		D I *
+DB_STAT_LOCK_LOCKERS		D I *
+DB_STAT_LOCK_OBJECTS		D I *
+DB_STAT_LOCK_PARAMS		D I *
+DB_STAT_MEMP_HASH		D I *
+DB_STAT_SUBSYSTEM		D I *
+DB_SURPRISE_KID			* I *
+DB_SWAPBYTES			* I *
+DB_SYSTEM_MEM			D I J
+DB_TEST_ELECTINIT		* I *
+DB_TEST_ELECTVOTE1		* I *
+DB_TEST_POSTDESTROY		* I *
+DB_TEST_POSTLOG			* I *
+DB_TEST_POSTLOGMETA		* I *
+DB_TEST_POSTOPEN		* I *
+DB_TEST_POSTSYNC		* I *
+DB_TEST_PREDESTROY		* I *
+DB_TEST_PREOPEN			* I *
+DB_TEST_SUBDB_LOCKS		* I *
+DB_THREAD			D I J
+DB_THREADID_STRLEN		D I *
+DB_TIMEOUT			* I *
+DB_TIME_NOTGRANTED		D I J
+DB_TRUNCATE			D I J
+DB_TXNVERSION			* I *
+DB_TXN_ABORT			D I J
+DB_TXN_APPLY			D I J
+DB_TXN_BACKWARD_ALLOC		* I *
+DB_TXN_BACKWARD_ROLL		D I J
+DB_TXN_CKP			* I *
+DB_TXN_FORWARD_ROLL		D I J
+DB_TXN_NOSYNC			D I J
+DB_TXN_NOT_DURABLE		D I J
+DB_TXN_NOWAIT			D I J
+DB_TXN_OPENFILES		* I *
+DB_TXN_POPENFILES		* I *
+DB_TXN_PRINT			D I J
+DB_TXN_SYNC			D I J
+DB_TXN_WRITE_NOSYNC		D I J
+DB_UNDO				* I *
+DB_UNKNOWN			D I J
+DB_UNREF			* I *
+DB_UPDATE_SECONDARY		* I *
+DB_UPGRADE			D I J
+DB_USE_ENVIRON			D I J
+DB_USE_ENVIRON_ROOT		D I J
+DB_VERB_DEADLOCK		D I J
+DB_VERB_RECOVERY		D I J
+DB_VERB_REGISTER		D I J
+DB_VERB_REPLICATION		D I J
+DB_VERB_WAITSFOR		D I J
+DB_VERIFY			D I J
+DB_VERIFY_BAD			D I N
+DB_VERIFY_FATAL			* I *
+DB_VERSION_MAJOR		* I J
+DB_VERSION_MINOR		* I J
+DB_VERSION_MISMATCH		D I N
+DB_VERSION_PATCH		* I J
+DB_VERSION_STRING		* I N
+DB_WRITECURSOR			D I J
+DB_WRITELOCK			* I *
+DB_WRITEOPEN			* I *
+DB_XA_CREATE			D I J
+DB_XIDDATASIZE			D I J
+DB_YIELDCPU			D I J
diff --git a/storage/bdb/dist/s_all b/storage/bdb/dist/s_all
index 342074aa237..e3bccac23ff 100644
--- a/storage/bdb/dist/s_all
+++ b/storage/bdb/dist/s_all
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_all,v 1.11 2002/10/30 15:26:36 bostic Exp $
+#	$Id: s_all,v 12.0 2004/11/17 03:43:35 bostic Exp $
 
 sh s_perm		# permissions.
 sh s_symlink		# symbolic links.
@@ -15,7 +15,7 @@ sh s_include		# standard include files.
 
 sh s_win32		# Win32 include files.
 sh s_win32_dsp		# Win32 build environment.
-#sh s_vxworks		# VxWorks include files.
-#sh s_java		# Java support.
+sh s_vxworks		# VxWorks include files.
+sh s_java		# Java support.
 sh s_test		# Test suite support.
 sh s_tags		# Tags files.
diff --git a/storage/bdb/dist/s_config b/storage/bdb/dist/s_config
index 604a1d08916..194df83a59e 100644
--- a/storage/bdb/dist/s_config
+++ b/storage/bdb/dist/s_config
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_config,v 1.13 2003/07/02 15:27:44 bostic Exp $
+#	$Id: s_config,v 12.1 2005/08/16 05:19:18 mjc Exp $
 #
 # Build the autoconfiguration files.
 
@@ -20,14 +20,13 @@ rm -f configure
 autoconf
 
 # Edit version information we couldn't pre-compute.
-(echo "1,\$s/__EDIT_DB_VERSION_MAJOR__/$DB_VERSION_MAJOR/g" &&
- echo "1,\$s/__EDIT_DB_VERSION_MINOR__/$DB_VERSION_MINOR/g" &&
- echo "1,\$s/__EDIT_DB_VERSION_PATCH__/$DB_VERSION_PATCH/g" &&
- echo "1,\$s/__EDIT_DB_VERSION_STRING__/$DB_VERSION_STRING/g" &&
- echo "1,\$s/__EDIT_DB_VERSION_UNIQUE_NAME__/$DB_VERSION_UNIQUE_NAME/g" &&
- echo "1,\$s/__EDIT_DB_VERSION__/$DB_VERSION/g" &&
- echo "w" &&
- echo "q") | ed configure
+sed -e "s/__EDIT_DB_VERSION_MAJOR__/$DB_VERSION_MAJOR/g" \
+    -e "s/__EDIT_DB_VERSION_MINOR__/$DB_VERSION_MINOR/g" \
+    -e "s/__EDIT_DB_VERSION_PATCH__/$DB_VERSION_PATCH/g" \
+    -e "s/__EDIT_DB_VERSION_STRING__/$DB_VERSION_STRING/g" \
+    -e "s/__EDIT_DB_VERSION_UNIQUE_NAME__/$DB_VERSION_UNIQUE_NAME/g" \
+    -e "s/__EDIT_DB_VERSION__/$DB_VERSION/g" configure > configure.version
+mv configure.version configure
 
 rm -rf autom4te.cache
 chmod 555 configure
diff --git a/storage/bdb/dist/s_crypto b/storage/bdb/dist/s_crypto
index 05a93043a8b..cc54a347c07 100644
--- a/storage/bdb/dist/s_crypto
+++ b/storage/bdb/dist/s_crypto
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_crypto,v 11.8 2003/11/24 22:41:26 bostic Exp $
+#	$Id: s_crypto,v 12.0 2004/11/17 03:43:35 bostic Exp $
 
 # Remove crypto from the DB source tree.
 
diff --git a/storage/bdb/dist/s_include b/storage/bdb/dist/s_include
index ab058d86dba..e51aad36e63 100644
--- a/storage/bdb/dist/s_include
+++ b/storage/bdb/dist/s_include
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_include,v 1.22 2004/04/19 18:27:17 mjc Exp $
+#	$Id: s_include,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the automatically generated function prototype files.
 
diff --git a/storage/bdb/dist/s_java b/storage/bdb/dist/s_java
index ae715ccd687..57b88e8e560 100644
--- a/storage/bdb/dist/s_java
+++ b/storage/bdb/dist/s_java
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_java,v 11.8 2004/04/06 20:43:35 mjc Exp $
+#	$Id: s_java,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the Java files.
 
diff --git a/storage/bdb/dist/s_java_const b/storage/bdb/dist/s_java_const
index 644b32324d6..8374b1f61a8 100644
--- a/storage/bdb/dist/s_java_const
+++ b/storage/bdb/dist/s_java_const
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_java_const,v 1.27 2004/07/30 14:51:37 mjc Exp $
+#	$Id: s_java_const,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the Java files.
 
diff --git a/storage/bdb/dist/s_java_stat b/storage/bdb/dist/s_java_stat
index 4eb2ab1470a..0d00be59646 100644
--- a/storage/bdb/dist/s_java_stat
+++ b/storage/bdb/dist/s_java_stat
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_java_stat,v 1.33 2004/09/28 19:30:36 mjc Exp $
+#	$Id: s_java_stat,v 12.9 2005/11/04 00:09:21 mjc Exp $
 #
 # Build the Java files.
 
@@ -8,7 +8,7 @@ msgjava="/*-
  *
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2002-2004
+ * Copyright (c) 2002-2005
  *	Sleepycat Software.  All rights reserved.
  */"
 
@@ -28,8 +28,9 @@ jclass()
 	/__db_$1/d
 	/;/!d
 	/^}/d
-	/char[	 ]*\*/{
-		s/^[	 ]*[^\*]*\*[	 ]*\([^;]*\).*/\\
+	/db_threadid_t/d
+	/char[	 ]*/{
+		s/^[	 ]*char[*	 ]*[	 ]*\([^[;]*\).*/\\
 $2    private String \1;\\
 $2    public String get_\1() {\\
 $2        return \1;\\
@@ -84,12 +85,14 @@ $2    }/p
 }
 EOF
 	sed -n -f $s < ../dbinc/db.in |
-	perl -w -p -e 's/get_(st|bt|hash|qs)_/get_/;' \
+	perl -w -p -e 's/get_(st|bt|hash|qs|compact)_/get_/;' \
 	           -e 'if (m/get.*\(/) {' \
-	           -e     's/_n([b-df-hj-np-tv-z])/_num_$1/;' \
+	           -e     's/_n([b-df-hj-np-tv-z]|upgrade)/_num_$1/;' \
 	           -e     's/_(min|max)([a-z])/_$1_$2/;' \
-	           -e     's/pg(.)/_pages_$1/;' \
-	           -e     's/(count|flag|free|id\(|page|size|timeout)/_$1/g;' \
+	           -e     's/_cnt/_count_/;' \
+	           -e     's/_pg/_pages_/;' \
+	           -e     's/(count|flag|free|page|percent|size|timeout)/_$1/g;' \
+	           -e     's/([^p])(id\()/$1_$2/g;' \
 	           -e     's/__*/_/g;' \
 	           -e     's/_(.)/\U$1/g' \
 	           -e '};' \
@@ -109,8 +112,9 @@ jclass_jni()
 	/__db_$1/d
 	/;/!d
 	/^}/d
-	/char[	 ]*\*/{
-		s/^[	 ]*[^\*]*\*[	 ]*\([^;]*\).*/	JAVADB_STAT_STRING(jnienv, jobj, $1_\1_fid, statp, \1);/p
+	/db_threadid_t/d
+	/char[	 ]*/{
+		s/^[	 ]*char[*	 ]*[	 ]*\([^[;]*\).*/	JAVADB_STAT_STRING(jnienv, jobj, $1_\1_fid, statp, \1);/p
 		d
 	}
 	/time_t/{
@@ -150,8 +154,9 @@ jni_fieldid_decls()
 	/__db_$1/d
 	/;/!d
 	/^}/d
-	/char[	 ]*\*/{
-		s/^[	 ]*[^\*]*\*[	 ]*\([^;]*\).*/static jfieldID $1_\1_fid;/p
+	/db_threadid_t/d
+	/char[	 ]*/{
+		s/^[	 ]*char[*	 ]*[	 ]*\([^[;]*\).*/static jfieldID $1_\1_fid;/p
 		d
 	}
 	/time_t/{
@@ -187,8 +192,9 @@ jni_fieldids()
 	/__db_$1/d
 	/;/!d
 	/^}/d
-	/char[	 ]*\*/{
-		s/^[	 ]*[^\*]*\*[	 ]*\([^;]*\).*/	{ \&$1_\1_fid, \&$1_class, \"\1\", \"Ljava\/lang\/String;\" },/p
+	/db_threadid_t/d
+	/char[	 ]*/{
+		s/^[	 ]*char[*	 ]*[	 ]*\([^[;]*\).*/	{ \&$1_\1_fid, \&$1_class, \"\1\", \"Ljava\/lang\/String;\" },/p
 		d
 	}
 	/time_t/{
@@ -225,8 +231,9 @@ jclass_toString()
 	/__db_$1/d
 	/;/!d
 	/^}/d
-	/char[	 ]*\*/{
-		s/^[	 ]*[^\*]*\*[	 ]*\([^;]*\).*/$3            + "\\\\n$3  \1=" + \1/p
+	/db_threadid_t/d
+	/char[	 ]*/{
+		s/^[	 ]*char[*	 ]*[	 ]*\([^[;]*\).*/$3            + "\\\\n$3  \1=" + \1/p
 		d
 	}
 	/DB_TXN_ACTIVE[	 ]*\*/{
@@ -275,11 +282,41 @@ echo "$msgjava" > $c
 > $u2
 
 stat_class bt_stat BtreeStats " extends DatabaseStats"
+
+# Build CompactStats.java - not purely a statistics class, but close enough to
+# share this code.
+(echo "$msgjava"
+ echo
+ echo 'package com.sleepycat.db;'
+ echo
+ echo 'import com.sleepycat.db.internal.DbUtil;'
+ echo
+ echo "public class CompactStats"
+ echo '{'
+ echo "    // no public constructor"
+ echo "    protected CompactStats() {}"
+ echo
+ echo "    /* package */"
+ echo "    CompactStats(int fillpercent, int timeout, int pages) {"
+ echo "        this.compact_fillpercent = fillpercent;"
+ echo "        this.compact_timeout = timeout;"
+ echo "        this.compact_pages = pages;"
+ echo "    }"
+ jclass compact
+ jclass_toString compact CompactStats
+ echo '}'
+ echo '// end of TransactionStats.java') > $t
+jclass_jni compact __dbj_fill_compact
+f=../java/src/com/sleepycat/db/CompactStats.java
+cmp $t $f > /dev/null 2>&1 ||
+    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
 stat_class h_stat HashStats " extends DatabaseStats"
 stat_class lock_stat LockStats
 stat_class log_stat LogStats
 stat_class mpool_fstat CacheFileStats
 stat_class mpool_stat CacheStats
+stat_class mutex_stat MutexStats
 stat_class qam_stat QueueStats " extends DatabaseStats"
 stat_class rep_stat ReplicationStats
 stat_class seq_stat SequenceStats
diff --git a/storage/bdb/dist/s_java_swig b/storage/bdb/dist/s_java_swig
index c1e67e31608..8be53b058bc 100644
--- a/storage/bdb/dist/s_java_swig
+++ b/storage/bdb/dist/s_java_swig
@@ -1,8 +1,12 @@
 #!/bin/sh -
-#       $Id: s_java_swig,v 11.11 2004/09/23 17:31:53 mjc Exp $
+#       $Id: s_java_swig,v 12.2 2005/10/17 19:20:12 bostic Exp $
 #
 # Run SWIG to generate the Java APIs
 
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15   
+
 SWIG=swig
 SWIG_DIR=../libdb_java
 SWIG_FILE=$SWIG_DIR/db.i
@@ -53,3 +57,9 @@ for f in *.java ; do
 	perl -p $SWIG_DIR/java-post.pl < $f > $JAVA_SRCDIR/$f || exit $?
 	rm -f $f
 done
+
+# db_config.h must be the first #include, move it to the top of the file.
+(
+      echo '#include "db_config.h"'
+      sed '/#include "db_config.h"/d' < db_java_wrap.c
+) > $t && cp $t db_java_wrap.c
diff --git a/storage/bdb/dist/s_je2db b/storage/bdb/dist/s_je2db
index aaaa42cf12d..a5c64197e57 100644
--- a/storage/bdb/dist/s_je2db
+++ b/storage/bdb/dist/s_je2db
@@ -52,7 +52,7 @@ E1='s/com\.sleepycat\.je/com.sleepycat.db/g'
 E2='/import com\.sleepycat\.db\.ForeignKeyNullifier/d'
 E3='/implements/s/, ForeignKeyNullifier//'
 E4='//,//d'
-EXCLUDETESTS="\(\(ForeignKeyTest\)\|\(TupleSerialFactoryTest\)\)"
+EXCLUDETESTS="\(\(ForeignKeyTest\)\|\(TupleSerialFactoryTest\)\\|\(XACollectionTest\)\)"
 
 cd "$JESRC"
 for f in `find . -name '*.java' | grep $DIRMATCH` ; do
diff --git a/storage/bdb/dist/s_perm b/storage/bdb/dist/s_perm
index b2bc1bbe746..094256a5fdf 100755
--- a/storage/bdb/dist/s_perm
+++ b/storage/bdb/dist/s_perm
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_perm,v 1.31 2004/10/05 18:56:58 bostic Exp $
+#	$Id: s_perm,v 12.0 2004/11/17 03:43:35 bostic Exp $
 
 d=..
 echo 'Updating Berkeley DB source tree permissions...'
@@ -39,7 +39,7 @@ run dist/s_vxworks 555
 run dist/s_win32 555
 run dist/s_win32_dsp 555
 run dist/vx_buildcd 555
-#run mod_db4/configure 555
+run mod_db4/configure 555
 
 run perl/BerkeleyDB/dbinfo 555
 run perl/BerkeleyDB/mkpod 555
diff --git a/storage/bdb/dist/s_readme b/storage/bdb/dist/s_readme
index f2ed0704451..1a56da1bff3 100644
--- a/storage/bdb/dist/s_readme
+++ b/storage/bdb/dist/s_readme
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_readme,v 1.7 2003/07/02 15:27:44 bostic Exp $
+#	$Id: s_readme,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the README.
 
diff --git a/storage/bdb/dist/s_recover b/storage/bdb/dist/s_recover
index 9aad424fc01..0b9c16f7fbc 100755
--- a/storage/bdb/dist/s_recover
+++ b/storage/bdb/dist/s_recover
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_recover,v 1.17 2004/06/17 17:35:19 bostic Exp $
+#	$Id: s_recover,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the automatically generated logging/recovery files.
 
@@ -68,4 +68,4 @@ for i in $DIR; do
 done
 
 # Build the example application's recovery routines.
-#(cd ../examples_c/ex_apprec && sh auto_rebuild)
+(cd ../examples_c/ex_apprec && sh auto_rebuild)
diff --git a/storage/bdb/dist/s_rpc b/storage/bdb/dist/s_rpc
index 8dada0e3baf..7da75819e06 100644
--- a/storage/bdb/dist/s_rpc
+++ b/storage/bdb/dist/s_rpc
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_rpc,v 11.20 2004/03/11 20:11:17 bostic Exp $
+#	$Id: s_rpc,v 12.0 2004/11/17 03:43:35 bostic Exp $
 #
 # Build the automatically generated RPC files
 
diff --git a/storage/bdb/dist/s_symlink b/storage/bdb/dist/s_symlink
index 03c575a5647..fe472dfbc35 100755
--- a/storage/bdb/dist/s_symlink
+++ b/storage/bdb/dist/s_symlink
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_symlink,v 1.32 2004/04/01 15:10:53 bostic Exp $
+#	$Id: s_symlink,v 12.1 2004/12/30 21:27:57 bostic Exp $
 
 echo 'Creating Berkeley DB source tree symbolic links...'
 
@@ -22,6 +22,7 @@ build db_checkpoint/tags ../dist/tags
 build db_deadlock/tags ../dist/tags
 build db_dump/tags ../dist/tags
 build db_dump185/tags ../dist/tags
+build db_hotbackup/tags ../dist/tags
 build db_load/tags ../dist/tags
 build db_printlog/tags ../dist/tags
 build db_recover/tags ../dist/tags
@@ -33,8 +34,8 @@ build dbinc_auto/tags ../dist/tags
 build dbm/tags ../dist/tags
 build dbreg/tags ../dist/tags
 build env/tags ../dist/tags
-#build examples_c/tags ../dist/tags
-#build examples_cxx/tags ../dist/tags
+build examples_c/tags ../dist/tags
+build examples_cxx/tags ../dist/tags
 build fileops/tags ../dist/tags
 build hash/tags ../dist/tags
 build hmac/tags ../dist/tags
diff --git a/storage/bdb/dist/s_tags b/storage/bdb/dist/s_tags
index 6d26bec62fb..22613775d94 100755
--- a/storage/bdb/dist/s_tags
+++ b/storage/bdb/dist/s_tags
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_tags,v 1.18 2004/04/01 15:11:14 bostic Exp $
+#	$Id: s_tags,v 12.1 2005/10/25 14:21:21 bostic Exp $
 #
 # Build tags files.
 
@@ -55,7 +55,9 @@ fi
 ctags $flags $files 2>/dev/null
 chmod 444 $f
 
-#f=../test_perf/tags
-#echo "Building $f"
-#(cd ../test_perf && ctags $flags *.[ch] 2>/dev/null)
-#chmod 444 $f
+for i in test_perf test_rep test_server; do
+	f=../$i/tags
+	echo "Building $f"
+	(cd ../$i && ctags $flags *.[ch] 2>/dev/null)
+	chmod 444 $f
+done
diff --git a/storage/bdb/dist/s_test b/storage/bdb/dist/s_test
index df0648d56ba..83b3c567587 100644
--- a/storage/bdb/dist/s_test
+++ b/storage/bdb/dist/s_test
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_test,v 1.29 2004/05/13 18:51:43 mjc Exp $
+#	$Id: s_test,v 12.2 2005/06/23 15:26:39 carol Exp $
 #
 # Build the Tcl test files.
 
@@ -30,9 +30,13 @@ trap 'rm -f $t; exit 0' 0 1 2 3 13 15
  echo "global dict"				&& \
  echo "global util_path"			&& \
  echo ""					&& \
+ echo "global is_freebsd_test"			&& \
  echo "global is_hp_test"			&& \
+ echo "global is_linux_test"			&& \
  echo "global is_qnx_test"			&& \
+ echo "global is_sunos_test"			&& \
  echo "global is_windows_test"			&& \
+ echo "global is_windows9x_test"		&& \
  echo ""					&& \
  echo "set KILL \"@db_cv_path_kill@\"") > $t
 
@@ -55,9 +59,13 @@ cmp $t $f > /dev/null 2>&1 ||
  echo "global dict"				&& \
  echo "global util_path"			&& \
  echo ""					&& \
+ echo "global is_freebsd_test"			&& \
  echo "global is_hp_test"			&& \
+ echo "global is_linux_test"			&& \
  echo "global is_qnx_test"			&& \
+ echo "global is_sunos_test"			&& \
  echo "global is_windows_test"			&& \
+ echo "global is_windows9x_test"		&& \
  echo ""					&& \
  echo "set KILL ./dbkill.exe") > $t
 
diff --git a/storage/bdb/dist/s_vxworks b/storage/bdb/dist/s_vxworks
index fbff44ab019..de2e41b55b6 100644
--- a/storage/bdb/dist/s_vxworks
+++ b/storage/bdb/dist/s_vxworks
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_vxworks,v 1.52 2004/10/15 18:28:21 bostic Exp $
+#	$Id: s_vxworks,v 12.6 2005/11/03 17:46:13 bostic Exp $
 #
 # Build the VxWorks files.
 
@@ -29,15 +29,17 @@ i\\
 #endif
 }
 /@inttypes_h_decl@/d
-/@stdint_h_decl@/d
 /@stddef_h_decl@/d
+/@stdint_h_decl@/d
+/@unistd_h_decl@/d
+/@thread_h_decl@/d
 s/@u_int8_decl@/typedef unsigned char u_int8_t;/
 /@int16_decl@/d
 s/@u_int16_decl@/typedef unsigned short u_int16_t;/
 /@int32_decl@/d
 s/@u_int32_decl@/typedef unsigned int u_int32_t;/
 s/@int64_decl@//
-s/@u_int64_decl@//
+s/@u_int64_decl@/typedef unsigned long long u_int64_t;/
 /@u_char_decl@/d
 /@u_short_decl@/d
 /@u_int_decl@/d
@@ -46,6 +48,8 @@ s/@u_int64_decl@//
 s/@uintmax_t_decl@/typedef unsigned long uintmax_t;/
 s/@uintptr_t_decl@/typedef unsigned long uintptr_t;/
 s/@db_seq_decl@/typedef int db_seq_t;/
+/@pid_t_decl@/d
+s/@db_threadid_t_decl@/typedef uintmax_t db_threadid_t;/
 s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
 s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
 s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
@@ -58,7 +62,8 @@ ENDOFSEDTEXT
 (echo "$msgc" &&
     sed -f $s ../dbinc/db.in &&
     cat ../dbinc_auto/ext_prot.in) > $t
-`egrep '@.*@' $t` && {
+test `egrep '@.*@' $t` && {
+	egrep '@.*@' $t
 	echo 'Unexpanded autoconf variables found in VxWorks db.h.'
 	exit 1
 }
@@ -67,11 +72,14 @@ cmp $t $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
 
 cat < $s
+s/@INT64_FMT@/#define	INT64_FMT	"%lld"/
+s/@UINT64_FMT@/#define	UINT64_FMT	"%llu"/
 s/@PATH_SEPARATOR@/\/\\\\\\\\/
 s/@db_int_def@//
 ENDOFSEDTEXT
 (echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
-`egrep '@.*@' $t` && {
+test `egrep '@.*@' $t` && {
+	egrep '@.*@' $t
 	echo 'Unexpanded autoconf variables found in VxWorks db_int.h.'
 	exit 1
 }
@@ -163,8 +171,8 @@ ENDOFSEDTEXT
 	echo '#include '
 }
 
-PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_load \
-    db_printlog db_recover db_stat db_upgrade db_verify ex_access"
+PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_hotbackup \
+    db_load db_printlog db_recover db_stat db_upgrade db_verify ex_access"
 
 # Build VxWorks versions of the utilities.
 for i in $PROGRAM_LIST; do
diff --git a/storage/bdb/dist/s_win32 b/storage/bdb/dist/s_win32
index acdb9224918..490bcc888c0 100644
--- a/storage/bdb/dist/s_win32
+++ b/storage/bdb/dist/s_win32
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_win32,v 1.37 2004/10/15 18:28:21 bostic Exp $
+#	$Id: s_win32,v 12.10 2005/11/03 17:46:13 bostic Exp $
 #
 # Build Windows/32 include files.
 
@@ -19,6 +19,8 @@ cat < $s
 /@inttypes_h_decl@/d
 /@stdint_h_decl@/d
 s/@stddef_h_decl@/#include /
+/@unistd_h_decl@/d
+/@thread_h_decl@/d
 s/@u_int8_decl@/typedef unsigned char u_int8_t;/
 s/@int16_decl@/typedef short int16_t;/
 s/@u_int16_decl@/typedef unsigned short u_int16_t;/
@@ -27,6 +29,8 @@ s/@u_int32_decl@/typedef unsigned int u_int32_t;/
 s/@int64_decl@/typedef __int64 int64_t;/
 s/@u_int64_decl@/typedef unsigned __int64 u_int64_t;/
 s/@db_seq_decl@/typedef int64_t db_seq_t;/
+s/@pid_t_decl@/typedef int pid_t;/
+s/@db_threadid_t_decl@/typedef u_int32_t db_threadid_t;/
 /@u_char_decl@/{
 	i\\
 #ifndef _WINSOCKAPI_
@@ -70,14 +74,12 @@ ENDOFSEDTEXT
 (echo "$msgc" &&
     sed -f $s ../dbinc/db.in &&
     cat ../dbinc_auto/ext_prot.in) > $t
-`egrep '@.*@' $t` && {
+test `egrep '@.*@' $t` && {
+	egrep '@.*@' $t
 	echo 'Unexpanded autoconf variables found in Windows db.h.'
 	exit 1
 }
 f=../build_win32/db.h
-cmp $t $f > /dev/null 2>&1 ||
-    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-f=../build_win64/db.h
 cmp $t $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
 
@@ -85,38 +87,33 @@ cat < $s
 s/@cxx_have_stdheaders@/#define	HAVE_CXX_STDHEADERS 1/
 ENDOFSEDTEXT
 (echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t
-`egrep '@.*@' $t` && {
+test `egrep '@.*@' $t` && {
+	egrep '@.*@' $t
 	echo 'Unexpanded autoconf variables found in Windows db_cxx.h.'
 	exit 1
 }
 f=../build_win32/db_cxx.h
-cmp $t $f > /dev/null 2>&1 ||
-    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-f=../build_win64/db_cxx.h
 cmp $t $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
 
 cat < $s
+s/@INT64_FMT@//
+s/@UINT64_FMT@//
 s/@PATH_SEPARATOR@/\\\\\\\\\/:/
 s/@db_int_def@//
 ENDOFSEDTEXT
 (echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
-`egrep '@.*@' $t` && {
+test `egrep '@.*@' $t` && {
+	egrep '@.*@' $t
 	echo 'Unexpanded autoconf variables found in Windows db_int.h.'
 	exit 1
 }
 f=../build_win32/db_int.h
-cmp $t $f > /dev/null 2>&1 ||
-    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-f=../build_win64/db_int.h
 cmp $t $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
 
 f=../build_win32/db_config.h
 (echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t
-cmp $t $f > /dev/null 2>&1 ||
-    (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-f=../build_win64/db_config.h
 cmp $t $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
 
@@ -144,9 +141,6 @@ cmp $t $f > /dev/null 2>&1 ||
 
 f=../build_win32/win_db.h
 i=win_db.in
-cmp $i $f > /dev/null 2>&1 ||
-    (echo "Building $f" && rm -f $f && cp $i $f && chmod 444 $f)
-f=../build_win64/win_db.h
 cmp $i $f > /dev/null 2>&1 ||
     (echo "Building $f" && rm -f $f && cp $i $f && chmod 444 $f)
 
diff --git a/storage/bdb/dist/s_win32_dsp b/storage/bdb/dist/s_win32_dsp
index 10e0ccc4b05..59d3664a5f0 100644
--- a/storage/bdb/dist/s_win32_dsp
+++ b/storage/bdb/dist/s_win32_dsp
@@ -1,5 +1,5 @@
 #!/bin/sh -
-#	$Id: s_win32_dsp,v 1.12 2004/08/20 15:01:06 mjc Exp $
+#	$Id: s_win32_dsp,v 12.3 2005/10/20 01:45:53 mjc Exp $
 #
 # Build Windows/32 .dsp files.
 
@@ -13,10 +13,20 @@ create_dsp()
     match="$2"          # the string used to egrep the $sources file
     sources="$3"        # a modified version of $SRCFILES to facilitate matches
     dsptemplate="$4"    # overall template file for the .dsp
-    srctemplate="$5"    # template file for the src file fragments
+    extra_cppflags="$5" # extra flags to send to compiler
+    release_libs="$6"   # libraries to link against in Release builds
+    debug_libs="$7"     # libraries to link against in Debug builds
+    lib_suffix="$8"     # the library name is libdb@lib_suffix@@VERSION@
 
+    srctemplate="$BUILDDIR/srcfile_dsp.src"    # template file for the src file fragments
     dspoutput=$BUILDDIR/$projname.dsp
 
+
+    postbuild=$dspoutput.postbuild
+    if [ ! -f $postbuild ] ; then
+	    postbuild=/dev/null
+    fi
+
     rm -f $dspoutput.insert
     for srcpath in `egrep "$match" $sources | sed -e 's/[ 	].*//'`
     do
@@ -36,7 +46,17 @@ create_dsp()
     done
     sed -e "/@SOURCE_FILES@/r$dspoutput.insert" \
         -e "/@SOURCE_FILES@/d" \
+	-e "/@POST_BUILD@/r$postbuild" \
+        -e "/@POST_BUILD@/d" \
         -e "s/@project_name@/$projname/g" \
+        -e "s/@bin_rel_dest@/Release/g" \
+        -e "s/@lib_rel_dest@/Release/g" \
+        -e "s/@bin_debug_dest@/Debug/g" \
+        -e "s/@lib_debug_dest@/Debug/g" \
+        -e "s,@extra_cppflags@,$extra_cppflags,g" \
+        -e "s,@release_libs@,$release_libs,g" \
+        -e "s,@debug_libs@,$debug_libs,g" \
+        -e "s,@lib_suffix@,$lib_suffix,g" \
         -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \
         -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \
       < $dsptemplate > $dspoutput.new
@@ -67,44 +87,44 @@ sed -e "s/#.*$//" \
 MODULES="`sed -e 's/^[^ ]* //' < $TMPA \
     | tr ' ' '\012' | sort | uniq`"
 
-for BUILDDIR in ../build_win32 ../build_win64
+for BUILDDIR in ../build_win32
 do
     for module in $MODULES
     do
         case "$module" in
         dynamic )
-            create_dsp db_dll " $module " $TMPA \
-                    $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_dll " $module " $TMPA $BUILDDIR/dynamic_dsp.src
             ;;
         small )
-            create_dsp db_small " $module " $TMPA \
-                    $BUILDDIR/small_dsp.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_small " $module " $TMPA $BUILDDIR/static_dsp.src \
+                '/D "HAVE_SMALLBUILD"' '' '' _small
             ;;
         static )
-            create_dsp db_static " $module " $TMPA \
-                    $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_static " $module " $TMPA $BUILDDIR/static_dsp.src
             ;;
         java )
-            create_dsp db_java " $module " $TMPA \
-                    $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_java " $module " $TMPA $BUILDDIR/dynamic_dsp.src '' \
+                'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib' \
+                'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib' _java
             ;;
         tcl )
-            create_dsp db_tcl " $module " $TMPA \
-                    $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_tcl " $module " $TMPA $BUILDDIR/dynamic_dsp.src \
+                '/D "DB_TCL_SUPPORT"' \
+                'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl84.lib' \
+                'libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl84g.lib' _tcl
             ;;
         testutil )
-            create_dsp db_test " $module " $TMPA \
-                    $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src
+            create_dsp db_test " $module " $TMPA $BUILDDIR/app_dsp.src \
+	        '' '/out:"$(OUTDIR)/dbkill.exe"' '/out:"$(OUTDIR)/dbkill.exe"'
+            ;;
+        app=ex_repquote )
+            create_dsp ex_repquote " $module " $TMPA \
+                    $BUILDDIR/app_dsp.src '' 'ws2_32.lib' 'ws2_32.lib'
             ;;
         app=* )
             appname=`echo $module | sed -e 's/^app=//'`
-            if [ -f $BUILDDIR/$appname.src ] ; then
-                    srcname=$BUILDDIR/$appname.src
-            else
-                    srcname=$BUILDDIR/app_dsp.src
-            fi
             create_dsp $appname " $module " $TMPA \
-                    $srcname $BUILDDIR/srcfile_dsp.src
+                    $BUILDDIR/app_dsp.src
             ;;
         vx|vxsmall )
             ;;
diff --git a/storage/bdb/dist/s_winmsi b/storage/bdb/dist/s_winmsi
index d7dc42770cf..23b9afe4ef9 100644
--- a/storage/bdb/dist/s_winmsi
+++ b/storage/bdb/dist/s_winmsi
@@ -1,5 +1,5 @@
 #!/bin/bash -
-#	$Id: s_winmsi,v 1.6 2005/04/15 18:48:57 philipr Exp $
+#	$Id: s_winmsi,v 1.7 2005/10/26 13:29:32 dda Exp $
 #
 # Note: The s_winmsi script in Berkeley DB core closely parallels the
 # s_winmsi script in Berkeley DB/XML.  If you change one,
@@ -72,6 +72,14 @@ PRODUCT_IMAGEDIR=$PRODUCT_SHARED_WINMSIDIR/images
 PRODUCT_ZIP_FILEFMT="db-X.Y.Z.NC.zip"
 PRODUCT_MSI_FILEFMT="db-X.Y.Z.NC.msi"
 
+PRODUCT_MAJOR=`echo "$PRODUCT_VERSION" | \
+    sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\1/'`
+PRODUCT_MINOR=`echo "$PRODUCT_VERSION" | \
+    sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\2/'`
+PRODUCT_PATCH=`echo "$PRODUCT_VERSION" | \
+    sed -e 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\)/\3/'`
+PRODUCT_MAJMIN="${PRODUCT_MAJOR}${PRODUCT_MINOR}"
+
 # Gather command line options, and use reasonable defaults
 SetupOptions \
         -input "$dbver.zip" \
diff --git a/storage/bdb/dist/srcfiles.in b/storage/bdb/dist/srcfiles.in
index b00be5a10f0..530c216934b 100644
--- a/storage/bdb/dist/srcfiles.in
+++ b/storage/bdb/dist/srcfiles.in
@@ -1,4 +1,4 @@
-#	$Id: srcfiles.in,v 1.87 2004/10/30 21:07:01 bostic Exp $
+#	$Id: srcfiles.in,v 12.24 2005/10/27 14:45:25 bostic Exp $
 #
 # This is an input file for the s_win32_dsp and s_vxworks scripts.  It lists
 # the source files in the Berkeley DB tree and notes which are used to build
@@ -21,6 +21,7 @@
 #   vx			File is in the VxWorks library.
 #   vxsmall		File is in the small VxWorks library.
 
+btree/bt_compact.c				dynamic small static vx vxsmall
 btree/bt_compare.c				dynamic small static vx vxsmall
 btree/bt_conv.c					dynamic small static vx vxsmall
 btree/bt_curadj.c				dynamic small static vx vxsmall
@@ -44,6 +45,7 @@ build_vxworks/db_archive/db_archive.c
 build_vxworks/db_checkpoint/db_checkpoint.c
 build_vxworks/db_deadlock/db_deadlock.c
 build_vxworks/db_dump/db_dump.c
+build_vxworks/db_hotbackup/db_hotbackup.c
 build_vxworks/db_load/db_load.c
 build_vxworks/db_printlog/db_printlog.c
 build_vxworks/db_recover/db_recover.c
@@ -68,6 +70,7 @@ clib/strtol.c
 clib/strtoul.c
 common/crypto_stub.c				small vxsmall
 common/db_byteorder.c				dynamic small static vx vxsmall
+common/db_clock.c				dynamic static vx
 common/db_err.c					dynamic small static vx vxsmall
 common/db_getlong.c				dynamic small static vx vxsmall
 common/db_idspace.c				dynamic small static vx vxsmall
@@ -131,6 +134,7 @@ db_checkpoint/db_checkpoint.c			app=db_checkpoint
 db_deadlock/db_deadlock.c			app=db_deadlock
 db_dump/db_dump.c				app=db_dump
 db_dump185/db_dump185.c
+db_hotbackup/db_hotbackup.c			app=db_hotbackup
 db_load/db_load.c				app=db_load
 db_printlog/db_printlog.c			app=db_printlog
 db_recover/db_recover.c				app=db_recover
@@ -149,13 +153,24 @@ dbreg/dbreg_stat.c				dynamic small static vx vxsmall
 dbreg/dbreg_util.c				dynamic small static vx vxsmall
 env/db_salloc.c					dynamic small static vx vxsmall
 env/db_shash.c					dynamic small static vx vxsmall
+env/env_failchk.c				dynamic small static vx vxsmall
 env/env_file.c					dynamic small static vx vxsmall
 env/env_method.c				dynamic small static vx vxsmall
 env/env_open.c					dynamic small static vx vxsmall
 env/env_recover.c				dynamic small static vx vxsmall
 env/env_region.c				dynamic small static vx vxsmall
+env/env_register.c				dynamic small static vx vxsmall
 env/env_stat.c					dynamic small static vx vxsmall
 examples_c/bench_001.c
+examples_c/csv/DbRecord.c			app=ex_csvload app=ex_csvquery
+examples_c/csv/code.c				app=ex_csvcode
+examples_c/csv/csv_local.c			app=ex_csvload app=ex_csvquery
+examples_c/csv/db.c				app=ex_csvload app=ex_csvquery
+examples_c/csv/load.c				app=ex_csvload
+examples_c/csv/load_main.c			app=ex_csvload
+examples_c/csv/query.c				app=ex_csvquery
+examples_c/csv/query_main.c			app=ex_csvquery
+examples_c/csv/util.c				app=ex_csvload app=ex_csvquery
 examples_c/ex_access.c				app=ex_access
 examples_c/ex_apprec/ex_apprec.c
 examples_c/ex_apprec/ex_apprec_auto.c
@@ -171,22 +186,26 @@ examples_c/ex_repquote/ex_rq_main.c		app=ex_repquote
 examples_c/ex_repquote/ex_rq_master.c		app=ex_repquote
 examples_c/ex_repquote/ex_rq_net.c		app=ex_repquote
 examples_c/ex_repquote/ex_rq_util.c		app=ex_repquote
-examples_c/ex_sequence.c
+examples_c/ex_sequence.c			app=ex_sequence
 examples_c/ex_thread.c
 examples_c/ex_tpcb.c				app=ex_tpcb
-examples_c/getting_started/example_database_load.c
-examples_c/getting_started/example_database_read.c
-examples_c/getting_started/gettingstarted_common.c
+examples_c/getting_started/example_database_load.c	app=example_database_load
+examples_c/getting_started/example_database_read.c	app=example_database_read
+examples_c/getting_started/gettingstarted_common.c	app=example_database_load app=example_database_read
+examples_c/txn_guide/txn_guide.c		app=ex_txnguide
+examples_c/txn_guide/txn_guide_inmemory.c	app=ex_txnguide_inmem
 examples_cxx/AccessExample.cpp			app=excxx_access
 examples_cxx/BtRecExample.cpp			app=excxx_btrec
 examples_cxx/EnvExample.cpp			app=excxx_env
 examples_cxx/LockExample.cpp			app=excxx_lock
 examples_cxx/MpoolExample.cpp			app=excxx_mpool
-examples_cxx/SequenceExample.cpp
+examples_cxx/SequenceExample.cpp		app=excxx_sequence
 examples_cxx/TpcbExample.cpp			app=excxx_tpcb
-examples_cxx/getting_started/MyDb.cpp
-examples_cxx/getting_started/excxx_example_database_load.cpp
-examples_cxx/getting_started/excxx_example_database_read.cpp
+examples_cxx/getting_started/MyDb.cpp		app=excxx_example_database_load app=excxx_example_database_read
+examples_cxx/getting_started/excxx_example_database_load.cpp	app=excxx_example_database_load
+examples_cxx/getting_started/excxx_example_database_read.cpp	app=excxx_example_database_read
+examples_cxx/txn_guide/TxnGuide.cpp		app=excxx_txnguide
+examples_cxx/txn_guide/TxnGuideInMemory.cpp	app=excxx_txnguide_inmem
 fileops/fileops_auto.c				dynamic small static vx vxsmall
 fileops/fileops_autop.c				app=db_printlog
 fileops/fop_basic.c				dynamic small static vx vxsmall
@@ -215,6 +234,7 @@ hsearch/hsearch.c				dynamic static vx
 libdb_java/db_java_wrap.c			java
 lock/lock.c					dynamic small static vx vxsmall
 lock/lock_deadlock.c				dynamic small static vx vxsmall
+lock/lock_failchk.c				dynamic small static vx vxsmall
 lock/lock_id.c					dynamic small static vx vxsmall
 lock/lock_list.c				dynamic small static vx vxsmall
 lock/lock_method.c				dynamic small static vx vxsmall
@@ -225,6 +245,7 @@ lock/lock_util.c				dynamic small static vx vxsmall
 log/log.c					dynamic small static vx vxsmall
 log/log_archive.c				dynamic small static vx vxsmall
 log/log_compare.c				dynamic small static vx vxsmall
+log/log_debug.c					dynamic small static vx vxsmall
 log/log_get.c					dynamic small static vx vxsmall
 log/log_method.c				dynamic small static vx vxsmall
 log/log_put.c					dynamic small static vx vxsmall
@@ -242,12 +263,15 @@ mp/mp_register.c				dynamic small static vx vxsmall
 mp/mp_stat.c					dynamic small static vx vxsmall
 mp/mp_sync.c					dynamic small static vx vxsmall
 mp/mp_trickle.c					dynamic small static vx vxsmall
+mutex/mut_alloc.c				dynamic small static vx vxsmall
 mutex/mut_fcntl.c
+mutex/mut_method.c				dynamic small static vx vxsmall
 mutex/mut_pthread.c
+mutex/mut_region.c				dynamic small static vx vxsmall
+mutex/mut_stat.c				dynamic small static vx vxsmall
 mutex/mut_tas.c					vx vxsmall
 mutex/mut_win32.c				dynamic small static
-mutex/mutex.c					dynamic small static vx vxsmall
-mutex/tm.c
+mutex/tm.c					app=tm
 os/os_abs.c
 os/os_alloc.c					dynamic small static vx vxsmall
 os/os_clock.c					vx vxsmall
@@ -255,11 +279,13 @@ os/os_config.c
 os/os_dir.c					vx vxsmall
 os/os_errno.c					vx vxsmall
 os/os_fid.c					vx vxsmall
+os/os_flock.c					vx vxsmall
 os/os_fsync.c					vx vxsmall
 os/os_handle.c					vx vxsmall
 os/os_id.c					dynamic small static vx vxsmall
 os/os_map.c
 os/os_method.c					dynamic small static vx vxsmall
+os/os_mkdir.c					dynamic small static vx vxsmall
 os/os_oflags.c					dynamic small static vx vxsmall
 os/os_open.c					vx vxsmall
 os/os_region.c					dynamic small static vx vxsmall
@@ -283,6 +309,7 @@ os_win32/os_config.c				dynamic small static
 os_win32/os_dir.c				dynamic small static
 os_win32/os_errno.c				dynamic small static
 os_win32/os_fid.c				dynamic small static
+os_win32/os_flock.c				dynamic small static
 os_win32/os_fsync.c				dynamic small static
 os_win32/os_handle.c				dynamic small static
 os_win32/os_map.c				dynamic small static
@@ -310,12 +337,15 @@ qam/qam_verify.c				dynamic static vx
 rep/rep_auto.c					dynamic static vx
 rep/rep_autop.c					app=db_printlog
 rep/rep_backup.c				dynamic static vx
+rep/rep_elect.c					dynamic static vx
+rep/rep_log.c					dynamic static vx
 rep/rep_method.c				dynamic static vx
 rep/rep_record.c				dynamic static vx
 rep/rep_region.c				dynamic static vx
 rep/rep_stat.c					dynamic static vx
 rep/rep_stub.c					small vxsmall
 rep/rep_util.c					dynamic static vx
+rep/rep_verify.c				dynamic static vx
 rpc_client/client.c
 rpc_client/gen_client.c
 rpc_client/gen_client_ret.c
@@ -362,6 +392,8 @@ test_perf/perf_vx.c
 txn/txn.c					dynamic small static vx vxsmall
 txn/txn_auto.c					dynamic small static vx vxsmall
 txn/txn_autop.c					app=db_printlog
+txn/txn_chkpt.c					dynamic small static vx vxsmall
+txn/txn_failchk.c				dynamic small static vx vxsmall
 txn/txn_method.c				dynamic small static vx vxsmall
 txn/txn_rec.c					dynamic small static vx vxsmall
 txn/txn_recover.c				dynamic small static vx vxsmall
diff --git a/storage/bdb/dist/vx_buildcd b/storage/bdb/dist/vx_buildcd
index a94d78db974..72bd10b8d52 100755
--- a/storage/bdb/dist/vx_buildcd
+++ b/storage/bdb/dist/vx_buildcd
@@ -1,5 +1,5 @@
 #!/bin/sh
-#	$Id: vx_buildcd,v 1.6 2001/11/05 21:05:58 sue Exp $
+#	$Id: vx_buildcd,v 12.0 2004/11/17 03:43:37 bostic Exp $
 #
 # Build the Setup SDK CD image on the VxWorks host machine.
 
diff --git a/storage/bdb/dist/vx_config.in b/storage/bdb/dist/vx_config.in
index 29e679e548f..539600d62ab 100644
--- a/storage/bdb/dist/vx_config.in
+++ b/storage/bdb/dist/vx_config.in
@@ -31,6 +31,9 @@
 /* Define to 1 if you want a version with run-time diagnostic checking. */
 /* #undef DIAGNOSTIC */
 
+/* Define to 1 if 64-bit types are available. */
+#define HAVE_64BIT_TYPES 1
+
 /* Define to 1 if you have the `clock_gettime' function. */
 #define HAVE_CLOCK_GETTIME 1
 
@@ -50,6 +53,12 @@
 /* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
 #define HAVE_EXIT_SUCCESS 1
 
+/* Define to 1 if you have the `fchmod' function. */
+/* #undef HAVE_FCHMOD */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef HAVE_FCNTL */
+
 /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
 /* #undef HAVE_FCNTL_F_SETFD */
 
@@ -60,7 +69,7 @@
 #define HAVE_FILESYSTEM_NOTZERO 1
 
 /* Define to 1 if you have the `ftruncate' function. */
-/* #undef HAVE_FTRUNCATE */
+#define HAVE_FTRUNCATE 1
 
 /* Define to 1 if you have the `getcwd' function. */
 #define HAVE_GETCWD 1
@@ -80,15 +89,15 @@
 /* Define to 1 if building Hash access method. */
 #define HAVE_HASH 1
 
+/* Define to 1 if thread identifier type db_threadid_t is integral. */
+#define HAVE_INTEGRAL_THREAD_TYPE 1
+
 /* Define to 1 if you have the <inttypes.h> header file. */
 /* #undef HAVE_INTTYPES_H */
 
 /* Define to 1 if you have the `nsl' library (-lnsl). */
 /* #undef HAVE_LIBNSL */
 
-/* Define to 1 if the system has the type `long long'. */
-/* #undef HAVE_LONG_LONG */
-
 /* Define to 1 if you have the `memcmp' function. */
 #define HAVE_MEMCMP 1
 
@@ -141,6 +150,9 @@
 /* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
 /* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
 
+/* Define to 1 to use the GCC compiler and MIPS assembly language mutexes. */
+/* #undef HAVE_MUTEX_MIPS_GCC_ASSEMBLY */
+
 /* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
 /* #undef HAVE_MUTEX_MSEM_INIT */
 
@@ -182,9 +194,6 @@
 /* Define to 1 if mutexes hold system resources. */
 #define HAVE_MUTEX_SYSTEM_RESOURCES 1
 
-/* Define to 1 if fast mutexes are available. */
-#define HAVE_MUTEX_THREADS 1
-
 /* Define to 1 to configure mutexes intra-process only. */
 /* #undef HAVE_MUTEX_THREAD_ONLY */
 
@@ -209,6 +218,9 @@
 /* Define to 1 to use the GCC compiler and Windows mutexes. */
 /* #undef HAVE_MUTEX_WIN32_GCC */
 
+/* Define to 1 to use the GCC compiler and amd64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_64_GCC_ASSEMBLY */
+
 /* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
 /* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
 
@@ -224,6 +236,9 @@
 /* Define to 1 if you have the `pstat_getdynamic' function. */
 /* #undef HAVE_PSTAT_GETDYNAMIC */
 
+/* Define to 1 if you have the `pthread_self' function. */
+/* #undef HAVE_PTHREAD_SELF */
+
 /* Define to 1 if you have the `pwrite' function. */
 /* #undef HAVE_PWRITE */
 
@@ -328,9 +343,6 @@
 /* Define to 1 if unlink of file with open file descriptors will fail. */
 #define HAVE_UNLINK_WITH_OPEN_FAILURE 1
 
-/* Define to 1 if the system has the type `unsigned long long'. */
-/* #undef HAVE_UNSIGNED_LONG_LONG */
-
 /* Define to 1 if building access method verification support. */
 #define HAVE_VERIFY 1
 
@@ -346,9 +358,6 @@
 /* Define to 1 if you have the `_fstati64' function. */
 /* #undef HAVE__FSTATI64 */
 
-/* Define to a value if using non-standard mutex alignment. */
-/* #undef MUTEX_ALIGN */
-
 /* Define to the address where bug reports for this package should be sent. */
 #define PACKAGE_BUGREPORT "support@sleepycat.com"
 
diff --git a/storage/bdb/dist/vx_setup/LICENSE.TXT b/storage/bdb/dist/vx_setup/LICENSE.TXT
index f31971375f3..812dce00d49 100644
--- a/storage/bdb/dist/vx_setup/LICENSE.TXT
+++ b/storage/bdb/dist/vx_setup/LICENSE.TXT
@@ -1,3 +1,3 @@
-Copyright (c) 1996-2004
+Copyright (c) 1996-2005
 	Sleepycat Software.  All rights reserved.
 See the file LICENSE for redistribution information.
diff --git a/storage/bdb/dist/win_config.in b/storage/bdb/dist/win_config.in
index 3406c8774f4..56814d115c2 100644
--- a/storage/bdb/dist/win_config.in
+++ b/storage/bdb/dist/win_config.in
@@ -24,6 +24,9 @@
 /* Define to 1 if you want a version with run-time diagnostic checking. */
 /* #undef DIAGNOSTIC */
 
+/* Define to 1 if 64-bit types are available. */
+#define HAVE_64BIT_TYPES 1
+
 /* Define to 1 if you have the `clock_gettime' function. */
 /* #undef HAVE_CLOCK_GETTIME */
 
@@ -45,6 +48,12 @@
 /* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
 #define HAVE_EXIT_SUCCESS 1
 
+/* Define to 1 if you have the `fchmod' function. */
+/* #undef HAVE_FCHMOD */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef HAVE_FCNTL */
+
 /* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
 /* #undef HAVE_FCNTL_F_SETFD */
 
@@ -77,15 +86,15 @@
 #define HAVE_HASH 1
 #endif
 
+/* Define to 1 if thread identifier type db_threadid_t is integral. */
+#define HAVE_INTEGRAL_THREAD_TYPE 1
+
 /* Define to 1 if you have the <inttypes.h> header file. */
 /* #undef HAVE_INTTYPES_H */
 
 /* Define to 1 if you have the `nsl' library (-lnsl). */
 /* #undef HAVE_LIBNSL */
 
-/* Define to 1 if the system has the type `long long'. */
-#define HAVE_LONG_LONG 1
-
 /* Define to 1 if you have the `memcmp' function. */
 #define HAVE_MEMCMP 1
 
@@ -138,6 +147,9 @@
 /* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
 /* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
 
+/* Define to 1 to use the GCC compiler and MIPS assembly language mutexes. */
+/* #undef HAVE_MUTEX_MIPS_GCC_ASSEMBLY */
+
 /* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
 /* #undef HAVE_MUTEX_MSEM_INIT */
 
@@ -179,9 +191,6 @@
 /* Define to 1 if mutexes hold system resources. */
 /* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
 
-/* Define to 1 if fast mutexes are available. */
-#define HAVE_MUTEX_THREADS 1
-
 /* Define to 1 to configure mutexes intra-process only. */
 /* #undef HAVE_MUTEX_THREAD_ONLY */
 
@@ -206,6 +215,9 @@
 /* Define to 1 to use the GCC compiler and Windows mutexes. */
 /* #undef HAVE_MUTEX_WIN32_GCC */
 
+/* Define to 1 to use the GCC compiler and amd64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_64_GCC_ASSEMBLY */
+
 /* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
 /* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
 
@@ -221,6 +233,9 @@
 /* Define to 1 if you have the `pstat_getdynamic' function. */
 /* #undef HAVE_PSTAT_GETDYNAMIC */
 
+/* Define to 1 if you have the `pthread_self' function. */
+/* #undef HAVE_PTHREAD_SELF */
+
 /* Define to 1 if you have the `pwrite' function. */
 /* #undef HAVE_PWRITE */
 
@@ -329,9 +344,6 @@
 /* Define to 1 if unlink of file with open file descriptors will fail. */
 /* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
 
-/* Define to 1 if the system has the type `unsigned long long'. */
-#define HAVE_UNSIGNED_LONG_LONG 1
-
 /* Define to 1 if building access method verification support. */
 #ifndef HAVE_SMALLBUILD
 #define HAVE_VERIFY 1
@@ -349,9 +361,6 @@
 /* Define to 1 if you have the `_fstati64' function. */
 #define HAVE__FSTATI64 1
 
-/* Define to a value if using non-standard mutex alignment. */
-/* #undef MUTEX_ALIGN */
-
 /* Define to the address where bug reports for this package should be sent. */
 #define PACKAGE_BUGREPORT "support@sleepycat.com"
 
diff --git a/storage/bdb/dist/win_db.in b/storage/bdb/dist/win_db.in
index 609d8460afa..433eaa6c09f 100644
--- a/storage/bdb/dist/win_db.in
+++ b/storage/bdb/dist/win_db.in
@@ -1,10 +1,15 @@
 /*-
- * $Id: win_db.in,v 11.4 2004/10/07 13:59:24 carol Exp $
+ * $Id: win_db.in,v 12.7 2005/11/02 03:12:17 mjc Exp $
  *
  * The following provides the information necessary to build Berkeley
  * DB on native Windows, and other Windows environments such as MinGW.
  */
 
+/*
+ * Avoid warnings with Visual Studio 8.
+ */
+#define _CRT_SECURE_NO_DEPRECATE 1
+
 #include <sys/types.h>
 #include <sys/stat.h>
 
@@ -40,9 +45,12 @@
 #define	NO_SYSTEM_INCLUDES
 
 /*
- * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ * Microsoft's C runtime library has fsync, getcwd, getpid, snprintf and
+ * vsnprintf, but under different names.
  */
+#define	fsync			_commit
 #define	getcwd(buf, size)	_getcwd(buf, size)
+#define	getpid			_getpid
 #define	snprintf		_snprintf
 #define	vsnprintf		_vsnprintf
 
@@ -67,7 +75,7 @@ extern int getopt(int, char * const *, const char *);
 
 #ifdef _UNICODE
 #define TO_TSTRING(dbenv, s, ts, ret) do {				\
-		int __len = strlen(s) + 1;				\
+		int __len = (int)strlen(s) + 1;				\
 		ts = NULL;						\
 		if ((ret = __os_malloc((dbenv),				\
 		    __len * sizeof (_TCHAR), &(ts))) == 0 &&		\
@@ -98,3 +106,15 @@ extern int getopt(int, char * const *, const char *);
 #define FROM_TSTRING(dbenv, ts, s, ret) (ret) = 0, (s) = (char *)(ts)
 #define FREE_STRING(dbenv, ts)
 #endif
+
+#ifndef INVALID_HANDLE_VALUE
+#define INVALID_HANDLE_VALUE ((HANDLE)-1)
+#endif
+
+#ifndef INVALID_FILE_ATTRIBUTES
+#define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
+#endif
+
+#ifndef INVALID_SET_FILE_POINTER
+#define INVALID_SET_FILE_POINTER ((DWORD)-1)
+#endif
diff --git a/storage/bdb/dist/win_exports.in b/storage/bdb/dist/win_exports.in
index 9087875ccf6..8aa6be789a2 100644
--- a/storage/bdb/dist/win_exports.in
+++ b/storage/bdb/dist/win_exports.in
@@ -1,4 +1,4 @@
-# $Id: win_exports.in,v 1.41 2004/10/12 17:44:10 bostic Exp $
+# $Id: win_exports.in,v 12.12 2005/10/14 06:19:27 mjc Exp $
 
 # Standard interfaces.
 	db_create
@@ -65,9 +65,6 @@
 	__db_panic
 	__db_r_attach
 	__db_r_detach
-	__db_win32_mutex_init
-	__db_win32_mutex_lock
-	__db_win32_mutex_unlock
 	__ham_func2
 	__ham_func3
 	__ham_func4
@@ -76,24 +73,30 @@
 	__lock_id_set
 	__os_calloc
 	__os_closehandle
+	__os_dirfree
+	__os_dirlist
 	__os_free
 	__os_ioinfo
 	__os_malloc
+	__os_mkdir
 	__os_open
 	__os_openhandle
 	__os_read
 	__os_realloc
 	__os_strdup
 	__os_umalloc
+	__os_unlink
 	__os_write
 	__txn_id_set
 
-#These are needed for linking tools or java.
+# These are needed for linking tools or java.
 	__bam_adj_read
 	__bam_cadjust_read
 	__bam_cdel_read
 	__bam_curadj_read
+	__bam_merge_read
 	__bam_pgin
+	__bam_pgno_read
 	__bam_pgout
 	__bam_rcuradj_read
 	__bam_relink_read
@@ -102,22 +105,24 @@
 	__bam_rsplit_read
 	__bam_split_read
 	__crdel_metasub_read
+	__crdel_inmem_create_read
+	__crdel_inmem_rename_read
+	__crdel_inmem_remove_read
 	__db_addrem_read
 	__db_big_read
 	__db_cksum_read
 	__db_debug_read
 	__db_dispatch
+	__db_dl
 	__db_dumptree
 	__db_err
-	__db_fileid_reset
 	__db_getlong
 	__db_getulong
 	__db_global_values
 	__db_isbigendian
-	__db_lsn_reset
+	__db_msg
 	__db_noop_read
 	__db_omode
-	__db_overwrite
 	__db_ovref_read
 	__db_pg_alloc_read
 	__db_pg_free_read
@@ -125,6 +130,7 @@
 	__db_pg_init_read
 	__db_pg_new_read
 	__db_pg_prepare_read
+	__db_pg_sort_read
 	__db_pgin
 	__db_pgout
 	__db_pr_callback
@@ -158,12 +164,18 @@
 	__ham_splitdata_read
 	__lock_list_print
 	__log_stat_pp
+	__mutex_set_wait_info
 	__os_clock
+	__os_exists
 	__os_get_errno
 	__os_id
+	__os_mapfile
+	__os_seek
 	__os_set_errno
 	__os_sleep
+	__os_spin
 	__os_ufree
+	__os_unmapfile
 	__os_yield
 	__qam_add_read
 	__qam_del_read
diff --git a/storage/bdb/dist/winmsi/files.in b/storage/bdb/dist/winmsi/files.in
index b3d5f4ba8bd..ed04b788dcf 100644
--- a/storage/bdb/dist/winmsi/files.in
+++ b/storage/bdb/dist/winmsi/files.in
@@ -1,17 +1,19 @@
-# $Id: files.in,v 1.7 2005/04/15 19:00:36 philipr Exp $
+# $Id: files.in,v 1.8 2005/10/26 13:29:32 dda Exp $
 # Lists files needed to install particular
 # features in Windows.  Feature names must be
 # listed in features.in .
+#
 
 # Note: columns below must be separated by tabs.
+#       ${PRODUCT_MAJMIN} is a macro for "43" (for BDB 4.3.x)
 
 # feature	source file				targdir	shortname
 
-CoreAPI		build_win32/Release/libdb43.dll		/bin/
-DCoreAPI	build_win32/Debug/libdb43d.dll		/bin/debug/
+CoreAPI		build_win32/Release/libdb${PRODUCT_MAJMIN}.dll	/bin/
+DCoreAPI	build_win32/Debug/libdb${PRODUCT_MAJMIN}d.dll	/bin/debug/
 DCoreAPI	build_win32/Debug/db_dll.pdb		/bin/debug/
-CoreAPI		build_win32/Release/libdb43.lib		/lib/
-DCoreAPI	build_win32/Debug/libdb43d.lib		/lib/
+CoreAPI		build_win32/Release/libdb${PRODUCT_MAJMIN}.lib	/lib/
+DCoreAPI	build_win32/Debug/libdb${PRODUCT_MAJMIN}d.lib	/lib/
 CoreAPI		build_win32/Release/msvcr71.dll		/bin/
 DCoreAPI	build_win32/Debug/msvcr71d.dll		/bin/debug/
 CoreAPI		build_win32/Release/msvcp71.dll		/bin/
@@ -26,8 +28,8 @@ CoreAPI		${PRODUCT_STAGE}/dbvars.bat		/
 
 # We don't include the .lib files for Java, nobody needs to
 # link C/C++ against the java library
-JavaAPI		build_win32/Release/libdb_java43.dll	/bin/	dbj43.dll
-DJavaAPI 	build_win32/Debug/libdb_java43d.dll	/bin/debug/	dbj43d.dll
+JavaAPI		build_win32/Release/libdb_java${PRODUCT_MAJMIN}.dll	/bin/	dbj${PRODUCT_MAJMIN}.dll
+DJavaAPI 	build_win32/Debug/libdb_java${PRODUCT_MAJMIN}d.dll	/bin/debug/	dbj${PRODUCT_MAJMIN}d.dll
 DJavaAPI 	build_win32/Debug/db_java.pdb		/bin/debug/
 JavaAPI		build_win32/Release/db.jar		/jar/
 DJavaAPI	build_win32/Debug/db.jar		/jar/debug/
@@ -37,9 +39,9 @@ DJavaEx		build_win32/Debug/dbexamples.jar	/jar/debug/	dbexam~1.jar
 
 # We don't include the .lib files for Tcl, nobody needs to
 # link C/C++ against the Tcl library
-DTclAPI		build_win32/Debug/libdb_tcl43d.dll	/bin/debug/	dbt43d.dll
+DTclAPI		build_win32/Debug/libdb_tcl${PRODUCT_MAJMIN}d.dll	/bin/debug/	dbt${PRODUCT_MAJMIN}d.dll
 DTclAPI		build_win32/Debug/db_tcl.pdb		/bin/debug/
-TclAPI		build_win32/Release/libdb_tcl43.dll	/bin/	dbt43.dll
+TclAPI		build_win32/Release/libdb_tcl${PRODUCT_MAJMIN}.dll	/bin/	dbt${PRODUCT_MAJMIN}.dll
 
 PerlAPI		perl/BerkeleyDB/blib/			/lib/perl/
 
diff --git a/storage/bdb/dist/winmsi/links.in b/storage/bdb/dist/winmsi/links.in
index 121102f2419..2907e8cecfa 100644
--- a/storage/bdb/dist/winmsi/links.in
+++ b/storage/bdb/dist/winmsi/links.in
@@ -1,4 +1,4 @@
-# $Id: links.in,v 1.3 2005/04/15 19:01:13 philipr Exp $
+# $Id: links.in,v 1.4 2005/10/27 11:00:39 dda Exp $
 # Lists links to Web sites to be installed on Windows.
 # If the URL starts with file: it is assumed to be a local file,
 # relative to the installation directory.
@@ -9,10 +9,10 @@
 
 sleepycp	"Command Prompt"	"cmd:dbvars.bat"
 sleepywb	"Sleepycat Website"	"http://www.sleepycat.com"
+sleepywb	"Sleepycat Developer Zone"	"http://dev.sleepycat.com"
 sleepych	"Change Log for Berkeley DB WIX_DB_VERSION"	"http://www.sleepycat.com/update/WIX_DB_VERSION/if.WIX_DB_VERSION.html"
 sleepynw	"Berkeley DB Newsgroup"	"http://groups-beta.google.com/group/comp.databases.berkeley-db"
-sleepybl	"Blog for Berkeley DB"	"http://www.sleepycat.com/blogs/db/"
-sleepysu	"Support for Berkeley DB"	"http://www.sleepycat.com/supports/index.shtml"
-sleepyon	"Online Documentation"	"http://www.sleepycat.com/supports/documentation.shtml/"
+sleepysu	"Support for Berkeley DB"	"http://www.sleepycat.com/services/support.html"
+sleepyon	"Online Documentation"	"http://dev.sleepycat.com/documentation/bdb.html"
 sleepyst	"Company Store"		"http://www.companystuffonline.com/sleepycat/"
 sleepyld	"On disk Documentation"	"file:docs/index.html"
diff --git a/storage/bdb/dist/winmsi/s_winmsi.fcn b/storage/bdb/dist/winmsi/s_winmsi.fcn
index e053e25ea59..3d0ccf83828 100644
--- a/storage/bdb/dist/winmsi/s_winmsi.fcn
+++ b/storage/bdb/dist/winmsi/s_winmsi.fcn
@@ -1,4 +1,4 @@
-#	$Id: s_winmsi.fcn,v 1.8 2005/04/16 15:56:44 philipr Exp $
+#	$Id: s_winmsi.fcn,v 1.9 2005/10/26 13:29:34 dda Exp $
 #
 # The common functions used by the s_winmsi scripts (both
 # for core DB and DB/XML).
@@ -12,6 +12,10 @@
 #   ERRORLOG             - a filename
 #   PRODUCT_NAME         - e.g. "Berkeley DB"
 #   PRODUCT_VERSION      - e.g. "4.1.25", derived from dist/RELEASE
+#   PRODUCT_MAJOR        - e.g. "4", (for release 4.1.25) from dist/RELEASE
+#   PRODUCT_MINOR        - e.g. "1", (for release 4.1.25) from dist/RELEASE
+#   PRODUCT_PATCH        - e.g. "25", (for release 4.1.25) from dist/RELEASE
+#   PRODUCT_MAJMIN       - e.g. "41", (for release 4.1.25) from dist/RELEASE
 #   PRODUCT_STAGE        - the staging directory for temp files and builds
 #   PRODUCT_LICENSEDIR   - the tree containing LICENSE and README
 #   PRODUCT_SUB_BLDDIR  - top of the subproduct build e.g. "dbxml-2.0.1/dbxml"
@@ -649,12 +653,12 @@ ProcessOneFeature() {
       XmlLevel=4
       local featcount=0
       local featurestring=""
-      while [ $(SlashCount $featurename) != 0 ]; do
-         Parent=`echo $featurename | sed -e 's:/.*::'`
-         featurename=`echo $featurename | sed -e 's:^[^/]*/::'`
-         featcount=$(($featcount + 1))
-         Xecho "" >> $outfeature
-      done
+      if [ $(SlashCount $featurename) -gt 0 ]; then
+         local parent=`echo $featurename | sed -e 's:/[^/]*$::' -e 's:.*/::'`
+         featurename=`echo $featurename | sed -e 's:^.*/::'`
+         featcount=1
+         Xecho "" >> $outfeature
+      fi
 
 
       # TODO: how to get +default to work?
@@ -949,6 +953,10 @@ CleanInputFile() {
         -e 's/$/	/' \
         -e 's/		*/	/g'  \
         -e 's:\${PRODUCT_VERSION}:'"${PRODUCT_VERSION}":g \
+        -e 's:\${PRODUCT_MAJOR}:'"${PRODUCT_MAJOR}":g \
+        -e 's:\${PRODUCT_MINOR}:'"${PRODUCT_MINOR}":g \
+        -e 's:\${PRODUCT_PATCH}:'"${PRODUCT_PATCH}":g \
+        -e 's:\${PRODUCT_MAJMIN}:'"${PRODUCT_MAJMIN}":g \
         -e 's:\${PRODUCT_STAGE}:'"${PRODUCT_STAGE}":g \
 	-e 's:\${PRODUCT_SHARED_WINMSIDIR}:'"${PRODUCT_SHARED_WINMSIDIR}":g \
           < "$1" > "$2"
diff --git a/storage/bdb/dist/winmsi/winbuild.bat b/storage/bdb/dist/winmsi/winbuild.bat
index b8c9a34add2..80b9dcf6446 100644
--- a/storage/bdb/dist/winmsi/winbuild.bat
+++ b/storage/bdb/dist/winmsi/winbuild.bat
@@ -1,5 +1,5 @@
 @echo off
-::	$Id: winbuild.bat,v 1.4 2005/04/15 19:01:52 philipr Exp $
+::	$Id: winbuild.bat,v 1.5 2005/10/26 13:29:34 dda Exp $
 ::	Helper script to build Berkeley DB libraries and executables
 ::	using MSDEV
 ::
@@ -68,12 +68,6 @@ devenv /useenv /build Debug /project build_all Berkeley_DB.sln >> ..\winbld.out
 if not %errorlevel% == 0 goto ERROR
 devenv /useenv /build Release /project build_all Berkeley_DB.sln >> ..\winbld.out 2>&1
 if not %errorlevel% == 0 goto ERROR
-devenv /useenv /build "Debug Static" /project build_all Berkeley_DB.sln >> ..\winbld.out 2>&1
-if not %errorlevel% == 0 goto ERROR
-devenv /useenv /build "Release Static" /project build_all Berkeley_DB.sln >> ..\winbld.out 2>&1
-if not %errorlevel% == 0 goto ERROR
-devenv /useenv /build Debug /project ex_repquote Berkeley_DB.sln >> ..\winbld.out 2>&1
-if not %errorlevel% == 0 goto ERROR
 devenv /useenv /build Debug /project db_java Berkeley_DB.sln >> ..\winbld.out 2>&1
 if not %errorlevel% == 0 goto ERROR
 devenv /useenv /build Release /project db_java Berkeley_DB.sln >> ..\winbld.out 2>&1
diff --git a/storage/bdb/env/db_salloc.c b/storage/bdb/env/db_salloc.c
index f2b1ed386eb..5566650d0cc 100644
--- a/storage/bdb/env/db_salloc.c
+++ b/storage/bdb/env/db_salloc.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_salloc.c,v 11.28 2004/09/17 22:00:27 mjc Exp $
+ * $Id: db_salloc.c,v 12.4 2005/10/15 00:51:56 bostic Exp $
  */
 
 #include "db_config.h"
@@ -58,7 +58,7 @@ __db_shalloc_init(infop, size)
 	SH_LIST_INIT(hp);
 
 	elp = (struct __data *)(hp + 1);
-	elp->len = size - sizeof(struct __head) - sizeof(elp->len);
+	elp->len = (size - sizeof(struct __head)) - sizeof(elp->len);
 	SH_LIST_INSERT_HEAD(hp, elp, links, __data);
 }
 
@@ -158,6 +158,14 @@ __db_shalloc(infop, len, align, retp)
 	for (elp = SH_LIST_FIRST((struct __head *)p, __data);
 	    elp != NULL;
 	    elp = SH_LIST_NEXT(elp, links, __data)) {
+		/*
+		 * Skip chunks that are too small to work.  This avoids address
+		 * wrap-around in the subsequent calculations (if len were too
+		 * large).
+		 */
+		if (elp->len < len)
+			continue;
+
 		/*
 		 * Calculate the value of the returned pointer if we were to
 		 * use this chunk.
@@ -167,7 +175,6 @@ __db_shalloc(infop, len, align, retp)
 		 */
 		rp = (u_int8_t *)elp + sizeof(size_t) + elp->len;
 		rp = (u_int8_t *)rp - len;
-		rp = (u_int8_t *)((uintptr_t)rp & ~(align - 1));
 		rp = ALIGNP_DEC(rp, align);
 
 		/*
diff --git a/storage/bdb/env/db_shash.c b/storage/bdb/env/db_shash.c
index ac3b3162212..c18f77228c1 100644
--- a/storage/bdb/env/db_shash.c
+++ b/storage/bdb/env/db_shash.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: db_shash.c,v 11.9 2004/03/20 16:18:51 bostic Exp $
+ * $Id: db_shash.c,v 12.1 2005/06/16 20:21:56 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/env/env_failchk.c b/storage/bdb/env/env_failchk.c
new file mode 100644
index 00000000000..4eac661dfc0
--- /dev/null
+++ b/storage/bdb/env/env_failchk.c
@@ -0,0 +1,356 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: env_failchk.c,v 12.17 2005/11/07 14:51:52 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mutex_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/hash.h"			/* Needed for call to __ham_func5. */
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+static int __env_in_api __P((DB_ENV *));
+
+/*
+ * __env_failchk_pp --
+ *	DB_ENV->failchk pre/post processing.
+ *
+ * PUBLIC: int __env_failchk_pp __P((DB_ENV *, u_int32_t));
+ */
+int
+__env_failchk_pp(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_THREAD_INFO *ip;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->failchk");
+
+	/*
+	 * DB_ENV->failchk requires self and is-alive functions.  We
+	 * have a default self function, but no is-alive function.
+	 */
+	if (!ALIVE_ON(dbenv)) {
+		__db_err(dbenv,
+	"DB_ENV->failchk requires DB_ENV->is_alive be configured");
+		return (EINVAL);
+	}
+
+	if (flags != 0)
+		return (__db_ferr(dbenv, "DB_ENV->failchk", 0));
+
+	ENV_ENTER(dbenv, ip);
+
+	/*
+	 * We check for dead threads in the API first as this would be likely
+	 * to hang other things we try later, like locks and transactions.
+	 */
+	if ((ret = __env_in_api(dbenv)) != 0)
+		goto err;
+
+	if (LOCKING_ON(dbenv) && (ret = __lock_failchk(dbenv)) != 0)
+		goto err;
+
+	if (TXN_ON(dbenv) && (ret = __txn_failchk(dbenv)) != 0)
+		goto err;
+
+err:	ENV_LEAVE(dbenv, ip);
+	return (ret);
+}
+
+/*
+ * __env_thread_init --
+ *	Initialize the thread control block table.
+ *
+ * PUBLIC: int __env_thread_init __P((DB_ENV *, int));
+ */
+int
+__env_thread_init(dbenv, created)
+	DB_ENV *dbenv;
+	int created;
+{
+	DB_HASHTAB *htab;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	REGINFO *infop;
+	THREAD_INFO *thread;
+	int ret;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	infop = &mtxmgr->reginfo;
+
+	if (mtxregion->thread_off == INVALID_ROFF) {
+		if (dbenv->thr_nbucket == 0) {
+			dbenv->thr_hashtab = NULL;
+			if (ALIVE_ON(dbenv)) {
+				__db_err(dbenv,
+		"is_alive method specified but no thread region allocated");
+				return (EINVAL);
+			}
+			return (0);
+		}
+		
+		if (!created) {
+			__db_err(dbenv,
+		"thread table must be allocated at environment create time");
+			return (EINVAL);
+		}
+
+		if ((ret = __db_shalloc(infop,
+		     sizeof(THREAD_INFO), 0, &thread)) != 0) {
+			__db_err(dbenv,
+			     "cannot allocate a thread status block");
+			return (ret);
+		}
+		memset(thread, 0, sizeof(*thread));
+		mtxregion->thread_off = R_OFFSET(infop, thread);
+		thread->thr_nbucket = __db_tablesize(dbenv->thr_nbucket);
+		if ((ret = __db_shalloc(infop,
+		     thread->thr_nbucket * sizeof(DB_HASHTAB), 0, &htab)) != 0)
+			return (ret);
+		thread->thr_hashoff = R_OFFSET(infop, htab);
+		__db_hashinit(htab, thread->thr_nbucket);
+		thread->thr_max = dbenv->thr_max;
+	} else {
+		thread = R_ADDR(infop, mtxregion->thread_off);
+		htab = R_ADDR(infop, thread->thr_hashoff);
+	}
+
+	dbenv->thr_hashtab = htab;
+	dbenv->thr_nbucket = thread->thr_nbucket;
+	dbenv->thr_max = thread->thr_max;
+	return (0);
+}
+
+/*
+ * __env_in_api --
+ *	Look for threads which died in the api and complain.
+ */
+static int
+__env_in_api(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_HASHTAB *htab;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	DB_THREAD_INFO *ip;
+	REGINFO *infop;
+	THREAD_INFO *thread;
+	u_int32_t i;
+
+	if ((htab = dbenv->thr_hashtab) == NULL)
+		return (EINVAL);
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	infop = &mtxmgr->reginfo;
+	thread = R_ADDR(infop, mtxregion->thread_off);
+
+	for (i = 0; i < dbenv->thr_nbucket; i++)
+		SH_TAILQ_FOREACH(ip, &htab[i], dbth_links, __db_thread_info) {
+			if (ip->dbth_state == THREAD_SLOT_NOT_IN_USE ||
+			    (ip->dbth_state == THREAD_OUT &&
+			    thread->thr_count <  thread->thr_max))
+				continue;
+			if (dbenv->is_alive(dbenv, ip->dbth_pid, ip->dbth_tid))
+				continue;
+			if (ip->dbth_state == THREAD_OUT) {
+				ip->dbth_state = THREAD_SLOT_NOT_IN_USE;
+				continue;
+			}
+			return (__db_failed(dbenv,
+			     "Thread died in Berkeley DB library",
+			     ip->dbth_pid, ip->dbth_tid));
+		}
+
+	return (0);
+}
+
+struct __db_threadid {
+	pid_t pid;
+	db_threadid_t tid;
+};
+
+static int __thread_id_cmp __P((struct __db_threadid *, DB_THREAD_INFO *));
+static int __thread_state_cmp __P((DB_THREAD_STATE, DB_THREAD_INFO *));
+
+static
+int __thread_id_cmp(id, ip)
+	struct __db_threadid *id;
+	DB_THREAD_INFO *ip;
+{
+#ifdef HAVE_INTEGRAL_THREAD_TYPE
+	return (id->pid == ip->dbth_pid && id->tid == ip->dbth_tid);
+#else
+	if (memcmp(&id->pid, &ip->dbth_pid, sizeof(id->pid)) != 0)
+		return (0);
+	if (memcmp(&id->tid, &ip->dbth_tid, sizeof(id->tid)) != 0)
+		return (0);
+	return (1);
+#endif
+}
+
+static
+int __thread_state_cmp(state, ip)
+	DB_THREAD_STATE state;
+	DB_THREAD_INFO *ip;
+{
+	return (ip->dbth_state == state);
+}
+
+/*
+ * PUBLIC: int __env_set_state __P((DB_ENV *,
+ * PUBLIC:      DB_THREAD_INFO **, DB_THREAD_STATE));
+ */
+int
+__env_set_state(dbenv, ipp, state)
+	DB_ENV *dbenv;
+	DB_THREAD_INFO **ipp;
+	DB_THREAD_STATE state;
+{
+	DB_HASHTAB *htab;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	DB_THREAD_INFO *ip;
+	struct __db_threadid id;
+	REGINFO *infop;
+	THREAD_INFO *thread;
+	int ret;
+	u_int32_t indx;
+
+	htab = (DB_HASHTAB *)dbenv->thr_hashtab;
+
+	dbenv->thread_id(dbenv, &id.pid, &id.tid);
+
+	/*
+	 * Hashing of thread ids.  This is simple but could be replaced with
+	 * something more expensive if needed.
+	 */
+#ifdef HAVE_INTEGRAL_THREAD_TYPE
+	/*
+	 * A thread ID may be a pointer, so explicitly cast to a pointer of
+	 * the appropriate size before doing the bitwise XOR.
+	 */
+	indx = (u_int32_t)((uintptr_t)id.pid ^ (uintptr_t)id.tid);
+#else
+	indx = __ham_func5(NULL, &id.tid, sizeof(id.tid));
+#endif
+	indx %= dbenv->thr_nbucket;
+	HASHLOOKUP(htab, indx,
+	     __db_thread_info, dbth_links, &id, ip, __thread_id_cmp);
+#ifdef DIAGNOSTIC
+	if (state == THREAD_DIAGNOSTIC) {
+		*ipp = ip;
+		return (0);
+	}
+#endif
+
+	ret = 0;
+	if (ip == NULL) {
+		mtxmgr = dbenv->mutex_handle;
+		mtxregion = mtxmgr->reginfo.primary;
+		infop = &mtxmgr->reginfo;
+		thread = R_ADDR(infop, mtxregion->thread_off);
+		MUTEX_SYSTEM_LOCK(dbenv);
+
+		/*
+		 * If we are passed the specified max, try to reclaim one from
+		 * our queue.  If failcheck has marked the slot not in use, we
+		 * can take it, otherwise we must call is_alive before freeing
+		 * it.
+		 */
+		if (thread->thr_count >= thread->thr_max) {
+			HASHLOOKUP(htab, indx, __db_thread_info,
+			    dbth_links, THREAD_OUT, ip, __thread_state_cmp);
+			while (ip != NULL &&
+			    ip->dbth_state != THREAD_SLOT_NOT_IN_USE &&
+			    (ip->dbth_state != THREAD_OUT || !ALIVE_ON(dbenv) ||
+			    dbenv->is_alive(dbenv,
+			    ip->dbth_pid, ip->dbth_tid))) {
+				ip = SH_TAILQ_NEXT(ip,
+				    dbth_links, __db_thread_info);
+			}
+			if (ip != NULL)
+				goto init;
+		}
+
+		thread->thr_count++;
+		if ((ret = __db_shalloc(infop,
+		     sizeof(DB_THREAD_INFO), 0, &ip)) == 0) {
+			memset(ip, 0, sizeof(*ip));
+			/*
+			 * This assumes we can link atomically since we do
+			 * no locking here.  We never use the backpointer
+			 * so we only need to be able to write an offset
+			 * atomically.
+			 */
+			HASHINSERT(htab,
+			    indx, __db_thread_info, dbth_links, ip);
+init:			ip->dbth_pid = id.pid;
+			ip->dbth_tid = id.tid;
+			ip->dbth_state = state;
+		}
+		MUTEX_SYSTEM_UNLOCK(dbenv);
+	} else
+		ip->dbth_state = state;
+	*ipp = ip;
+
+	return (ret);
+}
+
+/*
+ * __env_thread_id_string --
+ *	Convert a thread id to a string.
+ *
+ * PUBLIC: char *__env_thread_id_string
+ * PUBLIC:     __P((DB_ENV *, pid_t, db_threadid_t, char *));
+ */
+char *
+__env_thread_id_string(dbenv, pid, tid, buf)
+	DB_ENV *dbenv;
+	pid_t pid;
+	db_threadid_t tid;
+	char *buf;
+{
+#ifdef HAVE_INTEGRAL_THREAD_TYPE
+#ifdef UINT64_FMT
+	char fmt[10];
+
+	snprintf(fmt, sizeof(fmt), "%s/%s", UINT64_FMT, UINT64_FMT);
+	snprintf(buf,
+	    DB_THREADID_STRLEN, fmt, (u_int64_t)pid, (u_int64_t)(uintptr_t)tid);
+#else
+	snprintf(buf, DB_THREADID_STRLEN, "%lu/%lu", (u_long)pid, (u_long)tid);
+#endif
+#else
+#ifdef UINT64_FMT
+	char fmt[10];
+
+	snprintf(fmt, sizeof(fmt), "%s/TID", UINT64_FMT);
+	snprintf(buf, DB_THREADID_STRLEN, fmt, (u_int64_t)pid);
+#else
+	snprintf(buf, DB_THREADID_STRLEN, "%lu/TID", (u_long)pid);
+#endif
+#endif
+	COMPQUIET(dbenv, NULL);
+	COMPQUIET(*(u_int8_t *)&tid, 0);
+
+	return (buf);
+}
diff --git a/storage/bdb/env/env_file.c b/storage/bdb/env/env_file.c
index 53f93cc534e..c5be30cf183 100644
--- a/storage/bdb/env/env_file.c
+++ b/storage/bdb/env/env_file.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2002-2004
+ * Copyright (c) 2002-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_file.c,v 1.11 2004/03/24 20:51:38 bostic Exp $
+ * $Id: env_file.c,v 12.3 2005/06/16 20:21:56 bostic Exp $
  */
 
 #include "db_config.h"
@@ -17,82 +17,54 @@
 
 #include "db_int.h"
 
-static int __db_overwrite_pass __P((DB_ENV *,
-	       const char *, DB_FH *, u_int32_t, u_int32_t, int));
-
 /*
- * __db_fileinit --
- *	Initialize a regular file, optionally zero-filling it as well.
+ * __db_file_extend --
+ *	Initialize a regular file by writing the last page of the file.
  *
- * PUBLIC: int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+ * PUBLIC: int __db_file_extend __P((DB_ENV *, DB_FH *, size_t));
  */
 int
-__db_fileinit(dbenv, fhp, size, zerofill)
+__db_file_extend(dbenv, fhp, size)
 	DB_ENV *dbenv;
 	DB_FH *fhp;
 	size_t size;
-	int zerofill;
 {
 	db_pgno_t pages;
-	size_t i;
 	size_t nw;
 	u_int32_t relative;
 	int ret;
-	char buf[OS_VMPAGESIZE];
-
-	/* Write nuls to the new bytes. */
-	memset(buf, 0, sizeof(buf));
+	char buf[8 * 1024];
 
 	/*
-	 * Extend the region by writing the last page.  If the region is >4Gb,
+	 * Extend the file by writing the last page.  If the region is >4Gb,
 	 * increment may be larger than the maximum possible seek "relative"
 	 * argument, as it's an unsigned 32-bit value.  Break the offset into
-	 * pages of 1MB each so that we don't overflow (2^20 + 2^32 is bigger
+	 * pages of 1MB each so we don't overflow -- (2^20 + 2^32 is bigger
 	 * than any memory I expect to see for awhile).
 	 */
+	memset(buf, 0, sizeof(buf));
+
 	if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
 		return (ret);
-	pages = (db_pgno_t)((size - OS_VMPAGESIZE) / MEGABYTE);
-	relative = (u_int32_t)((size - OS_VMPAGESIZE) % MEGABYTE);
+	pages = (db_pgno_t)((size - sizeof(buf)) / MEGABYTE);
+	relative = (u_int32_t)((size - sizeof(buf)) % MEGABYTE);
 	if ((ret = __os_seek(dbenv,
 	    fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
 		return (ret);
 	if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
 		return (ret);
 
-	/*
-	 * We may want to guarantee that there is enough disk space for the
-	 * file, so we also write a byte to each page.  We write the byte
-	 * because reading it is insufficient on systems smart enough not to
-	 * instantiate disk pages to satisfy a read (e.g., Solaris).
-	 */
-	if (zerofill) {
-		pages = (db_pgno_t)(size / MEGABYTE);
-		relative = (u_int32_t)(size % MEGABYTE);
-		if ((ret = __os_seek(dbenv, fhp,
-		    MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
-			return (ret);
-
-		/* Write a byte to each page. */
-		for (i = 0; i < size; i += OS_VMPAGESIZE) {
-			if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
-				return (ret);
-			if ((ret = __os_seek(dbenv, fhp,
-			    0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
-				return (ret);
-		}
-	}
 	return (0);
 }
 
 /*
- * __db_overwrite  --
- *	Overwrite a file.
+ * __db_file_multi_write  --
+ *	Overwrite a file with multiple passes to corrupt the data.
  *
- * PUBLIC: int __db_overwrite __P((DB_ENV *, const char *));
+ * PUBLIC: int __db_file_multi_write __P((DB_ENV *, const char *));
  */
 int
-__db_overwrite(dbenv, path)
+__db_file_multi_write(dbenv, path)
 	DB_ENV *dbenv;
 	const char *path;
 {
@@ -108,13 +80,13 @@ __db_overwrite(dbenv, path)
 		 * byte patterns.  Implies a fixed-block filesystem, journaling
 		 * or logging filesystems will require operating system support.
 		 */
-		if ((ret = __db_overwrite_pass(
+		if ((ret = __db_file_write(
 		    dbenv, path, fhp, mbytes, bytes, 255)) != 0)
 			goto err;
-		if ((ret = __db_overwrite_pass(
+		if ((ret = __db_file_write(
 		    dbenv, path, fhp, mbytes, bytes, 0)) != 0)
 			goto err;
-		if ((ret = __db_overwrite_pass(
+		if ((ret = __db_file_write(
 		    dbenv, path, fhp, mbytes, bytes, 255)) != 0)
 			goto err;
 	} else
@@ -126,11 +98,14 @@ err:	if (fhp != NULL)
 }
 
 /*
- * __db_overwrite_pass --
+ * __db_file_write --
  *	A single pass over the file, writing the specified byte pattern.
+ *
+ * PUBLIC: int __db_file_write __P((DB_ENV *,
+ * PUBLIC:     const char *, DB_FH *, u_int32_t, u_int32_t, int));
  */
-static int
-__db_overwrite_pass(dbenv, path, fhp, mbytes, bytes, pattern)
+int
+__db_file_write(dbenv, path, fhp, mbytes, bytes, pattern)
 	DB_ENV *dbenv;
 	const char *path;
 	DB_FH *fhp;
@@ -139,7 +114,7 @@ __db_overwrite_pass(dbenv, path, fhp, mbytes, bytes, pattern)
 {
 	size_t len, nw;
 	int i, ret;
-	char buf[8 * 1024];
+	char buf[32 * 1024];
 
 	if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
 		goto err;
diff --git a/storage/bdb/env/env_method.c b/storage/bdb/env/env_method.c
index 4f865061b82..070cba0467b 100644
--- a/storage/bdb/env/env_method.c
+++ b/storage/bdb/env/env_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_method.c,v 11.136 2004/10/11 18:47:50 bostic Exp $
+ * $Id: env_method.c,v 12.19 2005/11/10 17:12:17 bostic Exp $
  */
 
 #include "db_config.h"
@@ -45,21 +45,27 @@
 #include "dbinc_auto/rpc_client_ext.h"
 #endif
 
-static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
-static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
-static int  __dbenv_get_data_dirs __P((DB_ENV *, const char ***));
-static int  __dbenv_get_flags __P((DB_ENV *, u_int32_t *));
-static int  __dbenv_get_home __P((DB_ENV *, const char **));
-static int  __dbenv_get_shm_key __P((DB_ENV *, long *));
-static int  __dbenv_get_tas_spins __P((DB_ENV *, u_int32_t *));
-static int  __dbenv_get_tmp_dir __P((DB_ENV *, const char **));
-static int  __dbenv_get_verbose __P((DB_ENV *, u_int32_t, int *));
-static int  __dbenv_init __P((DB_ENV *));
-static void __dbenv_map_flags __P((DB_ENV *, u_int32_t *, u_int32_t *));
-static int  __dbenv_set_app_dispatch
+static void __env_err __P((const DB_ENV *, int, const char *, ...));
+static void __env_errx __P((const DB_ENV *, const char *, ...));
+static int  __env_get_data_dirs __P((DB_ENV *, const char ***));
+static int  __env_get_flags __P((DB_ENV *, u_int32_t *));
+static int  __env_get_home __P((DB_ENV *, const char **));
+static int  __env_get_shm_key __P((DB_ENV *, long *));
+static int  __env_get_tmp_dir __P((DB_ENV *, const char **));
+static int  __env_get_verbose __P((DB_ENV *, u_int32_t, int *));
+static int  __env_init __P((DB_ENV *));
+static void __env_map_flags __P((DB_ENV *, u_int32_t *, u_int32_t *));
+static int  __env_set_app_dispatch
 		__P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
-static int  __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
-static int  __dbenv_set_rpc_server_noclnt
+static int  __env_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int  __env_set_isalive __P((DB_ENV *, int (*)(DB_ENV *,
+		pid_t, db_threadid_t)));
+static int  __env_set_thread_id __P((DB_ENV *, void (*)(DB_ENV *,
+		pid_t *, db_threadid_t *)));
+static int  __env_set_thread_id_string __P((DB_ENV *,
+		char * (*)(DB_ENV *, pid_t, db_threadid_t, char *)));
+static int  __env_set_thread_count __P((DB_ENV *, u_int32_t));
+static int  __env_set_rpc_server
 		__P((DB_ENV *, void *, const char *, long, long, u_int32_t));
 
 /*
@@ -95,7 +101,7 @@ db_env_create(dbenvpp, flags)
 	if (LF_ISSET(DB_RPCCLIENT))
 		F_SET(dbenv, DB_ENV_RPCCLIENT);
 #endif
-	if ((ret = __dbenv_init(dbenv)) != 0) {
+	if ((ret = __env_init(dbenv)) != 0) {
 		__os_free(NULL, dbenv);
 		return (ret);
 	}
@@ -105,11 +111,11 @@ db_env_create(dbenvpp, flags)
 }
 
 /*
- * __dbenv_init --
+ * __env_init --
  *	Initialize a DB_ENV structure.
  */
 static int
-__dbenv_init(dbenv)
+__env_init(dbenv)
 	DB_ENV *dbenv;
 {
 	/*
@@ -118,118 +124,188 @@ __dbenv_init(dbenv)
 	 * state or turn off mutex locking, and so we can neither check
 	 * the panic state or acquire a mutex in the DB_ENV create path.
 	 *
-	 * Set up methods that are the same in both normal and RPC
+	 * Initialize the method handles.
 	 */
-	dbenv->err = __dbenv_err;
-	dbenv->errx = __dbenv_errx;
-	dbenv->set_errcall = __dbenv_set_errcall;
-	dbenv->get_errfile = __dbenv_get_errfile;
-	dbenv->set_errfile = __dbenv_set_errfile;
-	dbenv->get_errpfx = __dbenv_get_errpfx;
-	dbenv->set_errpfx = __dbenv_set_errpfx;
-	dbenv->set_msgcall = __dbenv_set_msgcall;
-	dbenv->get_msgfile = __dbenv_get_msgfile;
-	dbenv->set_msgfile = __dbenv_set_msgfile;
+	/* DB_ENV PUBLIC HANDLE LIST BEGIN */
+	dbenv->close = __env_close_pp;
+	dbenv->dbremove = __env_dbremove_pp;
+	dbenv->dbrename = __env_dbrename_pp;
+	dbenv->err = __env_err;
+	dbenv->errx = __env_errx;
+	dbenv->failchk = __env_failchk_pp;
+	dbenv->fileid_reset = __env_fileid_reset_pp;
+	dbenv->get_cachesize = __memp_get_cachesize;
+	dbenv->get_data_dirs = __env_get_data_dirs;
+	dbenv->get_encrypt_flags = __env_get_encrypt_flags;
+	dbenv->get_errfile = __env_get_errfile;
+	dbenv->get_errpfx = __env_get_errpfx;
+	dbenv->get_flags = __env_get_flags;
+	dbenv->get_home = __env_get_home;
+	dbenv->get_lg_bsize = __log_get_lg_bsize;
+	dbenv->get_lg_dir = __log_get_lg_dir;
+	dbenv->get_lg_filemode = __log_get_lg_filemode;
+	dbenv->get_lg_max = __log_get_lg_max;
+	dbenv->get_lg_regionmax = __log_get_lg_regionmax;
+	dbenv->get_lk_conflicts = __lock_get_lk_conflicts;
+	dbenv->get_lk_detect = __lock_get_lk_detect;
+	dbenv->get_lk_max_lockers = __lock_get_lk_max_lockers;
+	dbenv->get_lk_max_locks = __lock_get_lk_max_locks;
+	dbenv->get_lk_max_objects = __lock_get_lk_max_objects;
+	dbenv->get_mp_max_openfd = __memp_get_mp_max_openfd;
+	dbenv->get_mp_max_write = __memp_get_mp_max_write;
+	dbenv->get_mp_mmapsize = __memp_get_mp_mmapsize;
+	dbenv->get_msgfile = __env_get_msgfile;
+	dbenv->get_open_flags = __env_get_open_flags;
+	dbenv->get_rep_limit = __rep_get_limit;
+	dbenv->get_shm_key = __env_get_shm_key;
+	dbenv->get_timeout = __lock_get_env_timeout;
+	dbenv->get_tmp_dir = __env_get_tmp_dir;
+	dbenv->get_tx_max = __txn_get_tx_max;
+	dbenv->get_tx_timestamp = __txn_get_tx_timestamp;
+	dbenv->get_verbose = __env_get_verbose;
+	dbenv->is_bigendian = __db_isbigendian;
+	dbenv->lock_detect = __lock_detect_pp;
+	dbenv->lock_get = __lock_get_pp;
+	dbenv->lock_id = __lock_id_pp;
+	dbenv->lock_id_free = __lock_id_free_pp;
+	dbenv->lock_put = __lock_put_pp;
+	dbenv->lock_stat = __lock_stat_pp;
+	dbenv->lock_stat_print = __lock_stat_print_pp;
+	dbenv->lock_vec = __lock_vec_pp;
+	dbenv->log_archive = __log_archive_pp;
+	dbenv->log_cursor = __log_cursor_pp;
+	dbenv->log_file = __log_file_pp;
+	dbenv->log_flush = __log_flush_pp;
+	dbenv->log_printf = __log_printf_capi;
+	dbenv->log_put = __log_put_pp;
+	dbenv->log_stat = __log_stat_pp;
+	dbenv->log_stat_print = __log_stat_print_pp;
+	dbenv->lsn_reset = __env_lsn_reset_pp;
+	dbenv->memp_fcreate = __memp_fcreate_pp;
+	dbenv->memp_register = __memp_register_pp;
+	dbenv->memp_stat = __memp_stat_pp;
+	dbenv->memp_stat_print = __memp_stat_print_pp;
+	dbenv->memp_sync = __memp_sync_pp;
+	dbenv->memp_trickle = __memp_trickle_pp;
+	dbenv->mutex_alloc = __mutex_alloc_pp;
+	dbenv->mutex_free = __mutex_free_pp;
+	dbenv->mutex_get_align = __mutex_get_align;
+	dbenv->mutex_get_increment = __mutex_get_increment;
+	dbenv->mutex_get_max = __mutex_get_max;
+	dbenv->mutex_get_tas_spins = __mutex_get_tas_spins;
+	dbenv->mutex_lock = __mutex_lock_pp;
+	dbenv->mutex_set_align = __mutex_set_align;
+	dbenv->mutex_set_increment = __mutex_set_increment;
+	dbenv->mutex_set_max = __mutex_set_max;
+	dbenv->mutex_set_tas_spins = __mutex_set_tas_spins;
+	dbenv->mutex_stat = __mutex_stat;
+	dbenv->mutex_stat_print = __mutex_stat_print;
+	dbenv->mutex_unlock = __mutex_unlock_pp;
+	dbenv->open = __env_open_pp;
+	dbenv->remove = __env_remove;
+	dbenv->rep_elect = __rep_elect;
+	dbenv->rep_flush = __rep_flush;
+	dbenv->rep_get_config = __rep_get_config;
+	dbenv->rep_process_message = __rep_process_message;
+	dbenv->rep_set_config = __rep_set_config;
+	dbenv->rep_start = __rep_start;
+	dbenv->rep_stat = __rep_stat_pp;
+	dbenv->rep_stat_print = __rep_stat_print_pp;
+	dbenv->rep_sync = __rep_sync;
+	dbenv->set_alloc = __env_set_alloc;
+	dbenv->set_app_dispatch = __env_set_app_dispatch;
+	dbenv->set_cachesize = __memp_set_cachesize;
+	dbenv->set_data_dir = __env_set_data_dir;
+	dbenv->set_encrypt = __env_set_encrypt;
+	dbenv->set_errcall = __env_set_errcall;
+	dbenv->set_errfile = __env_set_errfile;
+	dbenv->set_errpfx = __env_set_errpfx;
+	dbenv->set_feedback = __env_set_feedback;
+	dbenv->set_flags = __env_set_flags;
+	dbenv->set_intermediate_dir = __env_set_intermediate_dir;
+	dbenv->set_isalive = __env_set_isalive;
+	dbenv->set_lg_bsize = __log_set_lg_bsize;
+	dbenv->set_lg_dir = __log_set_lg_dir;
+	dbenv->set_lg_filemode = __log_set_lg_filemode;
+	dbenv->set_lg_max = __log_set_lg_max;
+	dbenv->set_lg_regionmax = __log_set_lg_regionmax;
+	dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
+	dbenv->set_lk_detect = __lock_set_lk_detect;
+	dbenv->set_lk_max = __lock_set_lk_max;
+	dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
+	dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
+	dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
+	dbenv->set_mp_max_openfd = __memp_set_mp_max_openfd;
+	dbenv->set_mp_max_write = __memp_set_mp_max_write;
+	dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
+	dbenv->set_msgcall = __env_set_msgcall;
+	dbenv->set_msgfile = __env_set_msgfile;
+	dbenv->set_paniccall = __env_set_paniccall;
+	dbenv->set_rep_limit = __rep_set_limit;
+	dbenv->set_rep_request = __rep_set_request;
+	dbenv->set_rep_transport = __rep_set_rep_transport;
+	dbenv->set_rpc_server = __env_set_rpc_server;
+	dbenv->set_shm_key = __env_set_shm_key;
+	dbenv->set_thread_count = __env_set_thread_count;
+	dbenv->set_thread_id = __env_set_thread_id;
+	dbenv->set_thread_id_string = __env_set_thread_id_string;
+	dbenv->set_timeout = __lock_set_env_timeout;
+	dbenv->set_tmp_dir = __env_set_tmp_dir;
+	dbenv->set_tx_max = __txn_set_tx_max;
+	dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+	dbenv->set_verbose = __env_set_verbose;
+	dbenv->stat_print = __env_stat_print_pp;
+	dbenv->txn_begin = __txn_begin_pp;
+	dbenv->txn_checkpoint = __txn_checkpoint_pp;
+	dbenv->txn_recover = __txn_recover_pp;
+	dbenv->txn_stat = __txn_stat_pp;
+	dbenv->txn_stat_print = __txn_stat_print_pp;
+	/* DB_ENV PUBLIC HANDLE LIST END */
 
-#ifdef	HAVE_RPC
+	/* DB_ENV PRIVATE HANDLE LIST BEGIN */
+	dbenv->prdbt = __db_prdbt;
+	/* DB_ENV PRIVATE HANDLE LIST END */
+
+	__os_id(NULL, &dbenv->pid_cache, NULL);
+	dbenv->thread_id = __os_id;
+	dbenv->thread_id_string = __env_thread_id_string;
+	dbenv->db_ref = 0;
+	dbenv->shm_key = INVALID_REGION_SEGID;
+
+	__lock_dbenv_create(dbenv);	/* Subsystem specific. */
+	__log_dbenv_create(dbenv);
+	__memp_dbenv_create(dbenv);
+	__txn_dbenv_create(dbenv);
+
+#ifdef HAVE_RPC
+	/*
+	 * RPC specific: must be last, as we replace methods set by the
+	 * access methods.
+	 */
 	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbenv->close = __dbcl_env_close_wrap;
-		dbenv->dbremove = __dbcl_env_dbremove;
-		dbenv->dbrename = __dbcl_env_dbrename;
-		dbenv->get_home = __dbcl_env_get_home;
-		dbenv->get_open_flags = __dbcl_env_get_open_flags;
+		__dbcl_dbenv_init(dbenv);
+		/*
+		 * !!!
+		 * We wrap the DB_ENV->open and close methods for RPC, and
+		 * the rpc.src file can't handle that.
+		 */
 		dbenv->open = __dbcl_env_open_wrap;
-		dbenv->remove = __dbcl_env_remove;
-		dbenv->stat_print = NULL;
-
-		dbenv->fileid_reset = NULL;
-		dbenv->is_bigendian = NULL;
-		dbenv->lsn_reset = NULL;
-		dbenv->prdbt = NULL;
-
-		dbenv->set_alloc = __dbcl_env_alloc;
-		dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
-		dbenv->get_data_dirs = __dbcl_get_data_dirs;
-		dbenv->set_data_dir = __dbcl_set_data_dir;
-		dbenv->get_encrypt_flags = __dbcl_env_get_encrypt_flags;
-		dbenv->set_encrypt = __dbcl_env_encrypt;
-		dbenv->set_feedback = __dbcl_env_set_feedback;
-		dbenv->get_flags = __dbcl_env_get_flags;
-		dbenv->set_flags = __dbcl_env_flags;
-		dbenv->set_paniccall = __dbcl_env_paniccall;
-		dbenv->set_rpc_server = __dbcl_envrpcserver;
-		dbenv->get_shm_key = __dbcl_get_shm_key;
-		dbenv->set_shm_key = __dbcl_set_shm_key;
-		dbenv->get_tas_spins = __dbcl_get_tas_spins;
-		dbenv->set_tas_spins = __dbcl_set_tas_spins;
-		dbenv->get_timeout = __dbcl_get_timeout;
-		dbenv->set_timeout = __dbcl_set_timeout;
-		dbenv->get_tmp_dir = __dbcl_get_tmp_dir;
-		dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
-		dbenv->get_verbose = __dbcl_get_verbose;
-		dbenv->set_verbose = __dbcl_set_verbose;
-	} else {
-#endif
-		dbenv->close = __dbenv_close_pp;
-		dbenv->dbremove = __dbenv_dbremove_pp;
-		dbenv->dbrename = __dbenv_dbrename_pp;
-		dbenv->open = __dbenv_open;
-		dbenv->remove = __dbenv_remove;
-		dbenv->stat_print = __dbenv_stat_print_pp;
-
-		dbenv->fileid_reset = __db_fileid_reset;
-		dbenv->is_bigendian = __db_isbigendian;
-		dbenv->lsn_reset = __db_lsn_reset;
-		dbenv->prdbt = __db_prdbt;
-
-		dbenv->get_home = __dbenv_get_home;
-		dbenv->get_open_flags = __dbenv_get_open_flags;
-		dbenv->set_alloc = __dbenv_set_alloc;
-		dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
-		dbenv->get_data_dirs = __dbenv_get_data_dirs;
-		dbenv->set_data_dir = __dbenv_set_data_dir;
-		dbenv->get_encrypt_flags = __dbenv_get_encrypt_flags;
-		dbenv->set_encrypt = __dbenv_set_encrypt;
-		dbenv->set_feedback = __dbenv_set_feedback;
-		dbenv->get_flags = __dbenv_get_flags;
-		dbenv->set_flags = __dbenv_set_flags;
-		dbenv->set_intermediate_dir = __dbenv_set_intermediate_dir;
-		dbenv->set_paniccall = __dbenv_set_paniccall;
-		dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
-		dbenv->get_shm_key = __dbenv_get_shm_key;
-		dbenv->set_shm_key = __dbenv_set_shm_key;
-		dbenv->get_tas_spins = __dbenv_get_tas_spins;
-		dbenv->set_tas_spins = __dbenv_set_tas_spins;
-		dbenv->get_tmp_dir = __dbenv_get_tmp_dir;
-		dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
-		dbenv->get_verbose = __dbenv_get_verbose;
-		dbenv->set_verbose = __dbenv_set_verbose;
-#ifdef	HAVE_RPC
+		dbenv->close = __dbcl_env_close_wrap;
 	}
 #endif
-	dbenv->shm_key = INVALID_REGION_SEGID;
-	dbenv->db_ref = 0;
-
-	__os_spin(dbenv);
-
-	__log_dbenv_create(dbenv);		/* Subsystem specific. */
-	__lock_dbenv_create(dbenv);
-	__memp_dbenv_create(dbenv);
-	__rep_dbenv_create(dbenv);
-	__txn_dbenv_create(dbenv);
 
 	return (0);
 }
 
 /*
- * __dbenv_err --
+ * __env_err --
  *	Error message, including the standard error string.
  */
 static void
 #ifdef STDC_HEADERS
-__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+__env_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
 #else
-__dbenv_err(dbenv, error, fmt, va_alist)
+__env_err(dbenv, error, fmt, va_alist)
 	const DB_ENV *dbenv;
 	int error;
 	const char *fmt;
@@ -240,14 +316,14 @@ __dbenv_err(dbenv, error, fmt, va_alist)
 }
 
 /*
- * __dbenv_errx --
+ * __env_errx --
  *	Error message.
  */
 static void
 #ifdef STDC_HEADERS
-__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+__env_errx(const DB_ENV *dbenv, const char *fmt, ...)
 #else
-__dbenv_errx(dbenv, fmt, va_alist)
+__env_errx(dbenv, fmt, va_alist)
 	const DB_ENV *dbenv;
 	const char *fmt;
 	va_dcl
@@ -257,7 +333,7 @@ __dbenv_errx(dbenv, fmt, va_alist)
 }
 
 static int
-__dbenv_get_home(dbenv, homep)
+__env_get_home(dbenv, homep)
 	DB_ENV *dbenv;
 	const char **homep;
 {
@@ -267,14 +343,14 @@ __dbenv_get_home(dbenv, homep)
 }
 
 /*
- * __dbenv_set_alloc --
+ * __env_set_alloc --
  *	{DB_ENV,DB}->set_alloc.
  *
- * PUBLIC: int  __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ * PUBLIC: int  __env_set_alloc __P((DB_ENV *, void *(*)(size_t),
  * PUBLIC:          void *(*)(void *, size_t), void (*)(void *)));
  */
 int
-__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+__env_set_alloc(dbenv, mal_func, real_func, free_func)
 	DB_ENV *dbenv;
 	void *(*mal_func) __P((size_t));
 	void *(*real_func) __P((void *, size_t));
@@ -289,11 +365,11 @@ __dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
 }
 
 /*
- * __dbenv_set_app_dispatch --
+ * __env_set_app_dispatch --
  *	Set the transaction abort recover function.
  */
 static int
-__dbenv_set_app_dispatch(dbenv, app_dispatch)
+__env_set_app_dispatch(dbenv, app_dispatch)
 	DB_ENV *dbenv;
 	int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
 {
@@ -304,13 +380,13 @@ __dbenv_set_app_dispatch(dbenv, app_dispatch)
 }
 
 /*
- * __dbenv_get_encrypt_flags --
+ * __env_get_encrypt_flags --
  *	{DB_ENV,DB}->get_encrypt_flags.
  *
- * PUBLIC: int __dbenv_get_encrypt_flags __P((DB_ENV *, u_int32_t *));
+ * PUBLIC: int __env_get_encrypt_flags __P((DB_ENV *, u_int32_t *));
  */
 int
-__dbenv_get_encrypt_flags(dbenv, flagsp)
+__env_get_encrypt_flags(dbenv, flagsp)
 	DB_ENV *dbenv;
 	u_int32_t *flagsp;
 {
@@ -332,13 +408,13 @@ __dbenv_get_encrypt_flags(dbenv, flagsp)
 }
 
 /*
- * __dbenv_set_encrypt --
+ * __env_set_encrypt --
  *	DB_ENV->set_encrypt.
  *
- * PUBLIC: int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
+ * PUBLIC: int __env_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
  */
 int
-__dbenv_set_encrypt(dbenv, passwd, flags)
+__env_set_encrypt(dbenv, passwd, flags)
 	DB_ENV *dbenv;
 	const char *passwd;
 	u_int32_t flags;
@@ -413,7 +489,7 @@ err:
 }
 
 static void
-__dbenv_map_flags(dbenv, inflagsp, outflagsp)
+__env_map_flags(dbenv, inflagsp, outflagsp)
 	DB_ENV *dbenv;
 	u_int32_t *inflagsp, *outflagsp;
 {
@@ -435,6 +511,10 @@ __dbenv_map_flags(dbenv, inflagsp, outflagsp)
 		FLD_SET(*outflagsp, DB_ENV_DIRECT_LOG);
 		FLD_CLR(*inflagsp, DB_DIRECT_LOG);
 	}
+	if (FLD_ISSET(*inflagsp, DB_DSYNC_DB)) {
+		FLD_SET(*outflagsp, DB_ENV_DSYNC_DB);
+		FLD_CLR(*inflagsp, DB_DSYNC_DB);
+	}
 	if (FLD_ISSET(*inflagsp, DB_DSYNC_LOG)) {
 		FLD_SET(*outflagsp, DB_ENV_DSYNC_LOG);
 		FLD_CLR(*inflagsp, DB_DSYNC_LOG);
@@ -486,7 +566,7 @@ __dbenv_map_flags(dbenv, inflagsp, outflagsp)
 }
 
 static int
-__dbenv_get_flags(dbenv, flagsp)
+__env_get_flags(dbenv, flagsp)
 	DB_ENV *dbenv;
 	u_int32_t *flagsp;
 {
@@ -495,6 +575,7 @@ __dbenv_get_flags(dbenv, flagsp)
 		DB_CDB_ALLDB,
 		DB_DIRECT_DB,
 		DB_DIRECT_LOG,
+		DB_DSYNC_DB,
 		DB_DSYNC_LOG,
 		DB_LOG_AUTOREMOVE,
 		DB_LOG_INMEMORY,
@@ -515,7 +596,7 @@ __dbenv_get_flags(dbenv, flagsp)
 	flags = 0;
 	for (i = 0; (f = env_flags[i]) != 0; i++) {
 		mapped_flag = 0;
-		__dbenv_map_flags(dbenv, &f, &mapped_flag);
+		__env_map_flags(dbenv, &f, &mapped_flag);
 		DB_ASSERT(f == 0);
 		if (F_ISSET(dbenv, mapped_flag) == mapped_flag)
 			LF_SET(env_flags[i]);
@@ -523,7 +604,7 @@ __dbenv_get_flags(dbenv, flagsp)
 
 	/* Some flags are persisted in the regions. */
 	if (dbenv->reginfo != NULL &&
-	    ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->envpanic != 0) {
+	    ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->panic != 0) {
 		LF_SET(DB_PANIC_ENVIRONMENT);
 	}
 	__log_get_flags(dbenv, &flags);
@@ -533,13 +614,13 @@ __dbenv_get_flags(dbenv, flagsp)
 }
 
 /*
- * __dbenv_set_flags --
+ * __env_set_flags --
  *	DB_ENV->set_flags.
  *
- * PUBLIC: int  __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+ * PUBLIC: int  __env_set_flags __P((DB_ENV *, u_int32_t, int));
  */
 int
-__dbenv_set_flags(dbenv, flags, on)
+__env_set_flags(dbenv, flags, on)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 	int on;
@@ -549,10 +630,11 @@ __dbenv_set_flags(dbenv, flags, on)
 
 #define	OK_FLAGS							\
 	(DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG |	\
-	    DB_DSYNC_LOG | DB_LOG_AUTOREMOVE | DB_LOG_INMEMORY | \
-	    DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
-	    DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TIME_NOTGRANTED | \
-	    DB_TXN_NOSYNC | DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
+	    DB_DSYNC_DB | DB_DSYNC_LOG | DB_LOG_AUTOREMOVE |		\
+	    DB_LOG_INMEMORY | DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC |	\
+	    DB_OVERWRITE | DB_PANIC_ENVIRONMENT | DB_REGION_INIT |	\
+	    DB_TIME_NOTGRANTED | DB_TXN_NOSYNC | DB_TXN_WRITE_NOSYNC |	\
+	    DB_YIELDCPU)
 
 	if (LF_ISSET(~OK_FLAGS))
 		return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
@@ -580,11 +662,18 @@ __dbenv_set_flags(dbenv, flags, on)
 	if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
 		ENV_ILLEGAL_BEFORE_OPEN(dbenv,
 		    "DB_ENV->set_flags: DB_PANIC_ENVIRONMENT");
-		PANIC_SET(dbenv, on);
+		if (on) {
+			__db_err(dbenv, "Environment panic set");
+			(void)__db_panic(dbenv, EACCES);
+		} else
+			__db_panic_set(dbenv, 0);
 	}
 	if (LF_ISSET(DB_REGION_INIT))
 		ENV_ILLEGAL_AFTER_OPEN(dbenv,
 		    "DB_ENV->set_flags: DB_REGION_INIT");
+	if (LF_ISSET(DB_LOG_INMEMORY))
+		ENV_ILLEGAL_AFTER_OPEN(dbenv,
+		    "DB_ENV->set_flags: DB_LOG_INMEMORY");
 
 	/*
 	 * DB_LOG_INMEMORY, DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC are
@@ -601,7 +690,7 @@ __dbenv_set_flags(dbenv, flags, on)
 	__log_set_flags(dbenv, flags, on);
 
 	mapped_flags = 0;
-	__dbenv_map_flags(dbenv, &flags, &mapped_flags);
+	__env_map_flags(dbenv, &flags, &mapped_flags);
 	if (on)
 		F_SET(dbenv, mapped_flags);
 	else
@@ -611,7 +700,7 @@ __dbenv_set_flags(dbenv, flags, on)
 }
 
 static int
-__dbenv_get_data_dirs(dbenv, dirpp)
+__env_get_data_dirs(dbenv, dirpp)
 	DB_ENV *dbenv;
 	const char ***dirpp;
 {
@@ -620,13 +709,13 @@ __dbenv_get_data_dirs(dbenv, dirpp)
 }
 
 /*
- * __dbenv_set_data_dir --
+ * __env_set_data_dir --
  *	DB_ENV->set_data_dir.
  *
- * PUBLIC: int  __dbenv_set_data_dir __P((DB_ENV *, const char *));
+ * PUBLIC: int  __env_set_data_dir __P((DB_ENV *, const char *));
  */
 int
-__dbenv_set_data_dir(dbenv, dir)
+__env_set_data_dir(dbenv, dir)
 	DB_ENV *dbenv;
 	const char *dir;
 {
@@ -658,17 +747,17 @@ __dbenv_set_data_dir(dbenv, dir)
 }
 
 /*
- * __dbenv_set_intermediate_dir --
+ * __env_set_intermediate_dir --
  *	DB_ENV->set_intermediate_dir.
  *
  * !!!
  * Undocumented routine allowing applications to configure Berkeley DB to
  * create intermediate directories.
  *
- * PUBLIC: int  __dbenv_set_intermediate_dir __P((DB_ENV *, int, u_int32_t));
+ * PUBLIC: int  __env_set_intermediate_dir __P((DB_ENV *, int, u_int32_t));
  */
 int
-__dbenv_set_intermediate_dir(dbenv, mode, flags)
+__env_set_intermediate_dir(dbenv, mode, flags)
 	DB_ENV *dbenv;
 	int mode;
 	u_int32_t flags;
@@ -686,14 +775,14 @@ __dbenv_set_intermediate_dir(dbenv, mode, flags)
 }
 
 /*
- * __dbenv_set_errcall --
+ * __env_set_errcall --
  *	{DB_ENV,DB}->set_errcall.
  *
- * PUBLIC: void __dbenv_set_errcall __P((DB_ENV *,
+ * PUBLIC: void __env_set_errcall __P((DB_ENV *,
  * PUBLIC:		void (*)(const DB_ENV *, const char *, const char *)));
  */
 void
-__dbenv_set_errcall(dbenv, errcall)
+__env_set_errcall(dbenv, errcall)
 	DB_ENV *dbenv;
 	void (*errcall) __P((const DB_ENV *, const char *, const char *));
 {
@@ -701,13 +790,13 @@ __dbenv_set_errcall(dbenv, errcall)
 }
 
 /*
- * __dbenv_get_errfile --
+ * __env_get_errfile --
  *	{DB_ENV,DB}->get_errfile.
  *
- * PUBLIC: void __dbenv_get_errfile __P((DB_ENV *, FILE **));
+ * PUBLIC: void __env_get_errfile __P((DB_ENV *, FILE **));
  */
 void
-__dbenv_get_errfile(dbenv, errfilep)
+__env_get_errfile(dbenv, errfilep)
 	DB_ENV *dbenv;
 	FILE **errfilep;
 {
@@ -715,13 +804,13 @@ __dbenv_get_errfile(dbenv, errfilep)
 }
 
 /*
- * __dbenv_set_errfile --
+ * __env_set_errfile --
  *	{DB_ENV,DB}->set_errfile.
  *
- * PUBLIC: void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+ * PUBLIC: void __env_set_errfile __P((DB_ENV *, FILE *));
  */
 void
-__dbenv_set_errfile(dbenv, errfile)
+__env_set_errfile(dbenv, errfile)
 	DB_ENV *dbenv;
 	FILE *errfile;
 {
@@ -729,13 +818,13 @@ __dbenv_set_errfile(dbenv, errfile)
 }
 
 /*
- * __dbenv_get_errpfx --
+ * __env_get_errpfx --
  *	{DB_ENV,DB}->get_errpfx.
  *
- * PUBLIC: void __dbenv_get_errpfx __P((DB_ENV *, const char **));
+ * PUBLIC: void __env_get_errpfx __P((DB_ENV *, const char **));
  */
 void
-__dbenv_get_errpfx(dbenv, errpfxp)
+__env_get_errpfx(dbenv, errpfxp)
 	DB_ENV *dbenv;
 	const char **errpfxp;
 {
@@ -743,13 +832,13 @@ __dbenv_get_errpfx(dbenv, errpfxp)
 }
 
 /*
- * __dbenv_set_errpfx --
+ * __env_set_errpfx --
  *	{DB_ENV,DB}->set_errpfx.
  *
- * PUBLIC: void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+ * PUBLIC: void __env_set_errpfx __P((DB_ENV *, const char *));
  */
 void
-__dbenv_set_errpfx(dbenv, errpfx)
+__env_set_errpfx(dbenv, errpfx)
 	DB_ENV *dbenv;
 	const char *errpfx;
 {
@@ -757,7 +846,7 @@ __dbenv_set_errpfx(dbenv, errpfx)
 }
 
 static int
-__dbenv_set_feedback(dbenv, feedback)
+__env_set_feedback(dbenv, feedback)
 	DB_ENV *dbenv;
 	void (*feedback) __P((DB_ENV *, int, int));
 {
@@ -766,14 +855,80 @@ __dbenv_set_feedback(dbenv, feedback)
 }
 
 /*
- * __dbenv_set_msgcall --
+ * __env_set_thread_id --
+ *	DB_ENV->set_thread_id
+ */
+static int
+__env_set_thread_id(dbenv, id)
+	DB_ENV *dbenv;
+	void (*id) __P((DB_ENV *, pid_t *, db_threadid_t *));
+{
+	dbenv->thread_id = id;
+	return (0);
+}
+
+/*
+ * __env_set_threadid_string --
+ *	DB_ENV->set_threadid_string
+ */
+static int
+__env_set_thread_id_string(dbenv, thread_id_string)
+	DB_ENV *dbenv;
+	char *(*thread_id_string) __P((DB_ENV *, pid_t, db_threadid_t, char *));
+{
+	dbenv->thread_id_string = thread_id_string;
+	return (0);
+}
+
+/*
+ * __env_set_isalive --
+ *	DB_ENV->set_isalive
+ */
+static int
+__env_set_isalive(dbenv, is_alive)
+	DB_ENV *dbenv;
+	int (*is_alive) __P((DB_ENV *, pid_t, db_threadid_t));
+{
+	if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED) &&
+	    dbenv->thr_nbucket == 0) {
+		__db_err(dbenv,
+		    "is_alive method specified but no thread region allocated");
+		return (EINVAL);
+	}
+	dbenv->is_alive = is_alive;
+	return (0);
+}
+
+/*
+ * __env_set_thread_count --
+ *	DB_ENV->set_thread_count
+ */
+static int
+__env_set_thread_count(dbenv, count)
+	DB_ENV *dbenv;
+	u_int32_t count;
+{
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_thread_count");
+	dbenv->thr_max = count;
+
+	/*
+	 * Set the number of buckets to be 1/8th the number of
+	 * proposed threads control blocks.  This is rather
+	 * arbitrary.
+	 */
+	dbenv->thr_nbucket = count / 8;
+	return (0);
+}
+
+/*
+ * __env_set_msgcall --
  *	{DB_ENV,DB}->set_msgcall.
  *
- * PUBLIC: void __dbenv_set_msgcall
+ * PUBLIC: void __env_set_msgcall
  * PUBLIC:     __P((DB_ENV *, void (*)(const DB_ENV *, const char *)));
  */
 void
-__dbenv_set_msgcall(dbenv, msgcall)
+__env_set_msgcall(dbenv, msgcall)
 	DB_ENV *dbenv;
 	void (*msgcall) __P((const DB_ENV *, const char *));
 {
@@ -781,13 +936,13 @@ __dbenv_set_msgcall(dbenv, msgcall)
 }
 
 /*
- * __dbenv_get_msgfile --
+ * __env_get_msgfile --
  *	{DB_ENV,DB}->get_msgfile.
  *
- * PUBLIC: void __dbenv_get_msgfile __P((DB_ENV *, FILE **));
+ * PUBLIC: void __env_get_msgfile __P((DB_ENV *, FILE **));
  */
 void
-__dbenv_get_msgfile(dbenv, msgfilep)
+__env_get_msgfile(dbenv, msgfilep)
 	DB_ENV *dbenv;
 	FILE **msgfilep;
 {
@@ -795,13 +950,13 @@ __dbenv_get_msgfile(dbenv, msgfilep)
 }
 
 /*
- * __dbenv_set_msgfile --
+ * __env_set_msgfile --
  *	{DB_ENV,DB}->set_msgfile.
  *
- * PUBLIC: void __dbenv_set_msgfile __P((DB_ENV *, FILE *));
+ * PUBLIC: void __env_set_msgfile __P((DB_ENV *, FILE *));
  */
 void
-__dbenv_set_msgfile(dbenv, msgfile)
+__env_set_msgfile(dbenv, msgfile)
 	DB_ENV *dbenv;
 	FILE *msgfile;
 {
@@ -809,13 +964,13 @@ __dbenv_set_msgfile(dbenv, msgfile)
 }
 
 /*
- * __dbenv_set_paniccall --
+ * __env_set_paniccall --
  *	{DB_ENV,DB}->set_paniccall.
  *
- * PUBLIC: int  __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ * PUBLIC: int  __env_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
  */
 int
-__dbenv_set_paniccall(dbenv, paniccall)
+__env_set_paniccall(dbenv, paniccall)
 	DB_ENV *dbenv;
 	void (*paniccall) __P((DB_ENV *, int));
 {
@@ -824,7 +979,7 @@ __dbenv_set_paniccall(dbenv, paniccall)
 }
 
 static int
-__dbenv_get_shm_key(dbenv, shm_keyp)
+__env_get_shm_key(dbenv, shm_keyp)
 	DB_ENV *dbenv;
 	long *shm_keyp;			/* !!!: really a key_t *. */
 {
@@ -833,13 +988,13 @@ __dbenv_get_shm_key(dbenv, shm_keyp)
 }
 
 /*
- * __dbenv_set_shm_key --
+ * __env_set_shm_key --
  *	DB_ENV->set_shm_key.
  *
- * PUBLIC: int  __dbenv_set_shm_key __P((DB_ENV *, long));
+ * PUBLIC: int  __env_set_shm_key __P((DB_ENV *, long));
  */
 int
-__dbenv_set_shm_key(dbenv, shm_key)
+__env_set_shm_key(dbenv, shm_key)
 	DB_ENV *dbenv;
 	long shm_key;			/* !!!: really a key_t. */
 {
@@ -850,31 +1005,7 @@ __dbenv_set_shm_key(dbenv, shm_key)
 }
 
 static int
-__dbenv_get_tas_spins(dbenv, tas_spinsp)
-	DB_ENV *dbenv;
-	u_int32_t *tas_spinsp;
-{
-	*tas_spinsp = dbenv->tas_spins;
-	return (0);
-}
-
-/*
- * __dbenv_set_tas_spins --
- *	DB_ENV->set_tas_spins.
- *
- * PUBLIC: int  __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
- */
-int
-__dbenv_set_tas_spins(dbenv, tas_spins)
-	DB_ENV *dbenv;
-	u_int32_t tas_spins;
-{
-	dbenv->tas_spins = tas_spins;
-	return (0);
-}
-
-static int
-__dbenv_get_tmp_dir(dbenv, dirp)
+__env_get_tmp_dir(dbenv, dirp)
 	DB_ENV *dbenv;
 	const char **dirp;
 {
@@ -883,13 +1014,13 @@ __dbenv_get_tmp_dir(dbenv, dirp)
 }
 
 /*
- * __dbenv_set_tmp_dir --
+ * __env_set_tmp_dir --
  *	DB_ENV->set_tmp_dir.
  *
- * PUBLIC: int  __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+ * PUBLIC: int  __env_set_tmp_dir __P((DB_ENV *, const char *));
  */
 int
-__dbenv_set_tmp_dir(dbenv, dir)
+__env_set_tmp_dir(dbenv, dir)
 	DB_ENV *dbenv;
 	const char *dir;
 {
@@ -899,7 +1030,7 @@ __dbenv_set_tmp_dir(dbenv, dir)
 }
 
 static int
-__dbenv_get_verbose(dbenv, which, onoffp)
+__env_get_verbose(dbenv, which, onoffp)
 	DB_ENV *dbenv;
 	u_int32_t which;
 	int *onoffp;
@@ -907,6 +1038,7 @@ __dbenv_get_verbose(dbenv, which, onoffp)
 	switch (which) {
 	case DB_VERB_DEADLOCK:
 	case DB_VERB_RECOVERY:
+	case DB_VERB_REGISTER:
 	case DB_VERB_REPLICATION:
 	case DB_VERB_WAITSFOR:
 		*onoffp = FLD_ISSET(dbenv->verbose, which) ? 1 : 0;
@@ -918,13 +1050,13 @@ __dbenv_get_verbose(dbenv, which, onoffp)
 }
 
 /*
- * __dbenv_set_verbose --
+ * __env_set_verbose --
  *	DB_ENV->set_verbose.
  *
- * PUBLIC: int  __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+ * PUBLIC: int  __env_set_verbose __P((DB_ENV *, u_int32_t, int));
  */
 int
-__dbenv_set_verbose(dbenv, which, on)
+__env_set_verbose(dbenv, which, on)
 	DB_ENV *dbenv;
 	u_int32_t which;
 	int on;
@@ -932,6 +1064,7 @@ __dbenv_set_verbose(dbenv, which, on)
 	switch (which) {
 	case DB_VERB_DEADLOCK:
 	case DB_VERB_RECOVERY:
+	case DB_VERB_REGISTER:
 	case DB_VERB_REPLICATION:
 	case DB_VERB_WAITSFOR:
 		if (on)
@@ -1019,7 +1152,7 @@ __db_env_config(dbenv, i, flags)
 }
 
 static int
-__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
+__env_set_rpc_server(dbenv, cl, host, tsec, ssec, flags)
 	DB_ENV *dbenv;
 	void *cl;
 	const char *host;
@@ -1032,7 +1165,6 @@ __dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
 	COMPQUIET(ssec, 0);
 	COMPQUIET(flags, 0);
 
-	__db_err(dbenv,
-	    "set_rpc_server method not permitted in non-RPC environment");
+	__db_err(dbenv, "Berkeley DB was not configured for RPC support");
 	return (DB_OPNOTSUP);
 }
diff --git a/storage/bdb/env/env_open.c b/storage/bdb/env/env_open.c
index 0fbce72d009..d00355a7407 100644
--- a/storage/bdb/env/env_open.c
+++ b/storage/bdb/env/env_open.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_open.c,v 11.177 2004/07/17 18:55:08 ubell Exp $
+ * $Id: env_open.c,v 12.36 2005/10/31 02:22:28 bostic Exp $
  */
 
 #include "db_config.h"
@@ -33,9 +33,9 @@
 
 static int __db_parse __P((DB_ENV *, char *));
 static int __db_tmp_open __P((DB_ENV *, u_int32_t, char *, DB_FH **));
-static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
-static int __dbenv_refresh __P((DB_ENV *, u_int32_t, int));
-static int __dbenv_remove_int __P((DB_ENV *, const char *, u_int32_t));
+static int __env_config __P((DB_ENV *, const char *, u_int32_t));
+static int __env_refresh __P((DB_ENV *, u_int32_t, int));
+static int __env_remove_int __P((DB_ENV *, const char *, u_int32_t));
 
 /*
  * db_version --
@@ -57,76 +57,68 @@ db_version(majverp, minverp, patchp)
 }
 
 /*
- * __dbenv_open --
- *	DB_ENV->open.
+ * __env_open_pp --
+ *	DB_ENV->open pre/post processing.
  *
- * PUBLIC: int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+ * PUBLIC: int __env_open_pp __P((DB_ENV *, const char *, u_int32_t, int));
  */
 int
-__dbenv_open(dbenv, db_home, flags, mode)
+__env_open_pp(dbenv, db_home, flags, mode)
 	DB_ENV *dbenv;
 	const char *db_home;
 	u_int32_t flags;
 	int mode;
 {
-	DB_MPOOL *dbmp;
-	u_int32_t init_flags, orig_flags;
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	u_int32_t orig_flags;
+	int need_recovery, ret, t_ret;
 
-	orig_flags = dbenv->flags;
-	rep_check = 0;
+	need_recovery = 0;
 
 #undef	OKFLAGS
 #define	OKFLAGS								\
 	(DB_CREATE | DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG |		\
-	DB_INIT_MPOOL | DB_INIT_REP | DB_INIT_TXN | DB_JOINENV |	\
-	DB_LOCKDOWN | DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL |	\
-	DB_SYSTEM_MEM |	DB_THREAD | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
+	DB_INIT_MPOOL | DB_INIT_REP | DB_INIT_TXN | DB_LOCKDOWN |	\
+	DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | DB_REGISTER |	\
+	DB_SYSTEM_MEM | DB_THREAD | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
 #undef	OKFLAGS_CDB
 #define	OKFLAGS_CDB							\
 	(DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL | DB_LOCKDOWN |	\
 	DB_PRIVATE | DB_SYSTEM_MEM | DB_THREAD |			\
 	DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
 
-	/*
-	 * Flags saved in the init_flags field of the environment, representing
-	 * flags to DB_ENV->set_flags and DB_ENV->open that need to be set.
-	 */
-#define	DB_INITENV_CDB		0x0001	/* DB_INIT_CDB */
-#define	DB_INITENV_CDB_ALLDB	0x0002	/* DB_INIT_CDB_ALLDB */
-#define	DB_INITENV_LOCK		0x0004	/* DB_INIT_LOCK */
-#define	DB_INITENV_LOG		0x0008	/* DB_INIT_LOG */
-#define	DB_INITENV_MPOOL	0x0010	/* DB_INIT_MPOOL */
-#define	DB_INITENV_REP		0x0020	/* DB_INIT_REP */
-#define	DB_INITENV_TXN		0x0040	/* DB_INIT_TXN */
-
 	if ((ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS)) != 0)
 		return (ret);
-	if (LF_ISSET(DB_INIT_CDB) &&
-	    (ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS_CDB)) != 0)
+	if ((ret = __db_fcchk(
+	    dbenv, "DB_ENV->open", flags, DB_INIT_CDB, ~OKFLAGS_CDB)) != 0)
 		return (ret);
-	if ((ret = __db_fcchk(dbenv,
-	    "DB_ENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
+	if ((ret = __db_fcchk(dbenv, "DB_ENV->open", flags,
+	    DB_PRIVATE, DB_REGISTER | DB_SYSTEM_MEM)) != 0)
 		return (ret);
-	if ((ret = __db_fcchk(dbenv,
-	    "DB_ENV->open", flags, DB_RECOVER, DB_RECOVER_FATAL)) != 0)
-		return (ret);
-	if ((ret = __db_fcchk(dbenv, "DB_ENV->open", flags, DB_JOINENV,
-	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
-	    DB_INIT_REP | DB_INIT_TXN |
-	    DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL)) != 0)
-		return (ret);
-	if (LF_ISSET(DB_INIT_REP) && !LF_ISSET(DB_INIT_TXN)) {
-		__db_err(dbenv, "Replication requires transaction support");
-		return (EINVAL);
+	if (LF_ISSET(DB_INIT_REP)) {
+		if (!LF_ISSET(DB_INIT_LOCK)) {
+			__db_err(dbenv, "replication requires locking support");
+			return (EINVAL);
+		}
+		if (!LF_ISSET(DB_INIT_TXN)) {
+			__db_err(
+			    dbenv, "replication requires transaction support");
+			return (EINVAL);
+		}
 	}
-	if (LF_ISSET(DB_INIT_REP) && !LF_ISSET(DB_INIT_LOCK)) {
-		__db_err(dbenv, "Replication requires locking support");
-		return (EINVAL);
-	}
-	if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) && !LF_ISSET(DB_INIT_TXN)) {
-		__db_err(dbenv, "Recovery requires transaction support");
-		return (EINVAL);
+	if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL)) {
+		if ((ret = __db_fcchk(dbenv,
+		    "DB_ENV->open", flags, DB_RECOVER, DB_RECOVER_FATAL)) != 0)
+			return (ret);
+		if (!LF_ISSET(DB_CREATE)) {
+			__db_err(dbenv, "recovery requires the create flag");
+			return (EINVAL);
+		}
+		if (!LF_ISSET(DB_INIT_TXN)) {
+			__db_err(
+			    dbenv, "recovery requires transaction support");
+			return (EINVAL);
+		}
 	}
 
 	/*
@@ -137,36 +129,131 @@ __dbenv_open(dbenv, db_home, flags, mode)
 #ifdef HAVE_MUTEX_THREAD_ONLY
 	if (!LF_ISSET(DB_PRIVATE)) {
 		__db_err(dbenv,
-    "Berkeley DB library configured to support only private environments");
+	 "Berkeley DB library configured to support only private environments");
 		return (EINVAL);
 	}
 #endif
 
+#if defined(HAVE_MUTEX_FCNTL)
+	/*
+	 * !!!
+	 * We need a file descriptor for fcntl(2) locking.  We use the file
+	 * handle from the REGENV file for this purpose.
+	 *
+	 * Since we may be using shared memory regions, e.g., shmget(2), and
+	 * not a mapped-in regular file, the backing file may be only a few
+	 * bytes in length.  So, this depends on the ability to call fcntl to
+	 * lock file offsets much larger than the actual physical file.  I
+	 * think that's safe -- besides, very few systems actually need this
+	 * kind of support, SunOS is the only one still in wide use of which
+	 * I'm aware.
+	 *
+	 * The error case is if an application lacks spinlocks and wants to be
+	 * threaded.  That doesn't work because fcntl will lock the underlying
+	 * process, including all its threads.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+		__db_err(dbenv,
+	    "architecture lacks fast mutexes: applications cannot be threaded");
+		return (EINVAL);
+	}
+#endif
+
+	if (LF_ISSET(DB_INIT_REP) && !__os_support_replication()) {
+		__db_err(dbenv,
+	     "Berkeley DB library does not support replication on this system");
+		return (EINVAL);
+	}
+
+	/*
+	 * If we're going to register with the environment, that's the first
+	 * thing we do.
+	 */
+	if (LF_ISSET(DB_REGISTER)) {
+		if (!__os_support_db_register()) {
+			__db_err(dbenv,
+	     "Berkeley DB library does not support DB_REGISTER on this system");
+			return (EINVAL);
+		}
+
+		if ((ret =
+		    __envreg_register(dbenv, db_home, &need_recovery)) != 0)
+			return (ret);
+		if (need_recovery) {
+			if (!LF_ISSET(DB_RECOVER)) {
+				__db_err(dbenv,
+		    "No recovery flag was specified, and recovery is needed");
+				ret = DB_RUNRECOVERY;
+				goto err;
+			}
+		} else
+			LF_CLR(DB_RECOVER | DB_RECOVER_FATAL);
+	}
+
 	/*
 	 * If we're doing recovery, destroy the environment so that we create
-	 * all the regions from scratch.  I'd like to reuse already created
-	 * regions, but that's hard.  We would have to create the environment
-	 * region from scratch, at least, as we have no way of knowing if its
-	 * linked lists are corrupted.
-	 *
-	 * I suppose we could set flags while modifying those links, but that
-	 * is going to be difficult to get right.  The major concern I have
-	 * is if the application stomps the environment with a rogue pointer.
-	 * We have no way of detecting that, and we could be forced into a
-	 * situation where we start up and then crash, repeatedly.
+	 * all the regions from scratch.  The major concern I have is if the
+	 * application stomps the environment with a rogue pointer.  We have
+	 * no way of detecting that, and we could be forced into a situation
+	 * where we start up and then crash, repeatedly.
 	 *
 	 * Note that we do not check any flags like DB_PRIVATE before calling
 	 * remove.  We don't care if the current environment was private or
 	 * not, we just want to nail any files that are left-over for whatever
 	 * reason, from whatever session.
 	 */
-	if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))
-		if ((ret = __dbenv_remove_int(dbenv, db_home, DB_FORCE)) != 0 ||
-		    (ret = __dbenv_refresh(dbenv, orig_flags, 0)) != 0)
-			return (ret);
+	if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL)) {
+		orig_flags = dbenv->flags;
+		if ((ret = __env_remove_int(dbenv, db_home, DB_FORCE)) != 0 ||
+		    (ret = __env_refresh(dbenv, orig_flags, 0)) != 0)
+			goto err;
+	}
+
+	ret = __env_open(dbenv, db_home, flags, mode);
+	if (ret == 0 && dbenv->thr_hashtab != NULL &&
+	    (t_ret = __env_set_state(dbenv, &ip, THREAD_OUT)) != 0)
+		ret = t_ret;
+
+err:	if (need_recovery) {
+		/*
+		 * If recovery succeeded, release our exclusive lock, other
+		 * processes can now proceed.
+		 *
+		 * If recovery failed, unregister now.
+		 */
+		if (ret == 0 && (t_ret = __envreg_xunlock(dbenv)) != 0)
+			ret = t_ret;
+		if (ret != 0)
+			(void)__envreg_unregister(dbenv, 1);
+	}
+
+	return (ret);
+}
+
+/*
+ * __env_open --
+ *	DB_ENV->open.
+ *
+ * PUBLIC: int __env_open __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__env_open(dbenv, db_home, flags, mode)
+	DB_ENV *dbenv;
+	const char *db_home;
+	u_int32_t flags;
+	int mode;
+{
+	DB_THREAD_INFO *ip;
+	REGINFO *infop;
+	u_int32_t init_flags, orig_flags;
+	int rep_check, ret;
+
+	orig_flags = dbenv->flags;
+	rep_check = 0;
+	ip = NULL;
 
 	/* Initialize the DB_ENV structure. */
-	if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+	if ((ret = __env_config(dbenv, db_home, flags)) != 0)
 		goto err;
 
 	/* Convert the DB_ENV->open flags to internal flags. */
@@ -184,12 +271,25 @@ __dbenv_open(dbenv, db_home, flags, mode)
 		F_SET(dbenv, DB_ENV_THREAD);
 
 	/* Default permissions are read-write for both owner and group. */
-	dbenv->db_mode = mode == 0 ? __db_omode("rwrw--") : mode;
+	dbenv->db_mode = mode == 0 ? __db_omode("rw-rw----") : mode;
 
 	/*
-	 * Create/join the environment.  We pass in the flags that will be of
-	 * interest to an environment joining later; if we're not the ones to
-	 * do the create, we pull out whatever has been stored.
+	 * Flags saved in the init_flags field of the environment, representing
+	 * flags to DB_ENV->set_flags and DB_ENV->open that need to be set.
+	 */
+#define	DB_INITENV_CDB		0x0001	/* DB_INIT_CDB */
+#define	DB_INITENV_CDB_ALLDB	0x0002	/* DB_INIT_CDB_ALLDB */
+#define	DB_INITENV_LOCK		0x0004	/* DB_INIT_LOCK */
+#define	DB_INITENV_LOG		0x0008	/* DB_INIT_LOG */
+#define	DB_INITENV_MPOOL	0x0010	/* DB_INIT_MPOOL */
+#define	DB_INITENV_REP		0x0020	/* DB_INIT_REP */
+#define	DB_INITENV_TXN		0x0040	/* DB_INIT_TXN */
+
+	/*
+	 * Create/join the environment.  We pass in the flags of interest to
+	 * a thread subsequently joining an environment we create.  If we're
+	 * not the ones to create the environment, our flags will be updated
+	 * to match the existing environment.
 	 */
 	init_flags = 0;
 	if (LF_ISSET(DB_INIT_CDB))
@@ -213,45 +313,29 @@ __dbenv_open(dbenv, db_home, flags, mode)
 	 * __db_e_attach will return the saved init_flags field, which contains
 	 * the DB_INIT_* flags used when the environment was created.
 	 *
-	 * Check if existing environment flags conflict with our flags.
+	 * We may be joining an environment -- reset our flags to match the
+	 * ones in the environment.
 	 */
-	if (LF_ISSET(DB_INIT_CDB) && FLD_ISSET(init_flags, DB_INITENV_TXN)) {
-		__db_err(dbenv,
-		    "Concurrent Data Store incompatible with environment");
-		ret = EINVAL;
+	if (FLD_ISSET(init_flags, DB_INITENV_CDB))
+		LF_SET(DB_INIT_CDB);
+	if (FLD_ISSET(init_flags, DB_INITENV_LOCK))
+		LF_SET(DB_INIT_LOCK);
+	if (FLD_ISSET(init_flags, DB_INITENV_LOG))
+		LF_SET(DB_INIT_LOG);
+	if (FLD_ISSET(init_flags, DB_INITENV_MPOOL))
+		LF_SET(DB_INIT_MPOOL);
+	if (FLD_ISSET(init_flags, DB_INITENV_REP))
+		LF_SET(DB_INIT_REP);
+	if (FLD_ISSET(init_flags, DB_INITENV_TXN))
+		LF_SET(DB_INIT_TXN);
+	if (FLD_ISSET(init_flags, DB_INITENV_CDB_ALLDB) &&
+	    (ret = __env_set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0)
 		goto err;
-	}
-	if (LF_ISSET(DB_INIT_TXN) && FLD_ISSET(init_flags, DB_INITENV_CDB)) {
-		__db_err(dbenv,
-		    "Transactional Data Store incompatible with environment");
-		ret = EINVAL;
-		goto err;
-	}
-
-	/* If we're joining the environment, find out what we're joining. */
-	if (LF_ISSET(DB_JOINENV)) {
-		LF_CLR(DB_JOINENV);
-		if (FLD_ISSET(init_flags, DB_INITENV_CDB))
-			LF_SET(DB_INIT_CDB);
-		if (FLD_ISSET(init_flags, DB_INITENV_LOCK))
-			LF_SET(DB_INIT_LOCK);
-		if (FLD_ISSET(init_flags, DB_INITENV_LOG))
-			LF_SET(DB_INIT_LOG);
-		if (FLD_ISSET(init_flags, DB_INITENV_MPOOL))
-			LF_SET(DB_INIT_MPOOL);
-		if (FLD_ISSET(init_flags, DB_INITENV_REP))
-			LF_SET(DB_INIT_REP);
-		if (FLD_ISSET(init_flags, DB_INITENV_TXN))
-			LF_SET(DB_INIT_TXN);
-		if (FLD_ISSET(init_flags, DB_INITENV_CDB_ALLDB) &&
-		    (ret = __dbenv_set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0)
-			goto err;
-	}
 
 	/*
-	 * Save the flags passed to create the DB_ENV->open, that is, we've
-	 * now replaced flags like DB_JOINENV with the flags responsible for
-	 * the underlying set of subsystems.
+	 * Save the flags matching the database environment: we've replaced
+	 * the argument flags with the flags corresponding to the existing,
+	 * underlying set of subsystems.
 	 */
 	dbenv->open_flags = flags;
 
@@ -261,18 +345,37 @@ __dbenv_open(dbenv, db_home, flags, mode)
 		F_SET(dbenv, DB_ENV_CDB);
 	}
 
+	/*
+	 * The DB_ENV structure has been initialized.  This has to be set
+	 * before we start calling into the subsystems, some of them look
+	 * for it.
+	 */
+	F_SET(dbenv, DB_ENV_OPEN_CALLED);
+
 	/*
 	 * Initialize the subsystems.
 	 *
-	 * Initialize the replication area first, so that we can lock out this
+	 * Initialize the mutex regions first.  There's no ordering requirement,
+	 * but it's simpler to get this in place so we don't have to keep track
+	 * of mutexes for later allocation, once the mutex region is created we
+	 * can go ahead and do the allocation for real.
+	 */
+	if ((ret = __mutex_open(dbenv)) != 0)
+		goto err;
+
+	/* __mutex_open creates the thread info region, enter it now. */
+	ENV_ENTER(dbenv, ip);
+
+	/*
+	 * Initialize the replication area next, so that we can lock out this
 	 * call if we're currently running recovery for replication.
 	 */
 	if (LF_ISSET(DB_INIT_REP) && (ret = __rep_open(dbenv)) != 0)
 		goto err;
 
 	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
+	if (rep_check && (ret = __env_rep_enter(dbenv, 0)) != 0)
+		goto err;
 
 	if (LF_ISSET(DB_INIT_MPOOL))
 		if ((ret = __memp_open(dbenv)) != 0)
@@ -343,7 +446,7 @@ __dbenv_open(dbenv, db_home, flags, mode)
 	 * mpool wasn't initialized, then we can't ever open a DB handle.
 	 *
 	 * We also need to initialize the MT mutex as necessary, so do them
-	 * both.  If we error, __dbenv_refresh() will clean up.
+	 * both.
 	 *
 	 * !!!
 	 * This must come after the __memp_open call above because if we are
@@ -353,21 +456,17 @@ __dbenv_open(dbenv, db_home, flags, mode)
 	 */
 	LIST_INIT(&dbenv->dblist);
 	if (LF_ISSET(DB_INIT_MPOOL)) {
-		dbmp = dbenv->mp_handle;
-		if (F_ISSET(dbenv, DB_ENV_THREAD)) {
-			if ((ret = __db_mutex_setup(
-			    dbenv, dbmp->reginfo, &dbenv->dblist_mutexp,
-			    MUTEX_ALLOC | MUTEX_THREAD)) != 0)
-				goto err;
-			if ((ret = __db_mutex_setup(
-			    dbenv, dbmp->reginfo, &dbenv->mt_mutexp,
-			    MUTEX_ALLOC | MUTEX_THREAD)) != 0)
-				goto err;
-		}
+		if ((ret = __mutex_alloc(dbenv, MTX_ENV_DBLIST,
+		    DB_MUTEX_THREAD, &dbenv->mtx_dblist)) != 0)
+			goto err;
+		if ((ret = __mutex_alloc(dbenv,
+		    MTX_TWISTER, DB_MUTEX_THREAD, &dbenv->mtx_mt)) != 0)
+			goto err;
+
 		/* Register DB's pgin/pgout functions.  */
 		if ((ret = __memp_register(
 		    dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
-			return (ret);
+			goto err;
 	}
 
 	/* Perform recovery for any previous run. */
@@ -386,47 +485,55 @@ __dbenv_open(dbenv, db_home, flags, mode)
 	 * transaction ID and logs the reset if that's appropriate, so we
 	 * don't need to do anything here in the recover case.
 	 */
+	infop = dbenv->reginfo;
 	if (TXN_ON(dbenv) &&
 	    !F_ISSET(dbenv, DB_ENV_LOG_INMEMORY) &&
-	    F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE) &&
+	    F_ISSET(infop, REGION_CREATE) &&
 	    !LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
 	    (ret = __txn_reset(dbenv)) != 0)
 		goto err;
 
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	/* The database environment is ready for business. */
+	if ((ret = __db_e_golive(dbenv)) != 0)
+		goto err;
 
+	if (rep_check && (ret = __env_db_rep_exit(dbenv)) != 0)
+		goto err;
+
+	ENV_LEAVE(dbenv, ip);
 	return (0);
 
 err:	/*
-	 * If we fail after creating the regions, remove them.
+	 * If we fail after creating the regions, panic and remove them.
 	 *
 	 * !!!
 	 * No need to call __env_db_rep_exit, that work is done by the calls to
-	 * __dbenv_refresh.
+	 * __env_refresh.
 	 */
-	if (dbenv->reginfo != NULL &&
-	    F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE)) {
+	infop = dbenv->reginfo;
+	if (infop != NULL && F_ISSET(infop, REGION_CREATE)) {
 		ret = __db_panic(dbenv, ret);
 
 		/* Refresh the DB_ENV so we can use it to call remove. */
-		(void)__dbenv_refresh(dbenv, orig_flags, rep_check);
-		(void)__dbenv_remove_int(dbenv, db_home, DB_FORCE);
-		(void)__dbenv_refresh(dbenv, orig_flags, 0);
+		(void)__env_refresh(dbenv, orig_flags, rep_check);
+		(void)__env_remove_int(dbenv, db_home, DB_FORCE);
+		(void)__env_refresh(dbenv, orig_flags, 0);
 	} else
-		(void)__dbenv_refresh(dbenv, orig_flags, rep_check);
+		(void)__env_refresh(dbenv, orig_flags, rep_check);
 
+	if (ip != NULL)
+		ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
 /*
- * __dbenv_remove --
+ * __env_remove --
  *	DB_ENV->remove.
  *
- * PUBLIC: int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+ * PUBLIC: int __env_remove __P((DB_ENV *, const char *, u_int32_t));
  */
 int
-__dbenv_remove(dbenv, db_home, flags)
+__env_remove(dbenv, db_home, flags)
 	DB_ENV *dbenv;
 	const char *db_home;
 	u_int32_t flags;
@@ -443,20 +550,20 @@ __dbenv_remove(dbenv, db_home, flags)
 
 	ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->remove");
 
-	ret = __dbenv_remove_int(dbenv, db_home, flags);
+	ret = __env_remove_int(dbenv, db_home, flags);
 
-	if ((t_ret = __dbenv_close(dbenv, 0)) != 0 && ret == 0)
+	if ((t_ret = __env_close(dbenv, 0)) != 0 && ret == 0)
 		ret = t_ret;
 
 	return (ret);
 }
 
 /*
- * __dbenv_remove_int --
+ * __env_remove_int --
  *	Discard an environment, internal version.
  */
 static int
-__dbenv_remove_int(dbenv, db_home, flags)
+__env_remove_int(dbenv, db_home, flags)
 	DB_ENV *dbenv;
 	const char *db_home;
 	u_int32_t flags;
@@ -464,19 +571,22 @@ __dbenv_remove_int(dbenv, db_home, flags)
 	int ret;
 
 	/* Initialize the DB_ENV structure. */
-	if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+	if ((ret = __env_config(dbenv, db_home, flags)) != 0)
 		return (ret);
 
+	/* The DB_ENV structure has been initialized. */
+	F_SET(dbenv, DB_ENV_OPEN_CALLED);
+
 	/* Remove the environment. */
 	return (__db_e_remove(dbenv, flags));
 }
 
 /*
- * __dbenv_config --
- *	Minor initialization of the DB_ENV structure, read the DB_CONFIG file.
+ * __env_config --
+ *	Initialization of the DB_ENV structure, read the DB_CONFIG file.
  */
 static int
-__dbenv_config(dbenv, db_home, flags)
+__env_config(dbenv, db_home, flags)
 	DB_ENV *dbenv;
 	const char *db_home;
 	u_int32_t flags;
@@ -532,29 +642,28 @@ __dbenv_config(dbenv, db_home, flags)
 	if (dbenv->db_tmp_dir == NULL && (ret = __os_tmpdir(dbenv, flags)) != 0)
 		return (ret);
 
-	/* Flag that the DB_ENV structure has been initialized. */
-	F_SET(dbenv, DB_ENV_OPEN_CALLED);
-
 	return (0);
 }
 
 /*
- * __dbenv_close_pp --
+ * __env_close_pp --
  *	DB_ENV->close pre/post processor.
  *
- * PUBLIC: int __dbenv_close_pp __P((DB_ENV *, u_int32_t));
+ * PUBLIC: int __env_close_pp __P((DB_ENV *, u_int32_t));
  */
 int
-__dbenv_close_pp(dbenv, flags)
+__env_close_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
+	DB_THREAD_INFO *ip;
 	int rep_check, ret, t_ret;
 
 	ret = 0;
 
 	PANIC_CHECK(dbenv);
 
+	ENV_ENTER(dbenv, ip);
 	/*
 	 * Validate arguments, but as a DB_ENV handle destructor, we can't
 	 * fail.
@@ -564,23 +673,24 @@ __dbenv_close_pp(dbenv, flags)
 		ret = t_ret;
 
 	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-
-	if ((t_ret = __dbenv_close(dbenv, rep_check)) != 0 && ret == 0)
+	if (rep_check && (t_ret = __env_rep_enter(dbenv, 0)) != 0 && ret == 0)
 		ret = t_ret;
 
+	if ((t_ret = __env_close(dbenv, rep_check)) != 0 && ret == 0)
+		ret = t_ret;
+
+	/* Don't ENV_LEAVE as we have already detached from the region. */
 	return (ret);
 }
 
 /*
- * __dbenv_close --
+ * __env_close --
  *	DB_ENV->close.
  *
- * PUBLIC: int __dbenv_close __P((DB_ENV *, int));
+ * PUBLIC: int __env_close __P((DB_ENV *, int));
  */
 int
-__dbenv_close(dbenv, rep_check)
+__env_close(dbenv, rep_check)
 	DB_ENV *dbenv;
 	int rep_check;
 {
@@ -598,14 +708,14 @@ __dbenv_close(dbenv, rep_check)
 		ret = t_ret;
 
 	if (REP_ON(dbenv) &&
-	    (t_ret = __rep_preclose(dbenv, 1)) != 0 && ret == 0)
+	    (t_ret = __rep_preclose(dbenv)) != 0 && ret == 0)
 		ret = t_ret;
 
 	/*
 	 * Detach from the regions and undo the allocations done by
 	 * DB_ENV->open.
 	 */
-	if ((t_ret = __dbenv_refresh(dbenv, 0, rep_check)) != 0 && ret == 0)
+	if ((t_ret = __env_refresh(dbenv, 0, rep_check)) != 0 && ret == 0)
 		ret = t_ret;
 
 	/* Do per-subsystem close. */
@@ -635,6 +745,12 @@ __dbenv_close(dbenv, rep_check)
 		__os_free(dbenv, dbenv->db_data_dir);
 	}
 
+	/* If we're registered, clean up. */
+	if (dbenv->registry != NULL) {
+		(void)__envreg_unregister(dbenv, 0);
+		dbenv->registry = NULL;
+	}
+
 	/* Discard the structure. */
 	memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
 	__os_free(NULL, dbenv);
@@ -643,23 +759,22 @@ __dbenv_close(dbenv, rep_check)
 }
 
 /*
- * __dbenv_refresh --
+ * __env_refresh --
  *	Refresh the DB_ENV structure, releasing resources allocated by
  * DB_ENV->open, and returning it to the state it was in just before
  * open was called.  (Note that this means that any state set by
  * pre-open configuration functions must be preserved.)
  */
 static int
-__dbenv_refresh(dbenv, orig_flags, rep_check)
+__env_refresh(dbenv, orig_flags, rep_check)
 	DB_ENV *dbenv;
 	u_int32_t orig_flags;
 	int rep_check;
 {
 	DB *ldbp;
-	DB_MPOOL *dbmp;
+	DB_THREAD_INFO *ip;
 	int ret, t_ret;
 
-	dbmp = dbenv->mp_handle;
 	ret = 0;
 
 	/*
@@ -667,10 +782,10 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 	 * must be first, it may want to discard locks and flush the log).
 	 *
 	 * !!!
-	 * Note that these functions, like all of __dbenv_refresh, only undo
-	 * the effects of __dbenv_open.  Functions that undo work done by
+	 * Note that these functions, like all of __env_refresh, only undo
+	 * the effects of __env_open.  Functions that undo work done by
 	 * db_env_create or by a configuration function should go in
-	 * __dbenv_close.
+	 * __env_close.
 	 */
 	if (TXN_ON(dbenv) &&
 	    (t_ret = __txn_dbenv_refresh(dbenv)) != 0 && ret == 0)
@@ -686,11 +801,10 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 	 */
 	if (LOCKING_ON(dbenv)) {
 		if (!F_ISSET(dbenv, DB_ENV_THREAD) &&
-		    dbenv->env_lid != DB_LOCK_INVALIDID &&
-		    (t_ret = __lock_id_free(dbenv, dbenv->env_lid)) != 0 &&
-		    ret == 0)
+		    dbenv->env_lref != NULL && (t_ret = __lock_id_free(dbenv,
+		    ((DB_LOCKER *)dbenv->env_lref)->id)) != 0 && ret == 0)
 			ret = t_ret;
-		dbenv->env_lid = DB_LOCK_INVALIDID;
+		dbenv->env_lref = NULL;
 
 		if ((t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0)
 			ret = t_ret;
@@ -708,20 +822,24 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 	 * log file handles.  Ick.
 	 */
 	if (dbenv->db_ref != 0) {
-		__db_err(dbenv, "Database handles remain at environment close");
+		__db_err(dbenv,
+		    "Database handles still open at environment close");
 		for (ldbp = LIST_FIRST(&dbenv->dblist);
 		    ldbp != NULL; ldbp = LIST_NEXT(ldbp, dblistlinks))
 			__db_err(dbenv, "Open database handle: %s%s%s",
-			    ldbp->fname, ldbp->dname == NULL ? "" : "/",
+			    ldbp->fname == NULL ? "unnamed" : ldbp->fname,
+			    ldbp->dname == NULL ? "" : "/",
 			    ldbp->dname == NULL ? "" : ldbp->dname);
 		if (ret == 0)
 			ret = EINVAL;
 	}
 	LIST_INIT(&dbenv->dblist);
-	if (dbenv->dblist_mutexp != NULL)
-		__db_mutex_free(dbenv, dbmp->reginfo, dbenv->dblist_mutexp);
-	if (dbenv->mt_mutexp != NULL)
-		__db_mutex_free(dbenv, dbmp->reginfo, dbenv->mt_mutexp);
+
+	if ((t_ret = __mutex_free(dbenv, &dbenv->mtx_dblist)) != 0 && ret == 0)
+		ret = t_ret;
+	if ((t_ret = __mutex_free(dbenv, &dbenv->mtx_mt)) != 0 && ret == 0)
+		ret = t_ret;
+
 	if (dbenv->mt != NULL) {
 		__os_free(dbenv, dbenv->mt);
 		dbenv->mt = NULL;
@@ -751,8 +869,8 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 	 * as soon as we drop the handle count, there's little opportunity
 	 * to do harm.
 	 */
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	if (rep_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/*
 	 * Detach from the region.
@@ -761,6 +879,18 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 	 */
 	__rep_dbenv_refresh(dbenv);
 
+	/*
+	 * Mark the thread as out of the env before we get rid
+	 * of the handles needed to do so.
+	 */
+	if (dbenv->thr_hashtab != NULL &&
+	    (t_ret = __env_set_state(dbenv, &ip, THREAD_OUT)) != 0 && ret == 0)
+		ret = t_ret;
+
+	if (MUTEX_ON(dbenv) &&
+	    (t_ret = __mutex_dbenv_refresh(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
 	if (dbenv->reginfo != NULL) {
 		if ((t_ret = __db_e_detach(dbenv, 0)) != 0 && ret == 0)
 			ret = t_ret;
@@ -771,11 +901,19 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 		 */
 	}
 
-	/* Undo changes and allocations done by __dbenv_open. */
+	/* Undo changes and allocations done by __env_open. */
 	if (dbenv->db_home != NULL) {
 		__os_free(dbenv, dbenv->db_home);
 		dbenv->db_home = NULL;
 	}
+	if (dbenv->db_abshome != NULL) {
+		__os_free(dbenv, dbenv->db_abshome);
+		dbenv->db_abshome = NULL;
+	}
+	if (dbenv->mutex_iq != NULL) {
+		__os_free(dbenv, dbenv->mutex_iq);
+		dbenv->mutex_iq = NULL;
+	}
 
 	dbenv->open_flags = 0;
 	dbenv->db_mode = 0;
@@ -813,13 +951,13 @@ __dbenv_refresh(dbenv, orig_flags, rep_check)
 }
 
 /*
- * __dbenv_get_open_flags
+ * __env_get_open_flags
  *	Retrieve the flags passed to DB_ENV->open.
  *
- * PUBLIC: int __dbenv_get_open_flags __P((DB_ENV *, u_int32_t *));
+ * PUBLIC: int __env_get_open_flags __P((DB_ENV *, u_int32_t *));
  */
 int
-__dbenv_get_open_flags(dbenv, flagsp)
+__env_get_open_flags(dbenv, flagsp)
 	DB_ENV *dbenv;
 	u_int32_t *flagsp;
 {
@@ -965,7 +1103,9 @@ __db_home(dbenv, db_home, flags)
 	const char *db_home;
 	u_int32_t flags;
 {
+	int ret;
 	const char *p;
+	char path[MAXPATHLEN];
 
 	/*
 	 * Use db_home by default, this allows utilities to reasonably
@@ -980,8 +1120,35 @@ __db_home(dbenv, db_home, flags)
 		__db_err(dbenv, "illegal DB_HOME environment variable");
 		return (EINVAL);
 	}
+	if (p != NULL && (ret = __os_strdup(dbenv, p, &dbenv->db_home)) != 0)
+		return (ret);
 
-	return (p == NULL ? 0 : __os_strdup(dbenv, p, &dbenv->db_home));
+	/*
+	 * Get the absolute pathname of the current directory.  We use this
+	 * to build absolute pathnames when removing log files.
+	 *
+	 * XXX
+	 * Can't trust getcwd(3) to set a valid errno, so don't try to display
+	 * one unless we know it's good.  It's likely a permissions problem:
+	 * use something bland and useless in the default return value, so we
+	 * don't send somebody off in the wrong direction.
+	 */
+	__os_set_errno(0);
+	if ((p = getcwd(path, sizeof(path))) == NULL) {
+		if ((ret = __os_get_errno()) == 0) {
+			__db_err(dbenv,
+			    "no absolute path for the current directory");
+			ret = EAGAIN;
+		} else
+			__db_err(dbenv,
+			    "no absolute path for the current directory: %s",
+			    db_strerror(ret));
+		return (ret);
+	}
+	if (p != NULL && (ret = __os_strdup(dbenv, p, &dbenv->db_abshome)) != 0)
+		return (ret);
+
+	return (0);
 }
 
 #define	__DB_OVFL(v, max)						\
@@ -1042,7 +1209,53 @@ illegal:	__db_err(dbenv, "mis-formatted name-value pair: %s", s);
 	}
 	*p = '\0';
 
-	if (!strcasecmp(name, "set_cachesize")) {
+	if (strcasecmp(name, "mutex_set_align") == 0) {
+		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+			goto badarg;
+		__DB_OVFL(v1, UINT32_MAX);
+		return (__mutex_set_align(dbenv, (u_int32_t)v1));
+	}
+
+	if (strcasecmp(name, "mutex_set_increment") == 0) {
+		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+			goto badarg;
+		__DB_OVFL(v1, UINT32_MAX);
+		return (__mutex_set_increment(dbenv, (u_int32_t)v1));
+	}
+
+	if (strcasecmp(name, "mutex_set_max") == 0) {
+		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+			goto badarg;
+		__DB_OVFL(v1, UINT32_MAX);
+		return (__mutex_set_max(dbenv, (u_int32_t)v1));
+	}
+
+	if (strcasecmp(name, "mutex_set_tas_spins") == 0) {
+		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+			goto badarg;
+		__DB_OVFL(v1, UINT32_MAX);
+		return (__mutex_set_tas_spins(dbenv, (u_int32_t)v1));
+	}
+
+	if (strcasecmp(name, "rep_set_config") == 0) {
+		if (sscanf(value, "%40s %c", arg, &v4) != 1)
+			goto badarg;
+
+		if (strcasecmp(value, "rep_bulk") == 0)
+			return (__rep_set_config(dbenv,
+			    DB_REP_CONF_BULK, 1));
+		if (strcasecmp(value, "rep_delayclient") == 0)
+			return (__rep_set_config(dbenv,
+			    DB_REP_CONF_DELAYCLIENT, 1));
+		if (strcasecmp(value, "rep_noautoinit") == 0)
+			return (__rep_set_config(dbenv,
+			    DB_REP_CONF_NOAUTOINIT, 1));
+		if (strcasecmp(value, "rep_nowait") == 0)
+			return (__rep_set_config(dbenv, DB_REP_CONF_NOWAIT, 1));
+		goto badarg;
+	}
+
+	if (strcasecmp(name, "set_cachesize") == 0) {
 		if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
@@ -1052,137 +1265,147 @@ illegal:	__db_err(dbenv, "mis-formatted name-value pair: %s", s);
 		    dbenv, (u_int32_t)v1, (u_int32_t)v2, (int)v3));
 	}
 
-	if (!strcasecmp(name, "set_data_dir") ||
-	    !strcasecmp(name, "db_data_dir"))		/* Compatibility. */
-		return (__dbenv_set_data_dir(dbenv, value));
+	if (strcasecmp(name, "set_data_dir") == 0 ||
+	    strcasecmp(name, "db_data_dir") == 0)	/* Compatibility. */
+		return (__env_set_data_dir(dbenv, value));
 
-	if (!strcasecmp(name, "set_intermediate_dir")) {/* Undocumented. */
+							/* Undocumented. */
+	if (strcasecmp(name, "set_intermediate_dir") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 #ifdef INT_MAX
 		__DB_OVFL(v1, INT_MAX);
 #endif
-		return (__dbenv_set_intermediate_dir(dbenv, (int)v1, 0));
+		return (__env_set_intermediate_dir(dbenv, (int)v1, 0));
 	}
 
-	if (!strcasecmp(name, "set_flags")) {
+	if (strcasecmp(name, "set_flags") == 0) {
 		if (sscanf(value, "%40s %c", arg, &v4) != 1)
 			goto badarg;
 
-		if (!strcasecmp(value, "db_auto_commit"))
-			return (__dbenv_set_flags(dbenv, DB_AUTO_COMMIT, 1));
-		if (!strcasecmp(value, "db_cdb_alldb"))
-			return (__dbenv_set_flags(dbenv, DB_CDB_ALLDB, 1));
-		if (!strcasecmp(value, "db_direct_db"))
-			return (__dbenv_set_flags(dbenv, DB_DIRECT_DB, 1));
-		if (!strcasecmp(value, "db_direct_log"))
-			return (__dbenv_set_flags(dbenv, DB_DIRECT_LOG, 1));
-		if (!strcasecmp(value, "db_dsync_log"))
-			return (__dbenv_set_flags(dbenv, DB_DSYNC_LOG, 1));
-		if (!strcasecmp(value, "db_log_autoremove"))
-			return (__dbenv_set_flags(dbenv, DB_LOG_AUTOREMOVE, 1));
-		if (!strcasecmp(value, "db_log_inmemory"))
-			return (__dbenv_set_flags(dbenv, DB_LOG_INMEMORY, 1));
-		if (!strcasecmp(value, "db_nolocking"))
-			return (__dbenv_set_flags(dbenv, DB_NOLOCKING, 1));
-		if (!strcasecmp(value, "db_nommap"))
-			return (__dbenv_set_flags(dbenv, DB_NOMMAP, 1));
-		if (!strcasecmp(value, "db_nopanic"))
-			return (__dbenv_set_flags(dbenv, DB_NOPANIC, 1));
-		if (!strcasecmp(value, "db_overwrite"))
-			return (__dbenv_set_flags(dbenv, DB_OVERWRITE, 1));
-		if (!strcasecmp(value, "db_region_init"))
-			return (__dbenv_set_flags(dbenv, DB_REGION_INIT, 1));
-		if (!strcasecmp(value, "db_txn_nosync"))
-			return (__dbenv_set_flags(dbenv, DB_TXN_NOSYNC, 1));
-		if (!strcasecmp(value, "db_txn_write_nosync"))
+		if (strcasecmp(value, "db_auto_commit") == 0)
+			return (__env_set_flags(dbenv, DB_AUTO_COMMIT, 1));
+		if (strcasecmp(value, "db_cdb_alldb") == 0)
+			return (__env_set_flags(dbenv, DB_CDB_ALLDB, 1));
+		if (strcasecmp(value, "db_direct_db") == 0)
+			return (__env_set_flags(dbenv, DB_DIRECT_DB, 1));
+		if (strcasecmp(value, "db_direct_log") == 0)
+			return (__env_set_flags(dbenv, DB_DIRECT_LOG, 1));
+		if (strcasecmp(value, "db_dsync_db") == 0)
+			return (__env_set_flags(dbenv, DB_DSYNC_DB, 1));
+		if (strcasecmp(value, "db_dsync_log") == 0)
+			return (__env_set_flags(dbenv, DB_DSYNC_LOG, 1));
+		if (strcasecmp(value, "db_log_autoremove") == 0)
+			return (__env_set_flags(dbenv, DB_LOG_AUTOREMOVE, 1));
+		if (strcasecmp(value, "db_log_inmemory") == 0)
+			return (__env_set_flags(dbenv, DB_LOG_INMEMORY, 1));
+		if (strcasecmp(value, "db_nolocking") == 0)
+			return (__env_set_flags(dbenv, DB_NOLOCKING, 1));
+		if (strcasecmp(value, "db_nommap") == 0)
+			return (__env_set_flags(dbenv, DB_NOMMAP, 1));
+		if (strcasecmp(value, "db_nopanic") == 0)
+			return (__env_set_flags(dbenv, DB_NOPANIC, 1));
+		if (strcasecmp(value, "db_overwrite") == 0)
+			return (__env_set_flags(dbenv, DB_OVERWRITE, 1));
+		if (strcasecmp(value, "db_region_init") == 0)
+			return (__env_set_flags(dbenv, DB_REGION_INIT, 1));
+		if (strcasecmp(value, "db_txn_nosync") == 0)
+			return (__env_set_flags(dbenv, DB_TXN_NOSYNC, 1));
+		if (strcasecmp(value, "db_txn_write_nosync") == 0)
 			return (
-			    __dbenv_set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1));
-		if (!strcasecmp(value, "db_yieldcpu"))
-			return (__dbenv_set_flags(dbenv, DB_YIELDCPU, 1));
+			    __env_set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1));
+		if (strcasecmp(value, "db_yieldcpu") == 0)
+			return (__env_set_flags(dbenv, DB_YIELDCPU, 1));
 		goto badarg;
 	}
 
-	if (!strcasecmp(name, "set_lg_bsize")) {
+	if (strcasecmp(name, "set_lg_bsize") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__log_set_lg_bsize(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lg_max")) {
+	if (strcasecmp(name, "set_lg_filemode") == 0) {
+		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+			goto badarg;
+		__DB_OVFL(v1, INT_MAX);
+		return (__log_set_lg_filemode(dbenv, (int)v1));
+	}
+
+	if (strcasecmp(name, "set_lg_max") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__log_set_lg_max(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lg_regionmax")) {
+	if (strcasecmp(name, "set_lg_regionmax") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__log_set_lg_regionmax(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lg_dir") ||
-	    !strcasecmp(name, "db_log_dir"))		/* Compatibility. */
+	if (strcasecmp(name, "set_lg_dir") == 0 ||
+	    strcasecmp(name, "db_log_dir") == 0)	/* Compatibility. */
 		return (__log_set_lg_dir(dbenv, value));
 
-	if (!strcasecmp(name, "set_lk_detect")) {
+	if (strcasecmp(name, "set_lk_detect") == 0) {
 		if (sscanf(value, "%40s %c", arg, &v4) != 1)
 			goto badarg;
-		if (!strcasecmp(value, "db_lock_default"))
+		if (strcasecmp(value, "db_lock_default") == 0)
 			flags = DB_LOCK_DEFAULT;
-		else if (!strcasecmp(value, "db_lock_expire"))
+		else if (strcasecmp(value, "db_lock_expire") == 0)
 			flags = DB_LOCK_EXPIRE;
-		else if (!strcasecmp(value, "db_lock_maxlocks"))
+		else if (strcasecmp(value, "db_lock_maxlocks") == 0)
 			flags = DB_LOCK_MAXLOCKS;
-		else if (!strcasecmp(value, "db_lock_maxwrite"))
+		else if (strcasecmp(value, "db_lock_maxwrite") == 0)
 			flags = DB_LOCK_MAXWRITE;
-		else if (!strcasecmp(value, "db_lock_minlocks"))
+		else if (strcasecmp(value, "db_lock_minlocks") == 0)
 			flags = DB_LOCK_MINLOCKS;
-		else if (!strcasecmp(value, "db_lock_minwrite"))
+		else if (strcasecmp(value, "db_lock_minwrite") == 0)
 			flags = DB_LOCK_MINWRITE;
-		else if (!strcasecmp(value, "db_lock_oldest"))
+		else if (strcasecmp(value, "db_lock_oldest") == 0)
 			flags = DB_LOCK_OLDEST;
-		else if (!strcasecmp(value, "db_lock_random"))
+		else if (strcasecmp(value, "db_lock_random") == 0)
 			flags = DB_LOCK_RANDOM;
-		else if (!strcasecmp(value, "db_lock_youngest"))
+		else if (strcasecmp(value, "db_lock_youngest") == 0)
 			flags = DB_LOCK_YOUNGEST;
 		else
 			goto badarg;
 		return (__lock_set_lk_detect(dbenv, flags));
 	}
 
-	if (!strcasecmp(name, "set_lk_max")) {
+	if (strcasecmp(name, "set_lk_max") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__lock_set_lk_max(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lk_max_locks")) {
+	if (strcasecmp(name, "set_lk_max_locks") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__lock_set_lk_max_locks(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lk_max_lockers")) {
+	if (strcasecmp(name, "set_lk_max_lockers") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__lock_set_lk_max_lockers(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lk_max_objects")) {
+	if (strcasecmp(name, "set_lk_max_objects") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__lock_set_lk_max_objects(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_lock_timeout")) {
+	if (strcasecmp(name, "set_lock_timeout") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
@@ -1190,14 +1413,14 @@ illegal:	__db_err(dbenv, "mis-formatted name-value pair: %s", s);
 		    dbenv, (u_int32_t)v1, DB_SET_LOCK_TIMEOUT));
 	}
 
-	if (!strcasecmp(name, "set_mp_max_openfd")) {
+	if (strcasecmp(name, "set_mp_max_openfd") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, INT_MAX);
 		return (__memp_set_mp_max_openfd(dbenv, (int)v1));
 	}
 
-	if (!strcasecmp(name, "set_mp_max_write")) {
+	if (strcasecmp(name, "set_mp_max_write") == 0) {
 		if (sscanf(value, "%lu %lu %c", &v1, &v2, &v4) != 2)
 			goto badarg;
 		__DB_OVFL(v1, INT_MAX);
@@ -1205,45 +1428,49 @@ illegal:	__db_err(dbenv, "mis-formatted name-value pair: %s", s);
 		return (__memp_set_mp_max_write(dbenv, (int)v1, (int)v2));
 	}
 
-	if (!strcasecmp(name, "set_mp_mmapsize")) {
+	if (strcasecmp(name, "set_mp_mmapsize") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__memp_set_mp_mmapsize(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_region_init")) {
+	if (strcasecmp(name, "set_region_init") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1 || v1 != 1)
 			goto badarg;
-		return (__dbenv_set_flags(
+		return (__env_set_flags(
 		    dbenv, DB_REGION_INIT, v1 == 0 ? 0 : 1));
 	}
 
-	if (!strcasecmp(name, "set_shm_key")) {
+	if (strcasecmp(name, "set_shm_key") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
-		return (__dbenv_set_shm_key(dbenv, (long)v1));
+		return (__env_set_shm_key(dbenv, (long)v1));
 	}
 
-	if (!strcasecmp(name, "set_tas_spins")) {
+	/*
+	 * The set_tas_spins method has been replaced by mutex_set_tas_spins.
+	 * The set_tas_spins name remains for DB_CONFIG compatibility.
+	 */
+	if (strcasecmp(name, "set_tas_spins") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
-		return (__dbenv_set_tas_spins(dbenv, (u_int32_t)v1));
+		return (__mutex_set_tas_spins(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_tmp_dir") ||
-	    !strcasecmp(name, "db_tmp_dir"))		/* Compatibility.*/
-		return (__dbenv_set_tmp_dir(dbenv, value));
+	if (strcasecmp(name, "set_tmp_dir") == 0 ||
+	    strcasecmp(name, "db_tmp_dir") == 0)	/* Compatibility.*/
+		return (__env_set_tmp_dir(dbenv, value));
 
-	if (!strcasecmp(name, "set_tx_max")) {
+	if (strcasecmp(name, "set_tx_max") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
 		return (__txn_set_tx_max(dbenv, (u_int32_t)v1));
 	}
 
-	if (!strcasecmp(name, "set_txn_timeout")) {
+	if (strcasecmp(name, "set_txn_timeout") == 0) {
 		if (sscanf(value, "%lu %c", &v1, &v4) != 1)
 			goto badarg;
 		__DB_OVFL(v1, UINT32_MAX);
@@ -1251,21 +1478,23 @@ illegal:	__db_err(dbenv, "mis-formatted name-value pair: %s", s);
 		    dbenv, (u_int32_t)v1, DB_SET_TXN_TIMEOUT));
 	}
 
-	if (!strcasecmp(name, "set_verbose")) {
+	if (strcasecmp(name, "set_verbose") == 0) {
 		if (sscanf(value, "%40s %c", arg, &v4) != 1)
 			goto badarg;
 
-		else if (!strcasecmp(value, "db_verb_deadlock"))
+		else if (strcasecmp(value, "db_verb_deadlock") == 0)
 			flags = DB_VERB_DEADLOCK;
-		else if (!strcasecmp(value, "db_verb_recovery"))
+		else if (strcasecmp(value, "db_verb_recovery") == 0)
 			flags = DB_VERB_RECOVERY;
-		else if (!strcasecmp(value, "db_verb_replication"))
+		else if (strcasecmp(value, "db_verb_register") == 0)
+			flags = DB_VERB_REGISTER;
+		else if (strcasecmp(value, "db_verb_replication") == 0)
 			flags = DB_VERB_REPLICATION;
-		else if (!strcasecmp(value, "db_verb_waitsfor"))
+		else if (strcasecmp(value, "db_verb_waitsfor") == 0)
 			flags = DB_VERB_WAITSFOR;
 		else
 			goto badarg;
-		return (__dbenv_set_verbose(dbenv, flags, 1));
+		return (__env_set_verbose(dbenv, flags, 1));
 	}
 
 	__db_err(dbenv, "unrecognized name-value pair: %s", s);
@@ -1290,7 +1519,8 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp)
 	char *path;
 	DB_FH **fhpp;
 {
-	u_int32_t id;
+	pid_t pid;
+	db_threadid_t tid;
 	int filenum, i, isdir, ret;
 	char *firstx, *trv;
 
@@ -1312,15 +1542,16 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp)
 	(void)strcat(path, DB_TRAIL);
 
 	/* Replace the X's with the process ID (in decimal). */
-	for (trv = path + strlen(path), __os_id(&id); *--trv == 'X'; id /= 10)
-		*trv = '0' + (id % 10);
+	__os_id(dbenv, &pid, &tid);
+	for (trv = path + strlen(path); *--trv == 'X'; pid /= 10)
+		*trv = '0' + (u_char)(pid % 10);
 	firstx = trv + 1;
 
 	/* Loop, trying to open a file. */
 	for (filenum = 1;; filenum++) {
 		if ((ret = __os_open(dbenv, path,
 		    tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
-		    __db_omode("rw----"), fhpp)) == 0)
+		    __db_omode(OWNER_RW), fhpp)) == 0)
 			return (0);
 
 		/*
@@ -1338,7 +1569,7 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhpp)
 
 		/*
 		 * Generate temporary file names in a backwards-compatible way.
-		 * If id == 12345, the result is:
+		 * If pid == 12345, the result is:
 		 *   /DB12345 (tried above, the first time through).
 		 *   /DBa2345 ...  /DBz2345
 		 *   /DBaa345 ...  /DBaz345
diff --git a/storage/bdb/env/env_recover.c b/storage/bdb/env/env_recover.c
index 1c018171f4e..7e7ebe9fec9 100644
--- a/storage/bdb/env/env_recover.c
+++ b/storage/bdb/env/env_recover.c
@@ -1,17 +1,17 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_recover.c,v 11.126 2004/09/22 03:43:52 bostic Exp $
+ * $Id: env_recover.c,v 12.10 2005/10/19 15:14:11 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef lint
 static const char copyright[] =
-    "Copyright (c) 1996-2004\nSleepycat Software Inc.  All rights reserved.\n";
+    "Copyright (c) 1996-2005\nSleepycat Software Inc.  All rights reserved.\n";
 #endif
 
 #ifndef NO_SYSTEM_INCLUDES
@@ -62,6 +62,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 	DBT data;
 	DB_LOGC *logc;
 	DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn, tlsn;
+	DB_TXNHEAD *txninfo;
 	DB_TXNREGION *region;
 	REGENV *renv;
 	REGINFO *infop;
@@ -71,18 +72,16 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 	u_int32_t hi_txn, log_size, txnid;
 	int32_t low;
 	int have_rec, progress, ret, t_ret;
-	int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
 	char *p, *pass, t1[60], t2[60];
-	void *txninfo;
 
-	COMPQUIET(nfiles, (double)0);
+	COMPQUIET(nfiles, (double)0.001);
 
 	logc = NULL;
 	ckp_args = NULL;
-	dtab = NULL;
 	hi_txn = TXN_MAXIMUM;
 	txninfo = NULL;
 	pass = "initial";
+	ZERO_LSN(lsn);
 
 	/*
 	 * XXX
@@ -95,7 +94,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 	/*
 	 * If we need to, update the env handle timestamp.
 	 */
-	if (update) {
+	if (update && REP_ON(dbenv)) {
 		infop = dbenv->reginfo;
 		renv = infop->primary;
 		(void)time(&renv->rep_timestamp);
@@ -326,8 +325,8 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 			    (double)((log_size - first_lsn.offset) +
 			    last_lsn.offset) / log_size;
 		/* We are going to divide by nfiles; make sure it isn't 0. */
-		if (nfiles == 0)
-			nfiles = (double)0.001;
+		if (nfiles < 0.001)
+			nfiles = 0.001;
 	}
 
 	/* Find a low txnid. */
@@ -501,6 +500,7 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 #endif
 	}
 
+done:
 	/* Take a checkpoint here to force any dirty data pages to disk. */
 	if ((ret = __txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
 		goto err;
@@ -509,7 +509,6 @@ __db_apprec(dbenv, max_lsn, trunclsn, update, flags)
 	if ((ret = __dbreg_close_files(dbenv)) != 0)
 		goto err;
 
-done:
 	if (max_lsn != NULL) {
 		if (!IS_ZERO_LSN(((DB_TXNHEAD *)txninfo)->ckplsn))
 			region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
@@ -559,13 +558,24 @@ done:
 		if ((ret = __env_openfiles(dbenv, logc,
 		    txninfo, &data, &first_lsn, NULL, nfiles, 1)) != 0)
 			goto err;
-	} else if (region->stat.st_nrestores == 0)
+	} else if (region->stat.st_nrestores == 0) {
 		/*
 		 * If there are no prepared transactions that need resolution,
 		 * we need to reset the transaction ID space and log this fact.
 		 */
 		if ((ret = __txn_reset(dbenv)) != 0)
 			goto err;
+	} else {
+		/*
+		 * If we have restored prepared txns then they are in process
+		 * as far as replication is concerned.
+		 */
+		if (REP_ON(dbenv))
+			((DB_REP *)dbenv->rep_handle)->region->op_cnt =
+			    region->stat.st_nrestores;
+		if ((ret = __txn_recycle_id(dbenv)) != 0)
+			goto err;
+	}
 
 	if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) {
 		(void)time(&now);
@@ -591,9 +601,6 @@ err:	if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0)
 	if (txninfo != NULL)
 		__db_txnlist_end(dbenv, txninfo);
 
-	if (dtab != NULL)
-		__os_free(dbenv, dtab);
-
 	dbenv->tx_timestamp = 0;
 
 	F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
@@ -627,8 +634,9 @@ __lsn_diff(low, high, current, max, is_forward)
 		if (current->file == low->file)
 			nf = (double)(current->offset - low->offset) / max;
 		else if (current->offset < low->offset)
-			nf = (double)(current->file - low->file - 1) +
-			    (double)(max - low->offset + current->offset) / max;
+			nf = (double)((current->file - low->file) - 1) +
+			    (double)((max - low->offset) + current->offset) /
+			    max;
 		else
 			nf = (double)(current->file - low->file) +
 			    (double)(current->offset - low->offset) / max;
@@ -636,9 +644,9 @@ __lsn_diff(low, high, current, max, is_forward)
 		if (current->file == high->file)
 			nf = (double)(high->offset - current->offset) / max;
 		else if (current->offset > high->offset)
-			nf = (double)(high->file - current->file - 1) +
+			nf = (double)((high->file - current->file) - 1) +
 			    (double)
-			    (max - current->offset + high->offset) / max;
+			    ((max - current->offset) + high->offset) / max;
 		else
 			nf = (double)(high->file - current->file) +
 			    (double)(high->offset - current->offset) / max;
diff --git a/storage/bdb/env/env_region.c b/storage/bdb/env/env_region.c
index 17d2e65d37c..6ef15a13d43 100644
--- a/storage/bdb/env/env_region.c
+++ b/storage/bdb/env/env_region.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_region.c,v 11.103 2004/10/15 16:59:41 bostic Exp $
+ * $Id: env_region.c,v 12.13 2005/10/21 19:13:01 bostic Exp $
  */
 
 #include "db_config.h"
@@ -12,22 +12,29 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
+#if TIME_WITH_SYS_TIME
+#include 
+#include 
+#else
+#if HAVE_SYS_TIME_H
+#include 
+#else
+#include 
+#endif
+#endif
+
 #include 
 #endif
 
 #include "db_int.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/crypto.h"
-#include "dbinc/lock.h"
-#include "dbinc/log.h"
 #include "dbinc/mp.h"
-#include "dbinc/txn.h"
 
-static int  __db_des_destroy __P((DB_ENV *, REGION *, int));
+static void __db_des_destroy __P((DB_ENV *, REGION *));
 static int  __db_des_get __P((DB_ENV *, REGINFO *, REGINFO *, REGION **));
 static int  __db_e_remfile __P((DB_ENV *));
 static int  __db_faultmem __P((DB_ENV *, void *, size_t, int));
-static void __db_region_destroy __P((DB_ENV *, REGINFO *));
 
 /*
  * __db_e_attach
@@ -46,37 +53,11 @@ __db_e_attach(dbenv, init_flagsp)
 	REGION *rp, tregion;
 	size_t size;
 	size_t nrw;
-	u_int32_t mbytes, bytes;
+	u_int32_t bytes, i, mbytes, nregions;
 	u_int retry_cnt;
-	int ret, segid;
+	int majver, minver, patchver, ret, segid;
 	char buf[sizeof(DB_REGION_FMT) + 20];
 
-#if !defined(HAVE_MUTEX_THREADS)
-	/*
-	 * !!!
-	 * If we don't have spinlocks, we need a file descriptor for fcntl(2)
-	 * locking.  We use the file handle from the REGENV file for this
-	 * purpose.
-	 *
-	 * Since we may be using shared memory regions, e.g., shmget(2), and
-	 * not a mapped-in regular file, the backing file may be only a few
-	 * bytes in length.  So, this depends on the ability to call fcntl to
-	 * lock file offsets much larger than the actual physical file.  I
-	 * think that's safe -- besides, very few systems actually need this
-	 * kind of support, SunOS is the only one still in wide use of which
-	 * I'm aware.
-	 *
-	 * The error case is if an application lacks spinlocks and wants to be
-	 * threaded.  That doesn't work because fcntl may lock the underlying
-	 * process, including all its threads.
-	 */
-	if (F_ISSET(dbenv, DB_ENV_THREAD)) {
-		__db_err(dbenv,
-	    "architecture lacks fast mutexes: applications cannot be threaded");
-		return (EINVAL);
-	}
-#endif
-
 	/* Initialization */
 	retry_cnt = 0;
 
@@ -95,8 +76,8 @@ loop:	renv = NULL;
 
 	/*
 	 * We have to single-thread the creation of the REGENV region.  Once
-	 * it exists, we can do locking using locks in the region, but until
-	 * then we have to be the only player in the game.
+	 * it exists, we can serialize using region mutexes, but until then
+	 * we have to be the only player in the game.
 	 *
 	 * If this is a private environment, we are only called once and there
 	 * are no possible race conditions.
@@ -193,7 +174,8 @@ loop:	renv = NULL;
 	/*
 	 * If the size is less than the size of a REGENV_REF structure, the
 	 * region (or, possibly, the REGENV_REF structure) has not yet been
-	 * completely written.  Wait awhile and try again.
+	 * completely written.  Shouldn't be possible, but there's no reason
+	 * not to wait awhile and try again.
 	 *
 	 * Otherwise, if the size is the size of a REGENV_REF structure,
 	 * read it into memory and use it as a reference to the real region.
@@ -224,13 +206,12 @@ loop:	renv = NULL;
 	} else
 		segid = INVALID_REGION_SEGID;
 
+#ifndef HAVE_MUTEX_FCNTL
 	/*
-	 * If not doing thread locking, we need to save the file handle for
-	 * fcntl(2) locking.  Otherwise, discard the handle, we no longer
-	 * need it, and the less contact between the buffer cache and the VM,
-	 * the better.
+	 * If we're not doing fcntl locking, we can close the file handle.  We
+	 * no longer need it and the less contact between the buffer cache and
+	 * the VM, the better.
 	 */
-#ifdef HAVE_MUTEX_THREADS
 	 (void)__os_closehandle(dbenv, dbenv->lockfhp);
 	 dbenv->lockfhp = NULL;
 #endif
@@ -253,13 +234,24 @@ loop:	renv = NULL;
 	infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
 	renv = infop->primary;
 
-	/* Make sure the region matches our build. */
+	/*
+	 * Make sure the region matches our build.  Special case a region
+	 * that's all nul bytes, just treat it like any other corruption.
+	 *
+	 * !!!
+	 * We don't display the major/minor version from the environment,
+	 * because it may be in a different place in the two regions.
+	 */
 	if (renv->majver != DB_VERSION_MAJOR ||
 	    renv->minver != DB_VERSION_MINOR) {
-		__db_err(dbenv,
-	"Program version %d.%d doesn't match environment version",
-		    DB_VERSION_MAJOR, DB_VERSION_MINOR);
-		ret = DB_VERSION_MISMATCH;
+		if (renv->majver != 0 || renv->minver != 0) {
+			__db_err(dbenv,
+	"Program version %d.%d doesn't match environment version %d.%d",
+			    DB_VERSION_MAJOR, DB_VERSION_MINOR,
+			    renv->majver, renv->minver);
+			ret = DB_VERSION_MISMATCH;
+		} else
+			ret = EINVAL;
 		goto err;
 	}
 
@@ -277,33 +269,19 @@ loop:	renv = NULL;
 	 * I'd rather play permissions games using the underlying file, but I
 	 * can't because Windows/NT filesystems won't open files mode 0.
 	 */
-	if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
+	if (renv->panic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
 		ret = __db_panic_msg(dbenv);
 		goto err;
 	}
 	if (renv->magic != DB_REGION_MAGIC)
 		goto retry;
 
-	/* Lock the environment. */
-	MUTEX_LOCK(dbenv, &renv->mutex);
-
-	/*
-	 * Finally!  We own the environment now.  Repeat the panic check, it's
-	 * possible that it was set while we waited for the lock.
-	 */
-	if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
-		ret = __db_panic_msg(dbenv);
-		goto err_unlock;
-	}
-
 	/*
 	 * Get a reference to the underlying REGION information for this
 	 * environment.
 	 */
-	if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0 || rp == NULL) {
-		MUTEX_UNLOCK(dbenv, &renv->mutex);
+	if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0 || rp == NULL)
 		goto find_err;
-	}
 	infop->rp = rp;
 
 	/*
@@ -312,28 +290,33 @@ loop:	renv = NULL;
 	 * growing as part of its creation.  We can detect this by checking the
 	 * size we originally found against the region's current size.  (The
 	 * region's current size has to be final, the creator finished growing
-	 * it before releasing the environment for us to lock.)
+	 * it before setting the magic number in the region.)
 	 */
-	if (rp->size != size) {
-err_unlock:	MUTEX_UNLOCK(dbenv, &renv->mutex);
+	if (rp->size != size)
 		goto retry;
-	}
 
 	/* Increment the reference count. */
+	MUTEX_LOCK(dbenv, renv->mtx_regenv);
 	++renv->refcnt;
+	MUTEX_UNLOCK(dbenv, renv->mtx_regenv);
 
 	/*
-	 * Add configuration flags from our caller; return the total set of
-	 * configuration flags for later DB_JOINENV calls.
+	 * Check our callers configuration flags, it's an error to configure
+	 * incompatible or additional subsystems in an existing environment.
+	 * Return the total set of flags to the caller so they initialize the
+	 * correct set of subsystems.
 	 */
 	if (init_flagsp != NULL) {
-		renv->init_flags |= *init_flagsp;
+		FLD_CLR(*init_flagsp, renv->init_flags);
+		if (*init_flagsp != 0) {
+			__db_err(dbenv,
+    "configured environment flags incompatible with existing environment");
+			ret = EINVAL;
+			goto err;
+		}
 		*init_flagsp = renv->init_flags;
 	}
 
-	/* Discard our lock. */
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
-
 	/*
 	 * Fault the pages into memory.  Note, do this AFTER releasing the
 	 * lock, because we're only reading the pages, not writing them.
@@ -349,15 +332,17 @@ creation:
 	F_SET(infop, REGION_CREATE);
 
 	/*
-	 * Allocate room for 100 REGION structures plus overhead (we're going
-	 * to use this space for last-ditch allocation requests), although we
-	 * should never need anything close to that.
+	 * Allocate room for REGION structures plus overhead.
 	 *
-	 * Encryption passwds are stored in the env region.  Add that in too.
+	 * XXX
+	 * Overhead is so high because encryption passwds are stored in the
+	 * base environment region, as are replication vote arrays.  This is
+	 * a bug, not a feature, replication needs its own region.
 	 */
 	memset(&tregion, 0, sizeof(tregion));
-	tregion.size = (roff_t)(100 * sizeof(REGION) +
-	    dbenv->passwd_len + 4096);
+	nregions = dbenv->mp_ncache + 10;
+	tregion.size =
+	   (roff_t)(nregions * sizeof(REGION) + dbenv->passwd_len + 16 * 1024);
 	tregion.segid = INVALID_REGION_SEGID;
 	if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
 		goto err;
@@ -391,17 +376,25 @@ creation:
 	__db_shalloc_init(infop, tregion.size - sizeof(REGENV));
 
 	/*
-	 * Initialize the rest of the REGENV structure, except for the magic
-	 * number which validates the file/environment.
+	 * Initialize the rest of the REGENV structure.  (Don't set the magic
+	 * number to the correct value, that would validate the environment).
 	 */
 	renv = infop->primary;
-	renv->envpanic = 0;
+	renv->magic = 0;
+	renv->panic = 0;
+
+	(void)db_version(&majver, &minver, &patchver);
+	renv->majver = (u_int32_t)majver;
+	renv->minver = (u_int32_t)minver;
+	renv->patchver = (u_int32_t)patchver;
+
+	(void)time(&renv->timestamp);
 	__os_unique_id(dbenv, &renv->envid);
-	(void)db_version(&renv->majver, &renv->minver, &renv->patch);
-	SH_LIST_INIT(&renv->regionq);
+
+	if ((ret = __mutex_alloc(
+	    dbenv, MTX_ENV_REGION, 0, &renv->mtx_regenv)) != 0)
+		goto err;
 	renv->refcnt = 1;
-	renv->cipher_off = INVALID_ROFF;
-	renv->rep_off = INVALID_ROFF;
 
 	/*
 	 * Initialize init_flags to store the flags that any other environment
@@ -410,27 +403,27 @@ creation:
 	renv->init_flags = (init_flagsp == NULL) ? 0 : *init_flagsp;
 
 	/*
-	 * Lock the environment.
-	 *
-	 * Check the lock call return.  This is the first lock we initialize
-	 * and acquire, and we have to know if it fails.  (It CAN fail, e.g.,
-	 * SunOS, when using fcntl(2) for locking and using an in-memory
-	 * filesystem as the database home.  But you knew that, I'm sure -- it
-	 * probably wasn't even worth mentioning.)
+	 * Set up the region array.  We use an array rather than a linked list
+	 * as we have to traverse this list after failure in some cases, and
+	 * we don't want to infinitely loop should the application fail while
+	 * we're manipulating the list.
 	 */
-	if ((ret = __db_mutex_setup(dbenv, infop, &renv->mutex,
-	    MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
-		__db_err(dbenv, "%s: unable to initialize environment lock: %s",
-		    infop->name, db_strerror(ret));
+	renv->region_cnt = nregions;
+	if ((ret =
+	    __db_shalloc(infop, nregions * sizeof(REGION), 0, &rp)) != 0) {
+		__db_err(dbenv, "unable to create new master region array: %s",
+		    db_strerror(ret));
 		goto err;
 	}
+	renv->region_off = R_OFFSET(infop, rp);
+	for (i = 0; i < nregions; ++i, ++rp)
+		rp->id = INVALID_REGION_ID;
 
-	if (!F_ISSET(&renv->mutex, MUTEX_IGNORE) &&
-	    (ret = __db_mutex_lock(dbenv, &renv->mutex)) != 0) {
-		__db_err(dbenv, "%s: unable to acquire environment lock: %s",
-		    infop->name, db_strerror(ret));
-		goto err;
-	}
+	renv->cipher_off = INVALID_ROFF;
+
+	renv->rep_off = INVALID_ROFF;
+	renv->flags = 0;
+	renv->op_timestamp = renv->rep_timestamp = 0;
 
 	/*
 	 * Get the underlying REGION structure for this environment.  Note,
@@ -439,8 +432,7 @@ creation:
 	 * the REGION structure.
 	 */
 	if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0) {
-find_err:	__db_err(dbenv,
-		    "%s: unable to find environment", infop->name);
+find_err:	__db_err(dbenv, "%s: unable to find environment", infop->name);
 		if (ret == 0)
 			ret = EINVAL;
 		goto err;
@@ -474,25 +466,18 @@ find_err:	__db_err(dbenv,
 		}
 	}
 
+#ifndef HAVE_MUTEX_FCNTL
 	/*
-	 * If not doing thread locking, we need to save the file handle for
-	 * fcntl(2) locking.  Otherwise, discard the handle, we no longer
-	 * need it, and the less contact between the buffer cache and the VM,
-	 * the better.
+	 * If we're not doing fcntl locking, we can close the file handle.  We
+	 * no longer need it and the less contact between the buffer cache and
+	 * the VM, the better.
 	 */
-#if defined(HAVE_MUTEX_THREADS)
 	if (dbenv->lockfhp != NULL) {
 		 (void)__os_closehandle(dbenv, dbenv->lockfhp);
 		 dbenv->lockfhp = NULL;
 	}
 #endif
 
-	/* Validate the file. */
-	renv->magic = DB_REGION_MAGIC;
-
-	/* Discard our lock. */
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
-
 	/* Everything looks good, we're done. */
 	dbenv->reginfo = infop;
 	return (0);
@@ -540,6 +525,35 @@ retry:	/* Close any open file handle. */
 	return (ret);
 }
 
+/*
+ * __db_e_golive --
+ *	Turn on the created environment.
+ *
+ * PUBLIC: int __db_e_golive __P((DB_ENV *));
+ */
+int
+__db_e_golive(dbenv)
+	DB_ENV *dbenv;
+{
+	REGENV *renv;
+	REGINFO *infop;
+
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+
+	/* If we didn't create the region, there's no need for further work. */
+	if (!F_ISSET(infop, REGION_CREATE))
+		return (0);
+
+	/*
+	 * Validate the file.  All other threads of control are waiting
+	 * on this value to be written -- "Let slip the hounds of war!"
+	 */
+	renv->magic = DB_REGION_MAGIC;
+
+	return (0);
+}
+
 /*
  * __db_e_detach --
  *	Detach from the environment.
@@ -553,81 +567,97 @@ __db_e_detach(dbenv, destroy)
 {
 	REGENV *renv;
 	REGINFO *infop;
+	REGION rp;
+	int ret, t_ret;
 
 	infop = dbenv->reginfo;
 	renv = infop->primary;
+	ret = 0;
 
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE))
 		destroy = 1;
 
-	/* Lock the environment. */
-	MUTEX_LOCK(dbenv, &renv->mutex);
-
 	/* Decrement the reference count. */
-	if (renv->refcnt == 0) {
-		__db_err(dbenv,
-		    "region %lu (environment): reference count went negative",
-		    (u_long)infop->rp->id);
-	} else
+	MUTEX_LOCK(dbenv, renv->mtx_regenv);
+	if (renv->refcnt == 0)
+		__db_err(dbenv, "environment reference count went negative");
+	else
 		--renv->refcnt;
-
-	/* Release the lock. */
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
+	MUTEX_UNLOCK(dbenv, renv->mtx_regenv);
 
 	/* Close the locking file handle. */
 	if (dbenv->lockfhp != NULL) {
-		(void)__os_closehandle(dbenv, dbenv->lockfhp);
+		if ((t_ret =
+		    __os_closehandle(dbenv, dbenv->lockfhp)) != 0 && ret == 0)
+			ret = t_ret;
 		dbenv->lockfhp = NULL;
 	}
 
 	/*
-	 * If we are destroying the environment, destroy any system resources
-	 * the crypto and replication systems may have acquired and put in the
-	 * main region.
+	 * Release the region, and kill our reference.
 	 */
 	if (destroy) {
 #ifdef HAVE_CRYPTO
-		(void)__crypto_region_destroy(dbenv);
+		/*
+		 * Destroy any system resources the crypto subsystem may have
+		 * acquired.
+		 */
+		if ((t_ret = __crypto_region_destroy(dbenv)) != 0 && ret == 0)
+			ret = t_ret;
 #endif
-		(void)__rep_region_destroy(dbenv);
+		/*
+		 * Destroy any system resources the replication subsystem may
+		 * have acquired.
+		 */
+		if ((t_ret = __rep_region_destroy(dbenv)) != 0 && ret == 0)
+			ret = t_ret;
+
+		/*
+		 * Free the REGION array.
+		 *
+		 * The actual underlying region structure is allocated from the
+		 * primary shared region, and we're about to free it.  Save a
+		 * copy on our stack for the REGINFO to reference when it calls
+		 * down into the OS layer to release the shared memory segment.
+		 */
+		rp = *infop->rp;
+		infop->rp = &rp;
+
+		if (renv->region_off != INVALID_ROFF)
+			__db_shalloc_free(
+			   infop, R_ADDR(infop, renv->region_off));
+
+		/* Discard any mutex resources we may have acquired. */
+		if ((t_ret =
+		    __mutex_free(dbenv, &renv->mtx_regenv)) != 0 && ret == 0)
+			ret = t_ret;
 	}
 
 	/*
-	 * Release the region, and kill our reference.
+	 * Set the DB_ENV->reginfo field to NULL.  First, DB_ENV->remove calls
+	 * __env_remove to do the region remove, and __envremove attached and
+	 * then detaches from the region.  We don't want to return to
+	 * DB_ENV->remove with a non-NULL DB_ENV->reginfo field because it will
+	 * attempt to detach again as part of its cleanup.
 	 *
-	 * If we are destroying the environment, destroy any system resources
-	 * backing the mutex.
+	 * Second, DB code uses DB_ENV->reginfo to decide if it's OK to read
+	 * the underlying region.  We're about to destroy what it references,
+	 * so it needs to be cleared.
 	 */
-	if (destroy) {
-		(void)__db_mutex_destroy(&renv->mutex);
-		(void)__db_mutex_destroy(&infop->rp->mutex);
-
-		/*
-		 * Only free the REGION structure itself if it was separately
-		 * allocated from the heap.
-		 */
-		if (F_ISSET(dbenv, DB_ENV_PRIVATE))
-			__db_shalloc_free(infop, infop->rp);
-	}
+	dbenv->reginfo = NULL;
 
 	/* Reset the addr value that we "corrected" above. */
 	infop->addr = infop->primary;
 
-	(void)__os_r_detach(dbenv, infop, destroy);
+	if ((t_ret = __os_r_detach(dbenv, infop, destroy)) != 0 && ret == 0)
+		ret = t_ret;
 	if (infop->name != NULL)
 		__os_free(dbenv, infop->name);
 
-	/*
-	 * We set the DB_ENV->reginfo field to NULL here and discard its memory.
-	 * DB_ENV->remove calls __dbenv_remove to do the region remove, and
-	 * __dbenv_remove attached and then detaches from the region.  We don't
-	 * want to return to DB_ENV->remove with a non-NULL DB_ENV->reginfo
-	 * field because it will attempt to detach again as part of its cleanup.
-	 */
-	__os_free(dbenv, dbenv->reginfo);
-	dbenv->reginfo = NULL;
+	/* Discard the DB_ENV->reginfo field's memory. */
+	__os_free(dbenv, infop);
 
-	return (0);
+	return (ret);
 }
 
 /*
@@ -644,33 +674,39 @@ __db_e_remove(dbenv, flags)
 	REGENV *renv;
 	REGINFO *infop, reginfo;
 	REGION *rp;
-	u_int32_t db_env_reset;
-	int force, ret;
+	u_int32_t db_env_reset, i;
+	int ret;
+
+	db_env_reset = F_ISSET(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
 
-	force = LF_ISSET(DB_FORCE) ? 1 : 0;
 	/*
 	 * This routine has to walk a nasty line between not looking into
 	 * the environment (which may be corrupted after an app or system
 	 * crash), and removing everything that needs removing.  What we
 	 * do is:
-	 *	1. Connect to the environment (so it better be OK).
+	 *	1. Connect to the environment.
 	 *	2. If the environment is in use (reference count is non-zero),
 	 *	   return EBUSY.
-	 *	3. Overwrite the magic number so that any threads of control
-	 *	   attempting to connect will backoff and retry.
-	 *	4. Walk the list of regions.  Connect to each region and then
+	 *	3. Panic it and overwrite the magic number so any threads of
+	 *	   control attempting to connect (or racing with us) backoff
+	 *	   and retry or just die.
+	 *	4. Walk the array of regions.  Connect to each region and then
 	 *	   disconnect with the destroy flag set.  This shouldn't cause
 	 *	   any problems, even if the region is corrupted, because we
-	 *	   should never be looking inside the region.
+	 *	   never look inside the region (with the single exception of
+	 *	   mutex regions on systems where we have to return resources
+	 *	   to the underlying system).
 	 *	5. Walk the list of files in the directory, unlinking any
 	 *	   files that match a region name.  Unlink the environment
 	 *	   file last.
 	 *
 	 * If the force flag is set, we do not acquire any locks during this
 	 * process.
+	 *
+	 * We're going to panic the environment, so we'll want to ignore that
+	 * flag.
 	 */
-	db_env_reset = F_ISSET(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
-	if (force)
+	if (LF_ISSET(DB_FORCE))
 		F_SET(dbenv, DB_ENV_NOLOCKING);
 	F_SET(dbenv, DB_ENV_NOPANIC);
 
@@ -682,7 +718,7 @@ __db_e_remove(dbenv, flags)
 		 * probably isn't important.
 		 */
 		ret = 0;
-		if (force)
+		if (LF_ISSET(DB_FORCE))
 			goto remfiles;
 		goto done;
 	}
@@ -691,14 +727,14 @@ __db_e_remove(dbenv, flags)
 	renv = infop->primary;
 
 	/* Lock the environment. */
-	MUTEX_LOCK(dbenv, &renv->mutex);
+	MUTEX_LOCK(dbenv, renv->mtx_regenv);
 
 	/*
 	 * If it's in use, we're done unless we're forcing the issue or the
 	 * environment has panic'd.  (Presumably, if the environment panic'd,
 	 * the thread holding the reference count may not have cleaned up.)
 	 */
-	if (renv->refcnt == 1 || renv->envpanic == 1 || force) {
+	if (renv->refcnt == 1 || renv->panic == 1 || LF_ISSET(DB_FORCE)) {
 		/*
 		 * Set the panic flag and overwrite the magic number.
 		 *
@@ -706,42 +742,50 @@ __db_e_remove(dbenv, flags)
 		 * From this point on, there's no going back, we pretty
 		 * much ignore errors, and just whack on whatever we can.
 		 */
-		renv->envpanic = 1;
 		renv->magic = 0;
+		renv->panic = 1;
 
 		/*
-		 * Unlock the environment.  We should no longer need the lock
-		 * because we've poisoned the pool, but we can't continue to
-		 * hold it either, because other routines may want it.
+		 * Unlock the environment -- nobody should need this lock
+		 * because we've poisoned the pool.
 		 */
-		MUTEX_UNLOCK(dbenv, &renv->mutex);
+		MUTEX_UNLOCK(dbenv, renv->mtx_regenv);
 
-		/*
-		 * Attach to each sub-region and destroy it.
-		 *
-		 * !!!
-		 * The REGION_CREATE_OK flag is set for Windows/95 -- regions
-		 * are zero'd out when the last reference to the region goes
-		 * away, in which case the underlying OS region code requires
-		 * callers be prepared to create the region in order to join it.
-		 */
-		memset(&reginfo, 0, sizeof(reginfo));
-		for (rp = SH_LIST_FIRST(&renv->regionq, __db_region);
-		    rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
-			if (rp->type == REGION_TYPE_ENV)
+		/* Attach to each sub-region and destroy it. */
+		for (rp = R_ADDR(infop, renv->region_off),
+		    i = 0; i < renv->region_cnt; ++i, ++rp) {
+			if (rp->id == INVALID_REGION_ID ||
+			    rp->type == REGION_TYPE_ENV)
 				continue;
+			/*
+			 * !!!
+			 * The REGION_CREATE_OK flag is set for Windows/95 --
+			 * regions are zero'd out when the last reference to
+			 * the region goes away, in which case the underlying
+			 * OS region code requires callers be prepared to
+			 * create the region in order to join it.
+			 */
+			memset(&reginfo, 0, sizeof(reginfo));
+			reginfo.id = rp->id;
+			reginfo.flags = REGION_CREATE_OK;
 
 			/*
 			 * If we get here and can't attach and/or detach to the
 			 * region, it's a mess.  Ignore errors, there's nothing
 			 * we can do about them.
 			 */
-			reginfo.id = rp->id;
-			reginfo.flags = REGION_CREATE_OK;
-			if (__db_r_attach(dbenv, &reginfo, 0) == 0) {
-				R_UNLOCK(dbenv, &reginfo);
-				(void)__db_r_detach(dbenv, &reginfo, 1);
-			}
+			if (__db_r_attach(dbenv, &reginfo, 0) != 0)
+				continue;
+
+#ifdef  HAVE_MUTEX_SYSTEM_RESOURCES
+			/*
+			 * If destroying the mutex region, return any system
+			 * resources to the system.
+			 */
+			if (reginfo.type == REGION_TYPE_MUTEX)
+				__mutex_resource_return(dbenv, &reginfo);
+#endif
+			(void)__db_r_detach(dbenv, &reginfo, 1);
 		}
 
 		/* Destroy the environment's region. */
@@ -751,7 +795,7 @@ __db_e_remove(dbenv, flags)
 remfiles:	(void)__db_e_remfile(dbenv);
 	} else {
 		/* Unlock the environment. */
-		MUTEX_UNLOCK(dbenv, &renv->mutex);
+		MUTEX_UNLOCK(dbenv, renv->mtx_regenv);
 
 		/* Discard the environment. */
 		(void)__db_e_detach(dbenv, 0);
@@ -819,6 +863,10 @@ __db_e_remfile(dbenv)
 		if (strncmp(names[cnt], "__dbq.", 6) == 0)
 			continue;
 
+		/* Skip registry files. */
+		if (strncmp(names[cnt], "__db.register", 13) == 0)
+			continue;
+
 		/* Skip replication files. */
 		if (strncmp(names[cnt], "__db.rep.", 9) == 0)
 			continue;
@@ -846,7 +894,7 @@ __db_e_remfile(dbenv)
 			 */
 			if (F_ISSET(dbenv, DB_ENV_OVERWRITE) &&
 			    strlen(names[cnt]) == DB_REGION_NAME_LENGTH)
-				(void)__db_overwrite(dbenv, path);
+				(void)__db_file_multi_write(dbenv, path);
 			(void)__os_unlink(dbenv, path);
 			__os_free(dbenv, path);
 		}
@@ -856,7 +904,7 @@ __db_e_remfile(dbenv)
 		if (__db_appname(dbenv,
 		    DB_APP_NONE, names[lastrm], 0, NULL, &path) == 0) {
 			if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
-				(void)__db_overwrite(dbenv, path);
+				(void)__db_file_multi_write(dbenv, path);
 			(void)__os_unlink(dbenv, path);
 			__os_free(dbenv, path);
 		}
@@ -877,31 +925,26 @@ __db_r_attach(dbenv, infop, size)
 	REGINFO *infop;
 	size_t size;
 {
-	REGENV *renv;
 	REGION *rp;
 	int ret;
 	char buf[sizeof(DB_REGION_FMT) + 20];
 
-	renv = ((REGINFO *)dbenv->reginfo)->primary;
-
-	/* Lock the environment. */
-	MUTEX_LOCK(dbenv, &renv->mutex);
-
 	/*
 	 * Find or create a REGION structure for this region.  If we create
 	 * it, the REGION_CREATE flag will be set in the infop structure.
 	 */
 	F_CLR(infop, REGION_CREATE);
-	if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0) {
-		MUTEX_UNLOCK(dbenv, &renv->mutex);
+	if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0)
 		return (ret);
-	}
 	infop->dbenv = dbenv;
 	infop->rp = rp;
 	infop->type = rp->type;
 	infop->id = rp->id;
 
-	/* If we're creating the region, set the desired size. */
+	/*
+	 * __db_des_get may have created the region and reset the create
+	 * flag.  If we're creating the region, set the desired size.
+	 */
 	if (F_ISSET(infop, REGION_CREATE))
 		rp->size = (roff_t)size;
 
@@ -932,15 +975,6 @@ __db_r_attach(dbenv, infop, size)
 	if (F_ISSET(infop, REGION_CREATE))
 		__db_shalloc_init(infop, rp->size);
 
-	/*
-	 * If the underlying REGION isn't the environment, acquire a lock
-	 * for it and release our lock on the environment.
-	 */
-	if (infop->type != REGION_TYPE_ENV) {
-		MUTEX_LOCK(dbenv, &rp->mutex);
-		MUTEX_UNLOCK(dbenv, &renv->mutex);
-	}
-
 	return (0);
 
 err:	/* Discard the underlying region. */
@@ -952,13 +986,10 @@ err:	/* Discard the underlying region. */
 
 	/* Discard the REGION structure if we created it. */
 	if (F_ISSET(infop, REGION_CREATE)) {
-		(void)__db_des_destroy(dbenv, rp, 1);
+		__db_des_destroy(dbenv, rp);
 		F_CLR(infop, REGION_CREATE);
 	}
 
-	/* Release the environment lock. */
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
-
 	return (ret);
 }
 
@@ -974,48 +1005,28 @@ __db_r_detach(dbenv, infop, destroy)
 	REGINFO *infop;
 	int destroy;
 {
-	REGENV *renv;
 	REGION *rp;
-	int ret, t_ret;
+	int ret;
 
-	renv = ((REGINFO *)dbenv->reginfo)->primary;
 	rp = infop->rp;
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE))
 		destroy = 1;
 
-	/* Lock the environment. */
-	MUTEX_LOCK(dbenv, &renv->mutex);
-
-	/* Acquire the lock for the REGION. */
-	MUTEX_LOCK(dbenv, &rp->mutex);
-
 	/*
-	 * We need to call destroy on per-subsystem info before we free the
-	 * memory associated with the region.
+	 * When discarding the regions as we shut down a database environment,
+	 * discard any allocated shared memory segments.  This is the last time
+	 * we use them, and db_region_destroy is the last region-specific call
+	 * we make.
 	 */
-	if (destroy)
-		__db_region_destroy(dbenv, infop);
+	if (F_ISSET(dbenv, DB_ENV_PRIVATE) && infop->primary != NULL)
+		__db_shalloc_free(infop, infop->primary);
 
 	/* Detach from the underlying OS region. */
 	ret = __os_r_detach(dbenv, infop, destroy);
 
-	/* Release the REGION lock. */
-	MUTEX_UNLOCK(dbenv, &rp->mutex);
-
-	/*
-	 * If we destroyed the region, discard the REGION structure.  The only
-	 * time this routine is called with the destroy flag set is when the
-	 * environment is being removed, and it's likely that the only reason
-	 * the environment is being removed is because we crashed.  Don't do
-	 * any unnecessary shared memory manipulation.
-	 */
-	if (destroy &&
-	    ((t_ret = __db_des_destroy(
-		dbenv, rp, F_ISSET(dbenv, DB_ENV_PRIVATE))) != 0) && ret == 0)
-		ret = t_ret;
-
-	/* Release the environment lock. */
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
+	/* If we destroyed the region, discard the REGION structure. */
+	if (destroy)
+		__db_des_destroy(dbenv, rp);
 
 	/* Destroy the structure. */
 	if (infop->name != NULL)
@@ -1036,14 +1047,9 @@ __db_des_get(dbenv, env_infop, infop, rpp)
 	REGION **rpp;
 {
 	REGENV *renv;
-	REGION *rp, *first_type;
-	u_int32_t maxid;
-	int ret;
+	REGION *rp, *empty_slot, *first_type;
+	u_int32_t i, maxid;
 
-	/*
-	 * !!!
-	 * Called with the environment already locked.
-	 */
 	*rpp = NULL;
 	renv = env_infop->primary;
 
@@ -1054,14 +1060,20 @@ __db_des_get(dbenv, env_infop, infop, rpp)
 	 * return the "primary" region, that is, the first region that was
 	 * created of this type.
 	 *
-	 * Track the maximum region ID so we can allocate a new region,
-	 * note that we have to start at 1 because the primary environment
-	 * uses ID == 1.
+	 * Track the first empty slot and maximum region ID for new region
+	 * allocation.
+	 *
+	 * MaxID starts at REGION_ID_ENV, the ID of the primary environment.
 	 */
 	maxid = REGION_ID_ENV;
-	for (first_type = NULL,
-	    rp = SH_LIST_FIRST(&renv->regionq, __db_region);
-	    rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+	empty_slot = first_type = NULL;
+	for (rp = R_ADDR(env_infop, renv->region_off),
+	    i = 0; i < renv->region_cnt; ++i, ++rp) {
+		if (rp->id == INVALID_REGION_ID) {
+			if (empty_slot == NULL)
+				empty_slot = rp;
+			continue;
+		}
 		if (infop->id != INVALID_REGION_ID) {
 			if (infop->id == rp->id)
 				break;
@@ -1075,51 +1087,48 @@ __db_des_get(dbenv, env_infop, infop, rpp)
 		if (rp->id > maxid)
 			maxid = rp->id;
 	}
-	if (rp == NULL)
+
+	/* If we found a matching ID (or a matching type), return it. */
+	if (i >= renv->region_cnt)
 		rp = first_type;
+	if (rp != NULL) {
+		*rpp = rp;
+		return (0);
+	}
 
 	/*
-	 * If we didn't find a region and we can't create the region, fail.
-	 * The caller generates any error message.
+	 * If we didn't find a region and we don't have permission to create
+	 * the region, fail.  The caller generates any error message.
 	 */
-	if (rp == NULL && !F_ISSET(infop, REGION_CREATE_OK))
+	if (!F_ISSET(infop, REGION_CREATE_OK))
 		return (ENOENT);
 
 	/*
-	 * If we didn't find a region, create and initialize a REGION structure
-	 * for the caller.  If id was set, use that value, otherwise we use the
-	 * next available ID.
+	 * If we didn't find a region and don't have room to create the region
+	 * fail with an error message, there's a sizing problem.
 	 */
-	if (rp == NULL) {
-		if ((ret = __db_shalloc(env_infop,
-		    sizeof(REGION), MUTEX_ALIGN, &rp)) != 0) {
-			__db_err(dbenv,
-			    "unable to create new master region entry: %s",
-			    db_strerror(ret));
-			return (ret);
-		}
-
-		/* Initialize the region. */
-		memset(rp, 0, sizeof(*rp));
-		if ((ret = __db_mutex_setup(dbenv, env_infop, &rp->mutex,
-		    MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
-			__db_shalloc_free(env_infop, rp);
-			return (ret);
-		}
-		rp->segid = INVALID_REGION_SEGID;
-
-		/*
-		 * Set the type and ID; if no region ID was specified,
-		 * allocate one.
-		 */
-		rp->type = infop->type;
-		rp->id = infop->id == INVALID_REGION_ID ? maxid + 1 : infop->id;
-
-		SH_LIST_INSERT_HEAD(&renv->regionq, rp, q, __db_region);
-		F_SET(infop, REGION_CREATE);
+	if (empty_slot == NULL) {
+		__db_err(dbenv, "no room remaining for additional REGIONs");
+		return (ENOENT);
 	}
 
-	*rpp = rp;
+	/*
+	 * Initialize a REGION structure for the caller.  If id was set, use
+	 * that value, otherwise we use the next available ID.
+	 */
+	memset(empty_slot, 0, sizeof(REGION));
+	empty_slot->segid = INVALID_REGION_SEGID;
+
+	/*
+	 * Set the type and ID; if no region ID was specified,
+	 * allocate one.
+	 */
+	empty_slot->type = infop->type;
+	empty_slot->id = infop->id == INVALID_REGION_ID ? maxid + 1 : infop->id;
+
+	F_SET(infop, REGION_CREATE);
+
+	*rpp = empty_slot;
 	return (0);
 }
 
@@ -1127,41 +1136,14 @@ __db_des_get(dbenv, env_infop, infop, rpp)
  * __db_des_destroy --
  *	Destroy a reference to a REGION.
  */
-static int
-__db_des_destroy(dbenv, rp, shmem_safe)
+static void
+__db_des_destroy(dbenv, rp)
 	DB_ENV *dbenv;
 	REGION *rp;
-	int shmem_safe;
 {
-	REGINFO *infop;
+	COMPQUIET(dbenv, NULL);
 
-	/*
-	 * !!!
-	 * Called with the environment already locked.
-	 */
-	infop = dbenv->reginfo;
-
-	/*
-	 * If we're calling during recovery, it may not be safe to access the
-	 * shared memory, as the shared memory may have been corrupted during
-	 * the crash.  If the shared memory is safe, remove the REGION entry
-	 * from its linked list, destroy the mutex, and free the allocated
-	 * memory.  On systems that require system mutex support, we don't
-	 * have a choice -- safe or not, we have to destroy the mutex or we'll
-	 * leak memory.
-	 */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	(void)__db_mutex_destroy(&rp->mutex);
-#else
-	if (shmem_safe)
-		(void)__db_mutex_destroy(&rp->mutex);
-#endif
-	if (shmem_safe) {
-		SH_LIST_REMOVE(rp, q, __db_region);
-		__db_shalloc_free(infop, rp);
-	}
-
-	return (0);
+	rp->id = INVALID_REGION_ID;
 }
 
 /*
@@ -1193,51 +1175,21 @@ __db_faultmem(dbenv, addr, size, created)
 	 * system can't cheat.  If we're just joining the region, we can
 	 * only read the value and try to confuse the compiler sufficiently
 	 * that it doesn't figure out that we're never really using it.
+	 *
+	 * Touch every page (assuming pages are 512B, the smallest VM page
+	 * size used in any general purpose processor).
 	 */
 	ret = 0;
 	if (F_ISSET(dbenv, DB_ENV_REGION_INIT)) {
 		if (created)
-			for (p = addr, t = (u_int8_t *)addr + size;
-			    p < t; p += OS_VMPAGESIZE)
+			for (p = addr,
+			    t = (u_int8_t *)addr + size; p < t; p += 512)
 				p[0] = 0xdb;
 		else
-			for (p = addr, t = (u_int8_t *)addr + size;
-			    p < t; p += OS_VMPAGESIZE)
+			for (p = addr,
+			    t = (u_int8_t *)addr + size; p < t; p += 512)
 				ret |= p[0];
 	}
 
 	return (ret);
 }
-
-/*
- * __db_region_destroy --
- *	Destroy per-subsystem region information.
- *	Called with the region already locked.
- */
-static void
-__db_region_destroy(dbenv, infop)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-{
-	switch (infop->type) {
-	case REGION_TYPE_LOCK:
-		__lock_region_destroy(dbenv, infop);
-		break;
-	case REGION_TYPE_LOG:
-		__log_region_destroy(dbenv, infop);
-		break;
-	case REGION_TYPE_MPOOL:
-		__memp_region_destroy(dbenv, infop);
-		break;
-	case REGION_TYPE_TXN:
-		__txn_region_destroy(dbenv, infop);
-		break;
-	case REGION_TYPE_ENV:
-	case REGION_TYPE_MUTEX:
-		break;
-	case INVALID_REGION_TYPE:
-	default:
-		DB_ASSERT(0);
-		break;
-	}
-}
diff --git a/storage/bdb/env/env_register.c b/storage/bdb/env/env_register.c
new file mode 100644
index 00000000000..a5001d2e5f1
--- /dev/null
+++ b/storage/bdb/env/env_register.c
@@ -0,0 +1,421 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: env_register.c,v 1.15 2005/10/07 20:21:27 ubell Exp $
+ */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#define	REGISTER_FILE	"__db.register"
+
+#define	PID_EMPTY	"X%23lu\n"	/* An unused PID entry. */
+#define	PID_FMT		"%24lu\n"	/* File PID format. */
+#define	PID_ISEMPTY(p)	(p[0] == 'X')
+#define	PID_LEN		25		/* Length of PID line. */
+
+#define	REGISTRY_LOCK(dbenv, pos, nowait)				\
+	__os_fdlock(dbenv, (dbenv)->registry, (off_t)(pos), 1, nowait)
+#define	REGISTRY_UNLOCK(dbenv, pos)					\
+	__os_fdlock(dbenv, (dbenv)->registry, (off_t)(pos), 0, 0)
+#define	REGISTRY_EXCL_LOCK(dbenv, nowait)				\
+	REGISTRY_LOCK(dbenv, 1, nowait)
+#define	REGISTRY_EXCL_UNLOCK(dbenv)					\
+	REGISTRY_UNLOCK(dbenv, 1)
+
+static  int __envreg_add __P((DB_ENV *, int *));
+
+/*
+ * Support for portable, multi-process database environment locking, based on
+ * the Subversion SR (#11511).
+ *
+ * The registry feature is configured by specifying the DB_REGISTER flag to the
+ * DbEnv.open method.  If DB_REGISTER is specified, DB opens the registry file
+ * in the database environment home directory.  The registry file is formatted
+ * as follows:
+ *
+ *	                    12345		# process ID slot 1
+ *	X		# empty slot
+ *	                    12346		# process ID slot 2
+ *	X		# empty slot
+ *	                    12347		# process ID slot 3
+ *	                    12348		# process ID slot 4
+ *	X                   12349		# empty slot
+ *	X		# empty slot
+ *
+ * All lines are fixed-length.  All lines are process ID slots.  Empty slots
+ * are marked with leading non-digit characters.
+ *
+ * To modify the file, you get an exclusive lock on the first byte of the file.
+ *
+ * While holding any DbEnv handle, each process has an exclusive lock on the
+ * first byte of a process ID slot.  There is a restriction on having more
+ * than one DbEnv handle open at a time, because Berkeley DB uses per-process
+ * locking to implement this feature, that is, a process may never have more
+ * than a single slot locked.
+ *
+ * This work requires that if a process dies or the system crashes, locks held
+ * by the dying processes will be dropped.  (We can't use system shared
+ * memory-backed or filesystem-backed locks because they're persistent when a
+ * process dies.)  On POSIX systems, we use fcntl(2) locks; on Win32 we have
+ * LockFileEx/UnlockFile, except for Win/9X and Win/ME which have to loop on
+ * Lockfile/UnlockFile.
+ *
+ * We could implement the same solution with flock locking instead of fcntl,
+ * but flock would require a separate file for each process of control (and
+ * probably each DbEnv handle) in the database environment, which is fairly
+ * ugly.
+ *
+ * Whenever a process opens a new DbEnv handle, it walks the registry file and
+ * verifies it CANNOT acquire the lock for any non-empty slot.  If a lock for
+ * a non-empty slot is available, we know a process died holding an open handle,
+ * and recovery needs to be run.
+ *
+ * There can still be processes running in the environment when we recover it,
+ * and, in fact, there can still be processes running in the old environment
+ * after we're up and running in a new one.  This is safe because performing
+ * recovery panics (and removes) the existing environment, so the window of
+ * vulnerability is small.  Further, we check the panic flag in the DB API
+ * methods, when waking from spinning on a mutex, and whenever we're about to
+ * write to disk).  The only window of corruption is if the write check of the
+ * panic were to complete, the region subsequently be recovered, and then the
+ * write continues.  That's very, very unlikely to happen.  This vulnerability
+ * already exists in Berkeley DB, too, the registry code doesn't make it any
+ * worse than it already is.
+ */
+/*
+ * __envreg_register --
+ *	Register a DB_ENV handle.
+ *
+ * PUBLIC: int __envreg_register __P((DB_ENV *, const char *, int *));
+ */
+int
+__envreg_register(dbenv, db_home, need_recoveryp)
+	DB_ENV *dbenv;
+	const char *db_home;
+	int *need_recoveryp;
+{
+	pid_t pid;
+	db_threadid_t tid;
+	u_int32_t bytes, mbytes;
+	int ret;
+	char path[MAXPATHLEN];
+
+	*need_recoveryp = 0;
+	dbenv->thread_id(dbenv, &pid, &tid);
+
+	if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+		__db_msg(dbenv, "%lu: register environment", (u_long)pid);
+
+	/* Build the path name and open the registry file. */
+	(void)snprintf(path, sizeof(path), "%s/%s", db_home, REGISTER_FILE);
+	if ((ret = __os_open(dbenv, path,
+	    DB_OSO_CREATE, __db_omode("rw-rw----"), &dbenv->registry)) != 0)
+		goto err;
+
+	/*
+	 * Wait for an exclusive lock on the file.
+	 *
+	 * !!!
+	 * We're locking bytes that don't yet exist, but that's OK as far as
+	 * I know.
+	 */
+	if ((ret = REGISTRY_EXCL_LOCK(dbenv, 0)) != 0)
+		goto err;
+
+	/*
+	 * If the file size is 0, initialize the file.
+	 *
+	 * Run recovery if we create the file, that means we can clean up the
+	 * system by removing the registry file and restarting the application.
+	 */
+	if ((ret = __os_ioinfo(
+	    dbenv, path, dbenv->registry, &mbytes, &bytes, NULL)) != 0)
+		goto err;
+	if (mbytes == 0 && bytes == 0) {
+		if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+			__db_msg(dbenv,
+			    "%lu: creating %s", (u_long)pid, path);
+		*need_recoveryp = 1;
+	}
+
+	/* Register this process. */
+	if ((ret = __envreg_add(dbenv, need_recoveryp)) != 0)
+		goto err;
+
+	/*
+	 * Release our exclusive lock if we don't need to run recovery.  If
+	 * we need to run recovery, DB_ENV->open will call back into register
+	 * code once recovery has completed.
+	 */
+	if (*need_recoveryp == 0 && (ret = REGISTRY_EXCL_UNLOCK(dbenv)) != 0)
+		goto err;
+
+	if (0) {
+err:		*need_recoveryp = 0;
+
+		/*
+		 * !!!
+		 * Closing the file handle must release all of our locks.
+		 */
+		(void)__os_closehandle(dbenv, dbenv->registry);
+		dbenv->registry = NULL;
+	}
+
+	return (ret);
+}
+
+/*
+ * __envreg_add --
+ *	Add the process' pid to the register.
+ */
+static int
+__envreg_add(dbenv, need_recoveryp)
+	DB_ENV *dbenv;
+	int *need_recoveryp;
+{
+	pid_t pid;
+	db_threadid_t tid;
+	off_t end, pos;
+	size_t nr, nw;
+	u_int lcnt;
+	u_int32_t bytes, mbytes;
+	int need_recovery, ret;
+	char *p, buf[256], pid_buf[256];
+
+	need_recovery = 0;
+	COMPQUIET(p, NULL);
+
+	/* Get a copy of our process ID. */
+	dbenv->thread_id(dbenv, &pid, &tid);
+	snprintf(pid_buf, sizeof(pid_buf), PID_FMT, (u_long)pid);
+
+	if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+		__db_msg(dbenv, "===== %lu: before add", (u_long)pid);
+
+	/*
+	 * Read the file.  Skip empty slots, and check that a lock is held
+	 * for any allocated slots.  An allocated slot which we can lock
+	 * indicates a process died holding a handle and recovery needs to
+	 * be run.
+	 */
+	for (lcnt = 0;; ++lcnt) {
+		if ((ret = __os_read(
+		    dbenv, dbenv->registry, buf, PID_LEN, &nr)) != 0)
+			return (ret);
+		if (nr == 0)
+			break;
+		if (nr != PID_LEN)
+			goto corrupt;
+
+		if (FLD_ISSET(
+		    dbenv->verbose, DB_VERB_REGISTER) && PID_ISEMPTY(buf)) {
+			__db_msg(dbenv, "%02u: EMPTY", lcnt);
+			continue;
+		}
+
+		/*
+		 * !!!
+		 * DB_REGISTER is implemented using per-process locking, only
+		 * a single DB_ENV handle may be open per process.  Enforce
+		 * that restriction.
+		 */
+		if (memcmp(buf, pid_buf, PID_LEN) == 0) {
+			__db_err(dbenv,
+	"DB_REGISTER limits each process to a single open DB_ENV handle");
+			return (EINVAL);
+		}
+
+		if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER)) {
+			for (p = buf; *p == ' ';)
+				++p;
+			buf[nr - 1] = '\0';
+		}
+
+		pos = (off_t)lcnt * PID_LEN;
+		if (REGISTRY_LOCK(dbenv, pos, 1) == 0) {
+			if ((ret = REGISTRY_UNLOCK(dbenv, pos)) != 0)
+				return (ret);
+
+			if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+				__db_msg(dbenv, "%02u: %s: FAILED", lcnt, p);
+
+			need_recovery = 1;
+			break;
+		} else
+			if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+				__db_msg(dbenv, "%02u: %s: LOCKED", lcnt, p);
+	}
+
+	/*
+	 * If we have to perform recovery...
+	 *
+	 * Mark all slots empty.  Registry ignores empty slots we can't lock,
+	 * so it doesn't matter if any of the processes are in the middle of
+	 * exiting Berkeley DB -- they'll discard their lock when they exit.
+	 */
+	if (need_recovery) {
+		/* Figure out how big the file is. */
+		if ((ret = __os_ioinfo(
+		    dbenv, NULL, dbenv->registry, &mbytes, &bytes, NULL)) != 0)
+			return (ret);
+		end = (off_t)mbytes * MEGABYTE + bytes;
+
+		/* Confirm the file is of a reasonable size. */
+		DB_ASSERT(end % PID_LEN == 0);
+
+		/*
+		 * Seek to the beginning of the file and overwrite slots to
+		 * the end of the file.
+		 */
+		if ((ret = __os_seek(
+		    dbenv, dbenv->registry, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+			return (ret);
+		snprintf(buf, sizeof(buf), PID_EMPTY, (u_long)0);
+		for (lcnt = (u_int)end / PID_LEN; lcnt > 0; --lcnt)
+			if ((ret = __os_write(
+			    dbenv, dbenv->registry, buf, PID_LEN, &nw)) != 0 ||
+			    nw != PID_LEN)
+				goto corrupt;
+	}
+
+	/*
+	 * Seek to the first process slot and add ourselves to the first empty
+	 * slot we can lock.
+	 */
+	if ((ret = __os_seek(
+	    dbenv, dbenv->registry, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+		return (ret);
+	for (lcnt = 0;; ++lcnt) {
+		if ((ret = __os_read(
+		    dbenv, dbenv->registry, buf, PID_LEN, &nr)) != 0)
+			return (ret);
+		if (nr == PID_LEN && !PID_ISEMPTY(buf))
+			continue;
+		pos = (off_t)lcnt * PID_LEN;
+		if (REGISTRY_LOCK(dbenv, pos, 1) == 0) {
+			if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+				__db_msg(dbenv,
+				    "%lu: locking slot %02u at offset %lu",
+				    (u_long)pid, lcnt, (u_long)pos);
+
+			if ((ret = __os_seek(dbenv, dbenv->registry,
+			    0, 0, (u_int32_t)pos, 0, DB_OS_SEEK_SET)) != 0 ||
+			    (ret = __os_write(dbenv,
+			    dbenv->registry, pid_buf, PID_LEN, &nw)) != 0 ||
+			    nw != PID_LEN)
+				return (ret);
+			dbenv->registry_off = (u_int32_t)pos;
+			break;
+		}
+	}
+
+	if (need_recovery)
+		*need_recoveryp = 1;
+
+	if (0) {
+corrupt:	__db_err(dbenv, "%s: file contents corrupted", REGISTER_FILE);
+		return (ret == 0 ? EACCES : ret);
+	}
+
+	return (ret);
+}
+
+/*
+ * __envreg_unregister --
+ *	Unregister a DB_ENV handle.
+ *
+ * PUBLIC: int __envreg_unregister __P((DB_ENV *, int));
+ */
+int
+__envreg_unregister(dbenv, recovery_failed)
+	DB_ENV *dbenv;
+	int recovery_failed;
+{
+	size_t nw;
+	int ret, t_ret;
+	char buf[256];
+
+	ret = 0;
+
+	/*
+	 * If recovery failed, we want to drop our locks and return, but still
+	 * make sure any subsequent process doesn't decide everything is just
+	 * fine and try to get into the database environment.  In the case of
+	 * an error, discard our locks, but leave our slot filled-in.
+	 */
+	if (recovery_failed)
+		goto err;
+
+	/*
+	 * Why isn't an exclusive lock necessary to discard a DB_ENV handle?
+	 *
+	 * We mark our process ID slot empty before we discard the process slot
+	 * lock, and threads of control reviewing the register file ignore any
+	 * slots which they can't lock.
+	 */
+	snprintf(buf, sizeof(buf), PID_EMPTY, (u_long)0);
+	if ((ret = __os_seek(dbenv, dbenv->registry,
+	    0, 0, dbenv->registry_off, 0, DB_OS_SEEK_SET)) != 0 ||
+	    (ret = __os_write(
+	    dbenv, dbenv->registry, buf, PID_LEN, &nw)) != 0 ||
+	    nw != PID_LEN)
+		goto err;
+
+	/*
+	 * !!!
+	 * This code assumes that closing the file descriptor discards all
+	 * held locks.
+	 *
+	 * !!!
+	 * There is an ordering problem here -- in the case of a process that
+	 * failed in recovery, we're unlocking both the exclusive lock and our
+	 * slot lock.  If the OS unlocked the exclusive lock and then allowed
+ * another thread of control to acquire the exclusive lock before
+ * also releasing our slot lock, we could race.  That can't happen, I
+	 * don't think.
+	 */
+err:	if ((t_ret =
+	    __os_closehandle(dbenv, dbenv->registry)) != 0 && ret == 0)
+		ret = t_ret;
+
+	dbenv->registry = NULL;
+	return (ret);
+}
+
+/*
+ * __envreg_xunlock --
+ *	Discard the exclusive lock held by the DB_ENV handle.
+ *
+ * PUBLIC: int __envreg_xunlock __P((DB_ENV *));
+ */
+int
+__envreg_xunlock(dbenv)
+	DB_ENV *dbenv;
+{
+	pid_t pid;
+	db_threadid_t tid;
+	int ret;
+
+	dbenv->thread_id(dbenv, &pid, &tid);
+
+	if (FLD_ISSET(dbenv->verbose, DB_VERB_REGISTER))
+		__db_msg(dbenv,
+		    "%lu: recovery completed, unlocking", (u_long)pid);
+
+	if ((ret = REGISTRY_EXCL_UNLOCK(dbenv)) == 0)
+		return (ret);
+
+	__db_err(dbenv,
+	    "%s: exclusive file unlock: %s", REGISTER_FILE, db_strerror(ret));
+	return (__db_panic(dbenv, ret));
+}
diff --git a/storage/bdb/env/env_stat.c b/storage/bdb/env/env_stat.c
index c9ab7a112b3..57130773fa7 100644
--- a/storage/bdb/env/env_stat.c
+++ b/storage/bdb/env/env_stat.c
@@ -1,16 +1,28 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: env_stat.c,v 1.21 2004/10/29 17:37:23 bostic Exp $
+ * $Id: env_stat.c,v 12.23 2005/11/01 00:44:25 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
+#include "string.h"
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
 #endif
 
 #include "db_int.h"
@@ -23,23 +35,27 @@
 #include "dbinc/txn.h"
 
 #ifdef HAVE_STATISTICS
-static int  __dbenv_print_all __P((DB_ENV *, u_int32_t));
-static int  __dbenv_print_stats __P((DB_ENV *, u_int32_t));
-static int  __dbenv_stat_print __P((DB_ENV *, u_int32_t));
-static const char *__reg_type __P((reg_type_t));
+static int   __env_print_all __P((DB_ENV *, u_int32_t));
+static int   __env_print_stats __P((DB_ENV *, u_int32_t));
+static int   __env_print_threads __P((DB_ENV *));
+static int   __env_stat_print __P((DB_ENV *, u_int32_t));
+static char *__env_thread_state_print __P((DB_THREAD_STATE));
+static const char *
+	     __reg_type __P((reg_type_t));
 
 /*
- * __dbenv_stat_print_pp --
+ * __env_stat_print_pp --
  *	DB_ENV->stat_print pre/post processor.
  *
- * PUBLIC: int __dbenv_stat_print_pp __P((DB_ENV *, u_int32_t));
+ * PUBLIC: int __env_stat_print_pp __P((DB_ENV *, u_int32_t));
  */
 int
-__dbenv_stat_print_pp(dbenv, flags)
+__env_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->stat_print");
@@ -48,32 +64,35 @@ __dbenv_stat_print_pp(dbenv, flags)
 	    flags, DB_STAT_ALL | DB_STAT_CLEAR | DB_STAT_SUBSYSTEM)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __dbenv_stat_print(dbenv, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__env_stat_print(dbenv, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
 /*
- * __dbenv_stat_print --
+ * __env_stat_print --
  *	DB_ENV->stat_print method.
  */
 static int
-__dbenv_stat_print(dbenv, flags)
+__env_stat_print(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
-	DB *dbp;
+	time_t now;
 	int ret;
 
-	if ((ret = __dbenv_print_stats(dbenv, flags)) != 0)
+	(void)time(&now);
+	__db_msg(dbenv, "%.24s\tLocal time", ctime(&now));
+
+	if ((ret = __env_print_stats(dbenv, flags)) != 0)
 		return (ret);
 
 	if (LF_ISSET(DB_STAT_ALL) &&
-	    (ret = __dbenv_print_all(dbenv, flags)) != 0)
+	    (ret = __env_print_all(dbenv, flags)) != 0)
+		return (ret);
+
+	if ((ret = __env_print_threads(dbenv)) != 0)
 		return (ret);
 
 	if (!LF_ISSET(DB_STAT_SUBSYSTEM))
@@ -82,10 +101,20 @@ __dbenv_stat_print(dbenv, flags)
 	/* The subsystems don't know anything about DB_STAT_SUBSYSTEM. */
 	LF_CLR(DB_STAT_SUBSYSTEM);
 
+	if (MUTEX_ON(dbenv)) {
+		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+		if ((ret = __mutex_stat_print(dbenv, flags)) != 0)
+			return (ret);
+	}
+
 	if (LOGGING_ON(dbenv)) {
 		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 		if ((ret = __log_stat_print(dbenv, flags)) != 0)
 			return (ret);
+
+		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+		if ((ret = __dbreg_stat_print(dbenv, flags)) != 0)
+			return (ret);
 	}
 
 	if (LOCKING_ON(dbenv)) {
@@ -112,28 +141,16 @@ __dbenv_stat_print(dbenv, flags)
 			return (ret);
 	}
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
-	for (dbp = LIST_FIRST(&dbenv->dblist);
-	    dbp != NULL; dbp = LIST_NEXT(dbp, dblistlinks)) {
-		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
-		__db_msg(dbenv, "%s%s%s\tDatabase name",
-		    dbp->fname, dbp->dname == NULL ? "" : "/",
-		    dbp->dname == NULL ? "" : dbp->dname);
-		if ((ret = __db_stat_print(dbp, flags)) != 0)
-			break;
-	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
-
-	return (ret);
+	return (0);
 }
 
 /*
- * __dbenv_print_stats --
+ * __env_print_stats --
  *	Display the default environment statistics.
  *
  */
 static int
-__dbenv_print_stats(dbenv, flags)
+__env_print_stats(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
@@ -147,24 +164,26 @@ __dbenv_print_stats(dbenv, flags)
 		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 		__db_msg(dbenv, "Default database environment information:");
 	}
-	__db_msg(dbenv, "%d.%d.%d\tEnvironment version",
-	    renv->majver, renv->minver, renv->patch);
 	STAT_HEX("Magic number", renv->magic);
-	STAT_LONG("Panic value", renv->envpanic);
+	STAT_LONG("Panic value", renv->panic);
+	__db_msg(dbenv, "%d.%d.%d\tEnvironment version",
+	    renv->majver, renv->minver, renv->patchver);
+	__db_msg(dbenv, "%.24s\tCreation time", ctime(&renv->timestamp));
+	STAT_HEX("Environment ID", renv->envid);
+	__mutex_print_debug_single(dbenv,
+	    "Primary region allocation and reference count mutex",
+	    renv->mtx_regenv, flags);
 	STAT_LONG("References", renv->refcnt);
 
-	__db_print_mutex(dbenv, NULL, &renv->mutex,
-	    "The number of region locks that required waiting", flags);
-
 	return (0);
 }
 
 /*
- * __dbenv_print_all --
+ * __env_print_all --
  *	Display the debugging environment statistics.
  */
 static int
-__dbenv_print_all(dbenv, flags)
+__env_print_all(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
@@ -176,6 +195,7 @@ __dbenv_print_all(dbenv, flags)
 		{ DB_ENV_DBLOCAL,		"DB_ENV_DBLOCAL" },
 		{ DB_ENV_DIRECT_DB,		"DB_ENV_DIRECT_DB" },
 		{ DB_ENV_DIRECT_LOG,		"DB_ENV_DIRECT_LOG" },
+		{ DB_ENV_DSYNC_DB,		"DB_ENV_DSYNC_DB" },
 		{ DB_ENV_DSYNC_LOG,		"DB_ENV_DSYNC_LOG" },
 		{ DB_ENV_FATAL,			"DB_ENV_FATAL" },
 		{ DB_ENV_LOCKDOWN,		"DB_ENV_LOCKDOWN" },
@@ -200,15 +220,13 @@ __dbenv_print_all(dbenv, flags)
 	};
 	static const FN ofn[] = {
 		{ DB_CREATE,			"DB_CREATE" },
-		{ DB_CXX_NO_EXCEPTIONS,	"DB_CXX_NO_EXCEPTIONS" },
 		{ DB_FORCE,			"DB_FORCE" },
 		{ DB_INIT_CDB,			"DB_INIT_CDB" },
-		{ DB_INIT_LOCK,		"DB_INIT_LOCK" },
+		{ DB_INIT_LOCK,			"DB_INIT_LOCK" },
 		{ DB_INIT_LOG,			"DB_INIT_LOG" },
 		{ DB_INIT_MPOOL,		"DB_INIT_MPOOL" },
 		{ DB_INIT_REP,			"DB_INIT_REP" },
 		{ DB_INIT_TXN,			"DB_INIT_TXN" },
-		{ DB_JOINENV,			"DB_JOINENV" },
 		{ DB_LOCKDOWN,			"DB_LOCKDOWN" },
 		{ DB_NOMMAP,			"DB_NOMMAP" },
 		{ DB_PRIVATE,			"DB_PRIVATE" },
@@ -226,51 +244,48 @@ __dbenv_print_all(dbenv, flags)
 	static const FN vfn[] = {
 		{ DB_VERB_DEADLOCK,		"DB_VERB_DEADLOCK" },
 		{ DB_VERB_RECOVERY,		"DB_VERB_RECOVERY" },
+		{ DB_VERB_REGISTER,		"DB_VERB_REGISTER" },
 		{ DB_VERB_REPLICATION,		"DB_VERB_REPLICATION" },
 		{ DB_VERB_WAITSFOR,		"DB_VERB_WAITSFOR" },
 		{ 0,				NULL }
 	};
+	static const FN regenvfn[] = {
+		{ DB_REGENV_REPLOCKED,		"DB_REGENV_REPLOCKED" },
+		{ 0,				NULL }
+	};
 	DB_MSGBUF mb;
 	REGENV *renv;
 	REGINFO *infop;
-	REGION *rp, regs[1024];
-	size_t n;
+	REGION *rp;
+	u_int32_t i;
 	char **p;
 
 	infop = dbenv->reginfo;
 	renv = infop->primary;
 	DB_MSGBUF_INIT(&mb);
 
-	/*
-	 * Lock the database environment while we get copies of the region
-	 * information.
-	 */
-	MUTEX_LOCK(dbenv, &infop->rp->mutex);
+	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+	__db_prflags(dbenv,
+	    NULL, renv->init_flags, ofn, NULL, "\tInitialization flags");
+	STAT_ULONG("Region slots", renv->region_cnt);
+	__db_prflags(dbenv,
+	    NULL, renv->flags, regenvfn, NULL, "\tReplication flags");
+	__db_msg(dbenv, "%.24s\tOperation timestamp",
+	    renv->op_timestamp == 0 ? "!Set" : ctime(&renv->op_timestamp));
+	__db_msg(dbenv, "%.24s\tReplication timestamp",
+	    renv->rep_timestamp == 0 ? "!Set" : ctime(&renv->rep_timestamp));
 
-	for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
-	    n < sizeof(regs) / sizeof(regs[0]) && rp != NULL;
-	    ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) {
-		regs[n] = *rp;
-		if (LF_ISSET(DB_STAT_CLEAR))
-			MUTEX_CLEAR(&rp->mutex);
-	}
-	if (n > 0)
-		--n;
-	MUTEX_UNLOCK(dbenv, &infop->rp->mutex);
-
-	if (LF_ISSET(DB_STAT_ALL)) {
-		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
-		__db_msg(dbenv, "Per region database environment information:");
-	}
-	while (n > 0) {
-		rp = &regs[--n];
+	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+	__db_msg(dbenv, "Per region database environment information:");
+	for (rp = R_ADDR(infop, renv->region_off),
+	    i = 0; i < renv->region_cnt; ++i, ++rp) {
+		if (rp->id == INVALID_REGION_ID)
+			continue;
 		__db_msg(dbenv, "%s Region:", __reg_type(rp->type));
 		STAT_LONG("Region ID", rp->id);
 		STAT_LONG("Segment ID", rp->segid);
 		__db_dlbytes(dbenv,
 		    "Size", (u_long)0, (u_long)0, (u_long)rp->size);
-		__db_print_mutex(dbenv, NULL, &rp->mutex,
-		    "The number of region locks that required waiting", flags);
 	}
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
@@ -300,36 +315,83 @@ __dbenv_print_all(dbenv, flags)
 	STAT_FMT("Mode", "%#o", int, dbenv->db_mode);
 	__db_prflags(dbenv, NULL, dbenv->open_flags, ofn, NULL, "\tOpen flags");
 	STAT_ISSET("Lockfhp", dbenv->lockfhp);
-	STAT_ISSET("Rec tab", dbenv->recover_dtab);
-	STAT_ULONG("Rec tab slots", dbenv->recover_dtab_size);
+	STAT_ISSET("Recovery table", dbenv->recover_dtab);
+	STAT_ULONG("Number of recovery table slots", dbenv->recover_dtab_size);
 	STAT_ISSET("RPC client", dbenv->cl_handle);
 	STAT_LONG("RPC client ID", dbenv->cl_id);
-	STAT_LONG("DB ref count", dbenv->db_ref);
-	STAT_LONG("Shared mem key", dbenv->shm_key);
-	STAT_ULONG("test-and-set spin configuration", dbenv->tas_spins);
-	__db_print_mutex(
-	    dbenv, NULL, dbenv->dblist_mutexp, "DB handle mutex", flags);
+	STAT_LONG("DB reference count", dbenv->db_ref);
+	STAT_LONG("Shared memory key", dbenv->shm_key);
+	__mutex_print_debug_single(
+	    dbenv, "DB handle mutex", dbenv->mtx_dblist, flags);
 
 	STAT_ISSET("api1 internal", dbenv->api1_internal);
 	STAT_ISSET("api2 internal", dbenv->api2_internal);
 	STAT_ISSET("password", dbenv->passwd);
 	STAT_ISSET("crypto handle", dbenv->crypto_handle);
-	__db_print_mutex(dbenv, NULL, dbenv->mt_mutexp, "MT mutex", flags);
+	__mutex_print_debug_single(dbenv, "MT mutex", dbenv->mtx_mt, flags);
 
 	__db_prflags(dbenv, NULL, dbenv->flags, fn, NULL, "\tFlags");
 
 	return (0);
 }
 
+static char *
+__env_thread_state_print(state)
+	DB_THREAD_STATE state;
+{
+	switch (state) {
+	case THREAD_ACTIVE:
+		return ("active");
+	case THREAD_BLOCKED:
+		return ("blocked");
+	case THREAD_OUT:
+		return ("out");
+	default:
+		return ("unknown");
+	}
+}
+
+/*
+ * __env_print_threads --
+ *	Display the current active threads
+ *
+ */
+static int
+__env_print_threads(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_HASHTAB *htab;
+	DB_THREAD_INFO *ip;
+	u_int32_t i;
+	char buf[DB_THREADID_STRLEN];
+
+	htab = (DB_HASHTAB *)dbenv->thr_hashtab;
+	__db_msg(dbenv, "Thread status blocks:");
+	for (i = 0; i < dbenv->thr_nbucket; i++) {
+		for (ip = SH_TAILQ_FIRST(&htab[i], __db_thread_info);
+		     ip != NULL;
+		     ip = SH_TAILQ_NEXT(ip, dbth_links, __db_thread_info)) {
+			if (ip->dbth_state == THREAD_SLOT_NOT_IN_USE)
+				continue;
+			__db_msg(dbenv, "\tprocess/thread %s: %s",
+			    dbenv->thread_id_string(
+			    dbenv, ip->dbth_pid, ip->dbth_tid, buf),
+			    __env_thread_state_print(ip->dbth_state));
+		}
+	}
+	return (0);
+}
+
 /*
  * __db_print_fh --
  *	Print out a file handle.
  *
- * PUBLIC: void __db_print_fh __P((DB_ENV *, DB_FH *, u_int32_t));
+ * PUBLIC: void __db_print_fh __P((DB_ENV *, const char *, DB_FH *, u_int32_t));
  */
 void
-__db_print_fh(dbenv, fh, flags)
+__db_print_fh(dbenv, tag, fh, flags)
 	DB_ENV *dbenv;
+	const char *tag;
 	DB_FH *fh;
 	u_int32_t flags;
 {
@@ -340,7 +402,13 @@ __db_print_fh(dbenv, fh, flags)
 		{ 0,		NULL }
 	};
 
-	__db_print_mutex(dbenv, NULL, fh->mutexp, "file-handle.mutex", flags);
+	if (fh == NULL) {
+		STAT_ISSET(tag, fh);
+		return;
+	}
+
+	__mutex_print_debug_single(
+	    dbenv, "file-handle.mutex", fh->mtx_fh, flags);
 
 	STAT_LONG("file-handle.reference count", fh->ref);
 	STAT_LONG("file-handle.file descriptor", fh->fd);
@@ -368,6 +436,11 @@ __db_print_fileid(dbenv, id, suffix)
 	DB_MSGBUF mb;
 	int i;
 
+	if (id == NULL) {
+		STAT_ISSET("ID", id);
+		return;
+	}
+
 	DB_MSGBUF_INIT(&mb);
 	for (i = 0; i < DB_FILE_ID_LEN; ++i, ++id) {
 		__db_msgadd(dbenv, &mb, "%x", (u_int)*id);
@@ -379,95 +452,6 @@ __db_print_fileid(dbenv, id, suffix)
 	DB_MSGBUF_FLUSH(dbenv, &mb);
 }
 
-/*
- * __db_print_mutex --
- *	Print out mutex statistics.
- *
- * PUBLIC: void __db_print_mutex
- * PUBLIC:    __P((DB_ENV *, DB_MSGBUF *, DB_MUTEX *, const char *, u_int32_t));
- */
-void
-__db_print_mutex(dbenv, mbp, mutex, suffix, flags)
-	DB_ENV *dbenv;
-	DB_MSGBUF *mbp;
-	DB_MUTEX *mutex;
-	const char *suffix;
-	u_int32_t flags;
-{
-	DB_MSGBUF mb;
-	u_long value;
-	int standalone;
-
-	/* If we don't have a mutex, point that out and return. */
-	if (mutex == NULL) {
-		STAT_ISSET(suffix, mutex);
-		return;
-	}
-
-	if (mbp == NULL) {
-		DB_MSGBUF_INIT(&mb);
-		mbp = &mb;
-		standalone = 1;
-	} else
-		standalone = 0;
-
-	/*
-	 * !!!
-	 * We may not hold the mutex lock -- that's OK, we're only reading
-	 * the statistics.
-	 */
-	if ((value = mutex->mutex_set_wait) < 10000000)
-		__db_msgadd(dbenv, mbp, "%lu", value);
-	else
-		__db_msgadd(dbenv, mbp, "%luM", value / 1000000);
-
-	/*
-	 * If standalone, append the mutex percent and the locker information
-	 * after the suffix line.  Otherwise, append it after the counter.
-	 *
-	 * The setting of "suffix" tracks "standalone" -- if standalone, expect
-	 * a suffix and prefix it with a , otherwise, it's optional.  This
-	 * isn't a design, it's just the semantics we happen to need right now.
-	 */
-	if (standalone) {
-		if (suffix == NULL)			/* Defense. */
-			suffix = "";
-
-		__db_msgadd(dbenv, &mb, "\t%s (%d%%", suffix,
-		    DB_PCT(mutex->mutex_set_wait,
-		    mutex->mutex_set_wait + mutex->mutex_set_nowait));
-#ifdef DIAGNOSTIC
-#ifdef HAVE_MUTEX_THREADS
-		if (mutex->locked != 0)
-			__db_msgadd(dbenv, &mb, "/%lu", (u_long)mutex->locked);
-#else
-		if (mutex->pid != 0)
-			__db_msgadd(dbenv, &mb, "/%lu", (u_long)mutex->pid);
-#endif
-#endif
-		__db_msgadd(dbenv, &mb, ")");
-
-		DB_MSGBUF_FLUSH(dbenv, mbp);
-	} else {
-		__db_msgadd(dbenv, mbp, "/%d%%", DB_PCT(mutex->mutex_set_wait,
-		    mutex->mutex_set_wait + mutex->mutex_set_nowait));
-#ifdef DIAGNOSTIC
-#ifdef HAVE_MUTEX_THREADS
-		if (mutex->locked != 0)
-			__db_msgadd(dbenv, &mb, "/%lu", (u_long)mutex->locked);
-#else
-		if (mutex->pid != 0)
-			__db_msgadd(dbenv, &mb, "/%lu", (u_long)mutex->pid);
-#endif
-#endif
-		if (suffix != NULL)
-			__db_msgadd(dbenv, mbp, "%s", suffix);
-	}
-
-	if (LF_ISSET(DB_STAT_CLEAR))
-		MUTEX_CLEAR(mutex);
-}
-
 /*
  * __db_dl --
  *	Display a big value.
@@ -510,12 +494,13 @@ __db_dl_pct(dbenv, msg, value, pct, tag)
 
 	/*
 	 * Two formats: if less than 10 million, display as the number, if
-	 * greater than 10 million display as ###M.
+	 * greater than 10 million, round it off and display as ###M.
 	 */
 	if (value < 10000000)
 		__db_msgadd(dbenv, &mb, "%lu\t%s", value, msg);
 	else
-		__db_msgadd(dbenv, &mb, "%luM\t%s", value / 1000000, msg);
+		__db_msgadd(dbenv,
+		    &mb, "%luM\t%s", (value + 500000) / 1000000, msg);
 	if (tag == NULL)
 		__db_msgadd(dbenv, &mb, " (%d%%)", pct);
 	else
@@ -602,9 +587,9 @@ __db_print_reginfo(dbenv, infop, s)
 	STAT_STRING("Region type", __reg_type(infop->type));
 	STAT_ULONG("Region ID", infop->id);
 	STAT_STRING("Region name", infop->name);
-	STAT_HEX("Original region address", infop->addr_orig);
-	STAT_HEX("Region address", infop->addr);
-	STAT_HEX("Region primary address", infop->primary);
+	STAT_POINTER("Original region address", infop->addr_orig);
+	STAT_POINTER("Region address", infop->addr);
+	STAT_POINTER("Region primary address", infop->primary);
 	STAT_ULONG("Region maximum allocation", infop->max_alloc);
 	STAT_ULONG("Region allocated", infop->max_alloc);
 
@@ -655,7 +640,7 @@ __db_stat_not_built(dbenv)
 }
 
 int
-__dbenv_stat_print_pp(dbenv, flags)
+__env_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
diff --git a/storage/bdb/fileops/fileops.src b/storage/bdb/fileops/fileops.src
index a77b5d5c477..9a1822c94f7 100644
--- a/storage/bdb/fileops/fileops.src
+++ b/storage/bdb/fileops/fileops.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: fileops.src,v 1.13 2004/06/17 17:35:20 bostic Exp $
+ * $Id: fileops.src,v 12.2 2005/06/16 20:22:47 bostic Exp $
  */
 
 PREFIX	__fop
@@ -20,7 +20,6 @@ INCLUDE
 INCLUDE #include "db_int.h"
 INCLUDE #include "dbinc/crypto.h"
 INCLUDE #include "dbinc/db_page.h"
-INCLUDE #include "dbinc/db_dispatch.h"
 INCLUDE #include "dbinc/db_am.h"
 INCLUDE #include "dbinc/log.h"
 INCLUDE #include "dbinc/txn.h"
diff --git a/storage/bdb/fileops/fop_basic.c b/storage/bdb/fileops/fop_basic.c
index 36a958e950a..d4202aa36a1 100644
--- a/storage/bdb/fileops/fop_basic.c
+++ b/storage/bdb/fileops/fop_basic.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: fop_basic.c,v 1.32 2004/11/15 20:04:50 bostic Exp $
+ * $Id: fop_basic.c,v 12.8 2005/10/12 17:52:16 bostic Exp $
  */
 
 #include "db_config.h"
@@ -24,9 +24,33 @@
 #include "dbinc/db_am.h"
 
 /*
- * This file implements the basic file-level operations.  This code
- * ought to be fairly independent of DB, other than through its
- * error-reporting mechanism.
+ * The transactional guarantees Berkeley DB provides for file
+ * system level operations (database physical file create, delete,
+ * rename) are based on our understanding of current file system
+ * semantics; a system that does not provide these semantics and
+ * guarantees could be in danger.
+ *
+ * First, as in standard database changes, fsync and fdatasync must
+ * work: when applied to the log file, the records written into the
+ * log must be transferred to stable storage.
+ *
+ * Second, it must not be possible for the log file to be removed
+ * without previous file system level operations being flushed to
+ * stable storage.  Berkeley DB applications write log records
+ * describing file system operations into the log, then perform the
+ * file system operation, then commit the enclosing transaction
+ * (which flushes the log file to stable storage).  Subsequently,
+ * a database environment checkpoint may make it possible for the
+ * application to remove the log file containing the record of the
+ * file system operation.  DB's transactional guarantees for file
+ * system operations require the log file removal not succeed until
+ * all previous filesystem operations have been flushed to stable
+ * storage.  In other words, the flush of the log file, or the
+ * removal of the log file, must block until all previous
+ * filesystem operations have been flushed to stable storage.  This
+ * semantic is not, as far as we know, required by any existing
+ * standards document, but we have never seen a filesystem where
+ * it does not apply.
  */
 
 /*
@@ -55,20 +79,22 @@ __fop_create(dbenv, txn, fhpp, name, appname, mode, flags)
 	char *real_name;
 
 	real_name = NULL;
+	fhp = NULL;
 
 	if ((ret =
 	    __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
 		return (ret);
 
 	if (mode == 0)
-		mode = __db_omode("rw----");
+		mode = __db_omode(OWNER_RW);
 
 	if (DBENV_LOGGING(dbenv)) {
 		memset(&data, 0, sizeof(data));
 		data.data = (void *)name;
 		data.size = (u_int32_t)strlen(name) + 1;
 		if ((ret = __fop_create_log(dbenv, txn, &lsn,
-		    flags | DB_FLUSH, &data, (u_int32_t)appname, mode)) != 0)
+		    flags | DB_FLUSH,
+		    &data, (u_int32_t)appname, (u_int32_t)mode)) != 0)
 			goto err;
 	}
 
@@ -117,7 +143,7 @@ __fop_remove(dbenv, txn, fileid, name, appname, flags)
 
 	if (txn == NULL) {
 		if (fileid != NULL && (ret = __memp_nameop(
-		    dbenv, fileid, NULL, real_name, NULL)) != 0)
+		    dbenv, fileid, NULL, real_name, NULL, 0)) != 0)
 			goto err;
 	} else {
 		if (DBENV_LOGGING(dbenv)) {
@@ -127,11 +153,11 @@ __fop_remove(dbenv, txn, fileid, name, appname, flags)
 			memset(&ndbt, 0, sizeof(ndbt));
 			ndbt.data = (void *)name;
 			ndbt.size = (u_int32_t)strlen(name) + 1;
-			if ((ret = __fop_remove_log(dbenv,
-			    txn, &lsn, flags, &ndbt, &fdbt, appname)) != 0)
+			if ((ret = __fop_remove_log(dbenv, txn, &lsn,
+			    flags, &ndbt, &fdbt, (u_int32_t)appname)) != 0)
 				goto err;
 		}
-		ret = __txn_remevent(dbenv, txn, real_name, fileid);
+		ret = __txn_remevent(dbenv, txn, real_name, fileid, 0);
 	}
 
 err:	if (real_name != NULL)
@@ -192,8 +218,9 @@ __fop_write(dbenv,
 		memset(&namedbt, 0, sizeof(namedbt));
 		namedbt.data = (void *)name;
 		namedbt.size = (u_int32_t)strlen(name) + 1;
-		if ((ret = __fop_write_log(dbenv, txn, &lsn, flags,
-		    &namedbt, appname, pgsize, pageno, off, &data, istmp)) != 0)
+		if ((ret = __fop_write_log(dbenv, txn,
+		    &lsn, flags, &namedbt, (u_int32_t)appname,
+		    pgsize, pageno, off, &data, istmp)) != 0)
 			goto err;
 	}
 
@@ -265,7 +292,7 @@ __fop_rename(dbenv, txn, oldname, newname, fid, appname, flags)
 			goto err;
 	}
 
-	ret = __memp_nameop(dbenv, fid, newname, o, n);
+	ret = __memp_nameop(dbenv, fid, newname, o, n, 0);
 
 err:	if (o != NULL)
 		__os_free(dbenv, o);
diff --git a/storage/bdb/fileops/fop_rec.c b/storage/bdb/fileops/fop_rec.c
index a9326d53289..bccf89ae1ab 100644
--- a/storage/bdb/fileops/fop_rec.c
+++ b/storage/bdb/fileops/fop_rec.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: fop_rec.c,v 1.31 2004/09/22 03:45:25 bostic Exp $
+ * $Id: fop_rec.c,v 12.6 2005/10/12 17:52:16 bostic Exp $
  */
 
 #include "db_config.h"
@@ -23,6 +23,36 @@
 #include "dbinc/mp.h"
 #include "dbinc/txn.h"
 
+/*
+ * The transactional guarantees Berkeley DB provides for file
+ * system level operations (database physical file create, delete,
+ * rename) are based on our understanding of current file system
+ * semantics; a system that does not provide these semantics and
+ * guarantees could be in danger.
+ *
+ * First, as in standard database changes, fsync and fdatasync must
+ * work: when applied to the log file, the records written into the
+ * log must be transferred to stable storage.
+ *
+ * Second, it must not be possible for the log file to be removed
+ * without previous file system level operations being flushed to
+ * stable storage.  Berkeley DB applications write log records
+ * describing file system operations into the log, then perform the
+ * file system operation, then commit the enclosing transaction
+ * (which flushes the log file to stable storage).  Subsequently,
+ * a database environment checkpoint may make it possible for the
+ * application to remove the log file containing the record of the
+ * file system operation.  DB's transactional guarantees for file
+ * system operations require the log file removal not succeed until
+ * all previous filesystem operations have been flushed to stable
+ * storage.  In other words, the flush of the log file, or the
+ * removal of the log file, must block until all previous
+ * filesystem operations have been flushed to stable storage.  This
+ * semantic is not, as far as we know, required by any existing
+ * standards document, but we have never seen a filesystem where
+ * it does not apply.
+ */
+
 /*
  * __fop_create_recover --
  *	Recovery function for create.
@@ -56,7 +86,7 @@ __fop_create_recover(dbenv, dbtp, lsnp, op, info)
 		(void)__os_unlink(dbenv, real_name);
 	else if (DB_REDO(op)) {
 		if ((ret = __os_open(dbenv, real_name,
-		    DB_OSO_CREATE | DB_OSO_EXCL, argp->mode, &fhp)) == 0)
+		    DB_OSO_CREATE | DB_OSO_EXCL, (int)argp->mode, &fhp)) == 0)
 			(void)__os_closehandle(dbenv, fhp);
 		else
 			goto out;
@@ -101,7 +131,7 @@ __fop_remove_recover(dbenv, dbtp, lsnp, op, info)
 	/* Its ok if the file is not there. */
 	if (DB_REDO(op))
 		(void)__memp_nameop(dbenv,
-		    (u_int8_t *)argp->fid.data, NULL, real_name, NULL);
+		    (u_int8_t *)argp->fid.data, NULL, real_name, NULL, 0);
 
 	*lsnp = argp->prev_lsn;
 out:	if (real_name != NULL)
@@ -136,7 +166,7 @@ __fop_write_recover(dbenv, dbtp, lsnp, op, info)
 		DB_ASSERT(argp->flag != 0);
 	else if (DB_REDO(op))
 		ret = __fop_write(dbenv,
-		    argp->txnid, argp->name.data, argp->appname,
+		    argp->txnid, argp->name.data, (APPNAME)argp->appname,
 		    NULL, argp->pgsize, argp->pageno, argp->offset,
 		    argp->page.data, argp->page.size, argp->flag, 0);
 
@@ -209,14 +239,33 @@ __fop_rename_recover(dbenv, dbtp, lsnp, op, info)
 			goto done;
 		(void)__os_closehandle(dbenv, fhp);
 		fhp = NULL;
+		if (DB_REDO(op)) {
+			/*
+			 * Check to see if the target file exists.  If it
+			 * does and it does not have the proper id then
+			 * it is a later version.  We just remove the source
+			 * file since the state of the world is beyond this
+			 * point.
+			 */
+			if (__os_open(dbenv, real_new, 0, 0, &fhp) == 0 &&
+			    __fop_read_meta(dbenv, src, mbuf,
+			    DBMETASIZE, fhp, 1, NULL) == 0 &&
+			    __db_chk_meta(dbenv, NULL, meta, 1) == 0 &&
+			    memcmp(argp->fileid.data,
+			    meta->uid, DB_FILE_ID_LEN) != 0) {
+				(void)__memp_nameop(dbenv,
+				    fileid, NULL, real_old, NULL, 0);
+				goto done;
+			}
+		}
 	}
 
 	if (DB_UNDO(op))
 		(void)__memp_nameop(dbenv, fileid,
-		    (const char *)argp->oldname.data, real_new, real_old);
+		    (const char *)argp->oldname.data, real_new, real_old, 0);
 	if (DB_REDO(op))
 		(void)__memp_nameop(dbenv, fileid,
-		    (const char *)argp->newname.data, real_old, real_new);
+		    (const char *)argp->newname.data, real_old, real_new, 0);
 
 done:	*lsnp = argp->prev_lsn;
 out:	if (real_new != NULL)
@@ -327,7 +376,7 @@ __fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
 		if (cstat == TXN_COMMIT)
 			(void)__memp_nameop(dbenv,
 			    is_real ? argp->real_fid.data : argp->tmp_fid.data,
-			    NULL, real_name, NULL);
+			    NULL, real_name, NULL, 0);
 	}
 
 done:	*lsnp = argp->prev_lsn;
diff --git a/storage/bdb/fileops/fop_util.c b/storage/bdb/fileops/fop_util.c
index 564dc4a36b1..179452f23db 100644
--- a/storage/bdb/fileops/fop_util.c
+++ b/storage/bdb/fileops/fop_util.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: fop_util.c,v 1.104 2004/09/24 00:43:18 bostic Exp $
+ * $Id: fop_util.c,v 12.19 2005/10/27 01:26:00 mjc Exp $
  */
 
 #include "db_config.h"
@@ -12,7 +12,6 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#include 
 #include 
 #endif
 
@@ -20,6 +19,7 @@
 #include "dbinc/db_page.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/db_am.h"
+#include "dbinc/hash.h"
 #include "dbinc/fop.h"
 #include "dbinc/lock.h"
 #include "dbinc/mp.h"
@@ -27,6 +27,15 @@
 #include "dbinc/txn.h"
 
 static int __fop_set_pgsize __P((DB *, DB_FH *, const char *));
+static int __fop_inmem_create __P((DB *, const char *, DB_TXN *, u_int32_t));
+static int __fop_inmem_dummy __P((DB *, DB_TXN *, const char *, u_int8_t *));
+static int __fop_inmem_read_meta __P((DB *, const char *, u_int32_t));
+static int __fop_inmem_swap __P((DB *, DB *, DB_TXN *,
+	       const char *, const char *, const char *, u_int32_t));
+static int __fop_ondisk_dummy __P((DB *,
+	       DB_TXN *, const char *, u_int8_t *, u_int32_t));
+static int __fop_ondisk_swap __P((DB *, DB *, DB_TXN *,
+	       const char *, const char *, const char *, u_int32_t, u_int32_t));
 
 /*
  * Acquire the environment meta-data lock.  The parameters are the
@@ -56,6 +65,14 @@ static int __fop_set_pgsize __P((DB *, DB_FH *, const char *));
 } while (0)
 #endif
 
+#define	RESET_MPF(D, F) do {						\
+	(void)__memp_fclose((D)->mpf, (F));				\
+	(D)->mpf = NULL;						\
+	F_CLR((D), DB_AM_OPEN_CALLED);					\
+	if ((ret = __memp_fcreate((D)->dbenv, &(D)->mpf)) != 0)		\
+		goto err;						\
+} while (0)
+
 /*
  * If we open a file handle and our caller is doing fcntl(2) locking,
  * we can't close the handle because that would discard the caller's
@@ -109,7 +126,7 @@ __fop_lock_handle(dbenv, dbp, locker, mode, elockp, flags)
 	 * doing is on the global environment.
 	 */
 	if (IS_RECOVERING(dbenv))
-		return (elockp == NULL ? 0 : __ENV_LPUT(dbenv, *elockp, 0));
+		return (elockp == NULL ? 0 : __ENV_LPUT(dbenv, *elockp));
 
 	memcpy(lock_desc.fileid, dbp->fileid, DB_FILE_ID_LEN);
 	lock_desc.pgno = dbp->meta_pgno;
@@ -199,24 +216,28 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp)
 	DB_FH *fhp;
 	DB_LOCK elock;
 	DB_TXN *stxn;
+	DBTYPE save_type;
 	size_t len;
 	u_int32_t dflags, locker, oflags;
 	u_int8_t mbuf[DBMETASIZE];
-	int created_locker, ret, retries, t_ret, tmp_created, truncating;
+	int created_locker, create_ok, ret, retries, t_ret, tmp_created;
+	int truncating, was_inval;
 	char *real_name, *real_tmpname, *tmpname;
 
-	DB_ASSERT(name != NULL);
-
 	*retidp = TXN_INVALID;
 
 	dbenv = dbp->dbenv;
 	fhp = NULL;
 	LOCK_INIT(elock);
 	stxn = NULL;
-	created_locker = tmp_created = truncating = 0;
+	created_locker = tmp_created = truncating = was_inval = 0;
 	real_name = real_tmpname = tmpname = NULL;
 	dflags = F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0;
 
+	ret = 0;
+	retries = 0;
+	save_type = dbp->type;
+
 	/*
 	 * Get a lockerid for this handle.  There are paths through queue
 	 * rename and remove where this dbp already has a locker, so make
@@ -226,7 +247,7 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp)
 	    !F_ISSET(dbp, DB_AM_COMPENSATE) &&
 	    !F_ISSET(dbp, DB_AM_RECOVER) &&
 	    dbp->lid == DB_LOCK_INVALIDID) {
-		if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+		if ((ret = __lock_id(dbenv, &dbp->lid, NULL)) != 0)
 			goto err;
 		created_locker = 1;
 	}
@@ -234,21 +255,29 @@ __fop_file_setup(dbp, txn, name, mode, flags, retidp)
 
 	locker = txn == NULL ? dbp->lid : txn->txnid;
 
-	/* Get the real backing file name. */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
-		goto err;
-
-	/* Fill in the default file mode. */
-	if (mode == 0)
-		mode = __db_omode("rwrw--");
-
 	oflags = 0;
-	if (LF_ISSET(DB_RDONLY))
-		oflags |= DB_OSO_RDONLY;
-	if (LF_ISSET(DB_TRUNCATE))
-		oflags |= DB_OSO_TRUNC;
+	if (F_ISSET(dbp, DB_AM_INMEM))
+		real_name = (char *)name;
+	else {
+		/* Get the real backing file name. */
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+			goto err;
+
+		/* Fill in the default file mode. */
+		if (mode == 0)
+			mode = __db_omode("rw-rw----");
+
+		if (LF_ISSET(DB_RDONLY))
+			oflags |= DB_OSO_RDONLY;
+		if (LF_ISSET(DB_TRUNCATE))
+			oflags |= DB_OSO_TRUNC;
+	}
+
 	retries = 0;
+	create_ok = LF_ISSET(DB_CREATE);
+	LF_CLR(DB_CREATE);
+
 retry:
 	/*
 	 * If we cannot create the file, only retry a few times.  We
@@ -263,7 +292,30 @@ retry:
 	}
 	if (!F_ISSET(dbp, DB_AM_COMPENSATE) && !F_ISSET(dbp, DB_AM_RECOVER))
 		GET_ENVLOCK(dbenv, locker, &elock);
-	if ((ret = __os_exists(real_name, NULL)) == 0) {
+	if (name == NULL)
+		ret = ENOENT;
+	else if (F_ISSET(dbp, DB_AM_INMEM)) {
+		ret = __db_dbenv_mpool(dbp, name, flags);
+		/*
+		 * We are using __db_dbenv_open as a check for existence.
+		 * However, db_dbenv_mpool does an actual open and there
+		 * are scenarios where the object exists, but cannot be
+		 * opened, because our settings don't match those internally.
+		 * We need to check for that explicitly.  We'll need the
+		 * mpool open to read the meta-data page, so we're going to
+		 * have to temporarily turn this dbp into an UNKNOWN one.
+		 */
+		if (ret == EINVAL) {
+			was_inval = 1;
+			save_type = dbp->type;
+			dbp->type = DB_UNKNOWN;
+			ret = __db_dbenv_mpool(dbp, name, flags);
+			dbp->type = save_type;
+		}
+	} else
+		ret = __os_exists(real_name, NULL);
+
+	if (ret == 0) {
 		/*
 		 * If the file exists, there are 5 possible cases:
 		 * 1. DB_EXCL was specified so this is an error, unless
@@ -275,12 +327,14 @@ retry:
 		 *	of file it is, we should open/create it.
 		 * 3. It is 0-length, we are not doing transactions (i.e.,
 		 *      we are sendmail), we should open/create into it.
+		 *	-- on-disk files only!
 		 * 4. Is it a Berkeley DB file and we should simply open it.
 		 * 5. It is not a BDB file and we should return an error.
 		 */
 
-		/* We have to open the file. */
-reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
+		/* Open file (if there is one). */
+reopen:		if (!F_ISSET(dbp, DB_AM_INMEM) &&
+		    (ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 			goto err;
 
 		/* Case 2: DB_TRUNCATE: we must do the creation in place. */
@@ -295,33 +349,43 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 		}
 
 		/* Cases 1,3-5: we need to read the meta-data page. */
-		ret = __fop_read_meta(dbenv, real_name, mbuf, sizeof(mbuf), fhp,
-		    LF_ISSET(DB_FCNTL_LOCKING) && txn == NULL ? 1 : 0, &len);
+		if (F_ISSET(dbp, DB_AM_INMEM))
+			ret = __fop_inmem_read_meta(dbp, name, flags);
+		else {
+			ret = __fop_read_meta(dbenv, real_name, mbuf,
+			    sizeof(mbuf), fhp,
+			    LF_ISSET(DB_FCNTL_LOCKING) && txn == NULL ? 1 : 0,
+			    &len);
 
-		/* Case 3: 0-length, no txns. */
-		if (ret != 0 && len == 0 && txn == NULL) {
-			if (LF_ISSET(DB_EXCL)) {
-				/* Case 1b: DB_EXCL and 0-lenth file exists. */
-				ret = EEXIST;
-				goto err;
+			/* Case 3: 0-length, no txns. */
+			if (ret != 0 && len == 0 && txn == NULL) {
+				if (LF_ISSET(DB_EXCL)) {
+					/*
+					 * Case 1b: DB_EXCL and
+					 * 0-lenth file exists.
+					 */
+					ret = EEXIST;
+					goto err;
+				}
+				tmpname = (char *)name;
+				goto creat2;
 			}
-			tmpname = (char *)name;
-			goto creat2;
+
+			/* Case 4: This is a valid file. */
+			if (ret == 0)
+				ret = __db_meta_setup(dbenv, dbp,
+				    real_name, (DBMETA *)mbuf, flags, 1);
+
 		}
 
 		/* Case 5: Invalid file. */
 		if (ret != 0)
 			goto err;
 
-		/* Case 4: This is a valid file. */
-		if ((ret = __db_meta_setup(dbenv,
-		    dbp, real_name, (DBMETA *)mbuf, flags, 1)) != 0)
-			goto err;
-
 		/* Now, get our handle lock. */
 		if ((ret = __fop_lock_handle(dbenv,
 		    dbp, locker, DB_LOCK_READ, NULL, DB_LOCK_NOWAIT)) == 0) {
-			if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0)
+			if ((ret = __ENV_LPUT(dbenv, elock)) != 0)
 				goto err;
 		} else if (ret != DB_LOCK_NOTGRANTED ||
 		    (txn != NULL && F_ISSET(txn, TXN_NOWAIT)))
@@ -342,27 +406,64 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 			 * any application level FCNTL semantics.
 			 */
 			DB_ASSERT(!LF_ISSET(DB_FCNTL_LOCKING));
-			if ((ret = __os_closehandle(dbenv, fhp)) != 0)
-				goto err;
-			fhp = NULL;
-			ret = __fop_lock_handle(dbenv,
-			    dbp, locker, DB_LOCK_READ, &elock, 0);
-			if (ret == DB_LOCK_NOTEXIST)
-				goto retry;
-			if (ret != 0)
+			if (!F_ISSET(dbp, DB_AM_INMEM)) {
+				if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+					goto err;
+				fhp = NULL;
+			}
+			if ((ret = __fop_lock_handle(dbenv,
+			    dbp, locker, DB_LOCK_READ, &elock, 0)) != 0) {
+				if (F_ISSET(dbp, DB_AM_INMEM))
+					RESET_MPF(dbp, 0);
 				goto err;
+			}
+
 			/*
-			 * XXX
-			 * I need to convince myself that I don't need to
-			 * re-read the metadata page here.  If you do need
-			 * to re-read it you'd better decrypt it too...
+			 * It's possible that our DBP was initialized
+			 * with a different file last time we opened it.
+			 * Therefore, we need to reset the DBP type and then
+			 * re-read the meta-data page and reset any other
+			 * fields that __db_meta_setup initializes.  We
+			 * need to shut down this dbp and reopen for in-memory
+			 * named databases. Unfortunately __db_refresh is
+			 * pretty aggressive at the shutting down, so we need
+			 * to do a bunch of restoration.
+			 * XXX it would be nice to pull refresh apart into
+			 * the stuff you need to do to call __db_env_mpool
+			 * and the stuff you can really throw away.
 			 */
-			if ((ret =
-			    __os_open(dbenv, real_name, 0, 0, &fhp)) != 0)
+			if (F_ISSET(dbp, DB_AM_INMEM)) {
+				if ((ret = __db_refresh(dbp,
+				    txn, DB_NOSYNC, NULL, 1)) != 0)
+					goto err;
+				ret = __db_dbenv_mpool(dbp, name, flags);
+			} else 
+				ret = __os_open(dbenv, real_name, 0, 0, &fhp);
+
+			if (ret != 0) {
+				if ((ret =
+				    __ENV_LPUT(dbenv, dbp->handle_lock)) != 0) {
+					LOCK_INIT(dbp->handle_lock);
+					goto err;
+				}
+				goto retry;
+			}
+
+			dbp->type = save_type;
+			if (F_ISSET(dbp, DB_AM_INMEM)) 
+				ret = __fop_inmem_read_meta(dbp, name, flags);
+			else if ((ret =
+			    __fop_read_meta(dbenv, real_name, mbuf,
+			    sizeof(mbuf), fhp,
+			    LF_ISSET(DB_FCNTL_LOCKING) && txn == NULL ? 1 : 0,
+			    &len)) != 0 ||
+			    (ret = __db_meta_setup(dbenv, dbp, real_name,
+			    (DBMETA *)mbuf, flags, 1)) != 0)
 				goto err;
+
 		}
 
-		/* If we got here, then we now have the handle lock. */
+		/* If we got here, then we have the handle lock. */
 
 		/*
 		 * Check for a file in the midst of a rename.  If we find that
@@ -370,12 +471,18 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 		 * that it is in our current transaction (else we would still
 		 * be blocking), so we can continue along and create a new file
 		 * with the same name.  In that case, we have to close the file
-		 * handle because we reuse it below.
+		 * handle because we reuse it below.  This is a case where
+		 * a 'was_inval' above is OK.
 		 */
 		if (F_ISSET(dbp, DB_AM_IN_RENAME)) {
-			if (LF_ISSET(DB_CREATE)) {
-				if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+			was_inval = 0;
+			if (create_ok) {
+				if (F_ISSET(dbp, DB_AM_INMEM)) {
+					RESET_MPF(dbp, DB_MPOOL_DISCARD);
+				} else if ((ret =
+				    __os_closehandle(dbenv, fhp)) != 0)
 					goto err;
+				LF_SET(DB_CREATE);
 				goto create;
 			} else {
 				ret = ENOENT;
@@ -383,6 +490,12 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 			}
 		}
 
+		/* If we get here, a was_inval is bad. */
+		if (was_inval) {
+			ret = EINVAL;
+			goto err;
+		}
+
 		/*
 		 * Now, case 1: check for DB_EXCL, because the file that exists
 		 * is not in the middle of a rename, so we have an error.  This
@@ -391,7 +504,7 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 		 * should not have been allowed to open it.
 		 */
 		if (LF_ISSET(DB_EXCL)) {
-			ret = __ENV_LPUT(dbenv, dbp->handle_lock, 0);
+			ret = __ENV_LPUT(dbenv, dbp->handle_lock);
 			LOCK_INIT(dbp->handle_lock);
 			if (ret == 0)
 				ret = EEXIST;
@@ -401,16 +514,28 @@ reopen:		if ((ret = __os_open(dbenv, real_name, oflags, 0, &fhp)) != 0)
 	}
 
 	/* File does not exist. */
-	if (!LF_ISSET(DB_CREATE))
+#ifdef	HAVE_VXWORKS
+	/*
+	 * VxWorks can return file-system specific error codes if the
+	 * file does not exist, not ENOENT.
+	 */
+	if (!create_ok)
+#else
+	if (!create_ok || ret != ENOENT)
+#endif
 		goto err;
+	LF_SET(DB_CREATE);
 	ret = 0;
 
 	/*
 	 * We need to create file, which means that we need to set up the file,
 	 * the fileid and the locks.  Then we need to call the appropriate
-	 * routines to create meta-data pages.
+	 * routines to create meta-data pages.  For in-memory files, we retain
+	 * the environment lock, while for on-disk files, we drop the env lock
+	 * and create into a temporary.
 	 */
-	if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0)
+	if (!F_ISSET(dbp, DB_AM_INMEM) &&
+	    (ret = __ENV_LPUT(dbenv, elock)) != 0)
 		goto err;
 
 create:	if (txn != NULL && IS_REP_CLIENT(dbenv)) {
@@ -419,41 +544,50 @@ create:	if (txn != NULL && IS_REP_CLIENT(dbenv)) {
 		ret = EINVAL;
 		goto err;
 	}
-	if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
-		goto err;
-	if (TXN_ON(dbenv) && txn != NULL &&
-	    (ret = __txn_begin(dbenv, txn, &stxn, 0)) != 0)
-		goto err;
-	if ((ret = __fop_create(dbenv,
-	    stxn, &fhp, tmpname, DB_APP_DATA, mode, dflags)) != 0) {
-		/*
-		 * If we don't have transactions there is a race on
-		 * creating the temp file.
-		 */
-		if (!TXN_ON(dbenv) && ret == EEXIST) {
-			__os_free(dbenv, tmpname);
-			tmpname = NULL;
-			__os_yield(dbenv, 1);
-			goto retry;
+
+	if (F_ISSET(dbp, DB_AM_INMEM))
+		ret = __fop_inmem_create(dbp, name, txn, flags);
+	else {
+		if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+			goto err;
+		if (TXN_ON(dbenv) && txn != NULL &&
+		    (ret = __txn_begin(dbenv, txn, &stxn, 0)) != 0)
+			goto err;
+		if ((ret = __fop_create(dbenv,
+		    stxn, &fhp, tmpname, DB_APP_DATA, mode, dflags)) != 0) {
+			/*
+			 * If we don't have transactions there is a race on
+			 * creating the temp file.
+			 */
+			if (!TXN_ON(dbenv) && ret == EEXIST) {
+				__os_free(dbenv, tmpname);
+				tmpname = NULL;
+				__os_yield(dbenv, 1);
+				goto retry;
+			}
+			goto err;
 		}
-		goto err;
+		tmp_created = 1;
 	}
-	tmp_created = 1;
 
-creat2:	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, tmpname, 0, NULL, &real_tmpname)) != 0)
-		goto err;
+creat2:	if (!F_ISSET(dbp, DB_AM_INMEM)) {
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, tmpname, 0, NULL, &real_tmpname)) != 0)
+			goto err;
 
-	/* Set the pagesize if it isn't yet set. */
-	if (dbp->pgsize == 0 &&
-	    (ret = __fop_set_pgsize(dbp, fhp, real_tmpname)) != 0)
-		goto errmsg;
+		/* Set the pagesize if it isn't yet set. */
+		if (dbp->pgsize == 0 &&
+		    (ret = __fop_set_pgsize(dbp, fhp, real_tmpname)) != 0)
+			goto errmsg;
 
-	/* Construct a file_id. */
-	if ((ret = __os_fileid(dbenv, real_tmpname, 1, dbp->fileid)) != 0)
-		goto errmsg;
+		/* Construct a file_id. */
+		if ((ret =
+		    __os_fileid(dbenv, real_tmpname, 1, dbp->fileid)) != 0)
+			goto errmsg;
+	}
 
-	if ((ret = __db_new_file(dbp, stxn, fhp, tmpname)) != 0)
+	if ((ret = __db_new_file(dbp,
+	    F_ISSET(dbp, DB_AM_INMEM) ? txn : stxn, fhp, tmpname)) != 0)
 		goto err;
 
 	/*
@@ -464,9 +598,12 @@ creat2:	if ((ret = __db_appname(dbenv,
 
 	/*
 	 * Now move the file into place unless we are creating in place (because
-	 * we created a database in a file that started out 0-length).
+	 * we created a database in a file that started out 0-length).  If
+	 * this is an in-memory file, we may or may not hold the environment
+	 * lock depending on how we got here.
 	 */
-	if (!F_ISSET(dbp, DB_AM_COMPENSATE) && !F_ISSET(dbp, DB_AM_RECOVER))
+	if (!F_ISSET(dbp, DB_AM_COMPENSATE) &&
+	    !F_ISSET(dbp, DB_AM_RECOVER) && !LOCK_ISSET(elock))
 		GET_ENVLOCK(dbenv, locker, &elock);
 
 	if (F_ISSET(dbp, DB_AM_IN_RENAME)) {
@@ -474,14 +611,15 @@ creat2:	if ((ret = __db_appname(dbenv,
 		__txn_remrem(dbenv, txn, real_name);
 	} else if (name == tmpname) {
 		/* We created it in place. */
-	} else if (__os_exists(real_name, NULL) == 0) {
+	} else if (!F_ISSET(dbp, DB_AM_INMEM) &&
+	    __os_exists(real_name, NULL) == 0) {
 		/*
 		 * Someone managed to create the file; remove our temp
 		 * and try to open the file that now exists.
 		 */
 		(void)__fop_remove(dbenv,
 		    NULL, dbp->fileid, tmpname, DB_APP_DATA, dflags);
-		(void)__ENV_LPUT(dbenv, dbp->handle_lock, 0);
+		(void)__ENV_LPUT(dbenv, dbp->handle_lock);
 		LOCK_INIT(dbp->handle_lock);
 
 		if (stxn != NULL) {
@@ -493,10 +631,10 @@ creat2:	if ((ret = __db_appname(dbenv,
 		goto reopen;
 	}
 
-	if ((ret = __fop_lock_handle(dbenv,
+	if (name != NULL && (ret = __fop_lock_handle(dbenv,
 	    dbp, locker, DB_LOCK_WRITE, &elock, NOWAIT_FLAG(txn))) != 0)
 		goto err;
-	if (tmpname != name && (ret = __fop_rename(dbenv,
+	if (tmpname != NULL && tmpname != name && (ret = __fop_rename(dbenv,
 	    stxn, tmpname, name, dbp->fileid, DB_APP_DATA, dflags)) != 0)
 		goto err;
 
@@ -522,8 +660,8 @@ err:		CLOSE_HANDLE(dbp, fhp);
 			(void)__fop_remove(dbenv,
 			    NULL, NULL, tmpname, DB_APP_DATA, dflags);
 		if (txn == NULL)
-			(void)__ENV_LPUT(dbenv, dbp->handle_lock, 0);
-		(void)__ENV_LPUT(dbenv, elock, 0);
+			(void)__ENV_LPUT(dbenv, dbp->handle_lock);
+		(void)__ENV_LPUT(dbenv, elock);
 		if (created_locker) {
 			(void)__lock_id_free(dbenv, dbp->lid);
 			dbp->lid = DB_LOCK_INVALIDID;
@@ -537,7 +675,7 @@ done:	/*
 	 */
 	if (!truncating && tmpname != NULL && tmpname != name)
 		__os_free(dbenv, tmpname);
-	if (real_name != NULL)
+	if (real_name != name && real_name != NULL)
 		__os_free(dbenv, real_name);
 	if (real_tmpname != NULL)
 		__os_free(dbenv, real_tmpname);
@@ -681,7 +819,7 @@ __fop_subdb_setup(dbp, txn, mname, name, mode, flags)
 		 * If there was no transaction and we created this database,
 		 * then we need to undo the update of the master database.
 		 */
-		if (F_ISSET(dbp, DB_AM_CREATED) && txn != NULL)
+		if (F_ISSET(dbp, DB_AM_CREATED) && txn == NULL)
 			(void)__db_master_update(mdbp, dbp, txn,
 			    name, dbp->type, MU_REMOVE, NULL, 0);
 		F_CLR(dbp, DB_AM_CREATED);
@@ -721,7 +859,7 @@ __fop_subdb_setup(dbp, txn, mname, name, mode, flags)
 err:
 DB_TEST_RECOVERY_LABEL
 		if (txn == NULL)
-			(void)__ENV_LPUT(dbenv, dbp->handle_lock, 0);
+			(void)__ENV_LPUT(dbenv, dbp->handle_lock);
 	}
 
 	/*
@@ -786,13 +924,14 @@ __fop_remove_setup(dbp, txn, name, flags)
 	PANIC_CHECK(dbenv);
 	LOCK_INIT(elock);
 	fhp = NULL;
+	ret = 0;
 
 	/* Create locker if necessary. */
 retry:	if (LOCKING_ON(dbenv)) {
 		if (txn != NULL)
 			dbp->lid = txn->txnid;
 		else if (dbp->lid == DB_LOCK_INVALIDID) {
-			if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+			if ((ret = __lock_id(dbenv, &dbp->lid, NULL)) != 0)
 				goto err;
 		}
 	}
@@ -816,15 +955,23 @@ retry:	if (LOCKING_ON(dbenv)) {
 	 * the handle.
 	 */
 	GET_ENVLOCK(dbenv, dbp->lid, &elock);
-	if (fhp == NULL &&
-	    (ret = __os_open(dbenv, name, DB_OSO_RDONLY, 0, &fhp)) != 0)
-		goto err;
-	if ((ret = __fop_read_meta(dbenv,
-	    name, mbuf, sizeof(mbuf), fhp, 0, NULL)) != 0)
+
+	/* Open database. */
+	if (F_ISSET(dbp, DB_AM_INMEM))
+		ret = __db_dbenv_mpool(dbp, name, flags);
+	else if (fhp == NULL)
+		ret = __os_open(dbenv, name, DB_OSO_RDONLY, 0, &fhp);
+	if (ret != 0)
 		goto err;
 
-	if ((ret =
-	    __db_meta_setup(dbenv, dbp, name, (DBMETA *)mbuf, flags, 1)) != 0)
+	/* Get meta-data */
+	if (F_ISSET(dbp, DB_AM_INMEM))
+		ret = __fop_inmem_read_meta(dbp, name, flags);
+	else if ((ret = __fop_read_meta(dbenv,
+	    name, mbuf, sizeof(mbuf), fhp, 0, NULL)) == 0)
+		ret = __db_meta_setup(dbenv,
+		    dbp, name, (DBMETA *)mbuf, flags, 1);
+	if (ret != 0)
 		goto err;
 
 	/*
@@ -839,39 +986,43 @@ retry:	if (LOCKING_ON(dbenv)) {
 		 * Close the file, block on the lock, clean up the dbp, and
 		 * then start all over again.
 		 */
-		if (!LF_ISSET(DB_FCNTL_LOCKING)) {
+		if (!F_ISSET(dbp, DB_AM_INMEM) && !LF_ISSET(DB_FCNTL_LOCKING)) {
 			(void)__os_closehandle(dbenv, fhp);
 			fhp = NULL;
 		}
-		if (ret == DB_LOCK_NOTEXIST) {
-			if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0)
-				goto err;
-		} else if (ret != DB_LOCK_NOTGRANTED ||
+		if (ret != DB_LOCK_NOTGRANTED ||
 		    (txn != NULL && F_ISSET(txn, TXN_NOWAIT)))
 			goto err;
 		else if ((ret = __fop_lock_handle(dbenv,
-		    dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0 &&
-		    ret != DB_LOCK_NOTEXIST)
+		    dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0)
 			goto err;
 
-		if (txn != NULL)
-			dbp->lid = DB_LOCK_INVALIDID;
-		(void)__db_refresh(dbp, txn, DB_NOSYNC, NULL);
+		if (F_ISSET(dbp, DB_AM_INMEM)) {
+			(void)__lock_put(dbenv, &dbp->handle_lock);
+			(void)__db_refresh(dbp, txn, DB_NOSYNC, NULL, 1);
+		} else {
+			if (txn != NULL)
+				dbp->lid = DB_LOCK_INVALIDID;
+			(void)__db_refresh(dbp, txn, DB_NOSYNC, NULL, 0);
+		}
 		goto retry;
-	} else if ((ret = __ENV_LPUT(dbenv, elock, 0)) != 0)
+	} else if ((ret = __ENV_LPUT(dbenv, elock)) != 0)
 		goto err;
 
 	/* Check if the file is already open. */
 	if ((ret = __memp_get_refcnt(dbenv, dbp->fileid, &refcnt)) != 0)
 		goto err;
+
 	/*
-	 * Now, error check.  If the file is already open (refcnt != 0), then
-	 * we must have it open (since we got the lock) and we need to panic,
-	 * because this is a self deadlock and the application has a bug.
-	 * If the file isn't open, but it's in the midst of a rename then
-	 * this file doesn't really exist.
+	 * Now, error check.  If the file is already open, then we must have
+	 * it open (since we got the lock) and we need to panic, because this
+	 * is a self deadlock and the application has a bug. If the file isn't
+	 * open, but it's in the midst of a rename then this file doesn't
+	 * really exist.  Note that in-memory files will always have an
+	 * artificially incremented ref count.
 	 */
-	if (refcnt != 0) {
+	if ((F_ISSET(dbp, DB_AM_INMEM) && refcnt != 2) ||
+	    (!F_ISSET(dbp, DB_AM_INMEM) && refcnt != 0)) {
 		__db_err(dbenv,
 "Attempting to remove file open in current transaction causing self-deadlock");
 		ret = __db_panic(dbenv, DB_LOCK_DEADLOCK);
@@ -879,15 +1030,19 @@ retry:	if (LOCKING_ON(dbenv)) {
 		ret = ENOENT;
 
 	if (0) {
-err:		(void)__ENV_LPUT(dbenv, elock, 0);
+err:		(void)__ENV_LPUT(dbenv, elock);
 	}
 	if (fhp != NULL && !LF_ISSET(DB_FCNTL_LOCKING))
 		(void)__os_closehandle(dbenv, fhp);
 	/*
-	 * If we are going to proceed with the removal, then we need to make
-	 * sure that we don't leave any pages around in the mpool.
+	 * If this is a real file and we are going to proceed with the removal,
+	 * then we need to make sure that we don't leave any pages around in the
+	 * mpool since the file is closed and will be reopened again before
+	 * access.  However, this might be an in-memory file, in which case
+	 * we will handle the discard from the mpool later as it's the "real"
+	 * removal of the database.
 	 */
-	if (ret == 0)
+	if (ret == 0 && !F_ISSET(dbp, DB_AM_INMEM))
 		F_SET(dbp, DB_AM_DISCARD);
 	return (ret);
 }
@@ -957,175 +1112,57 @@ __fop_dummy(dbp, txn, old, new, flags)
 	const char *old, *new;
 	u_int32_t flags;
 {
-	DB *tmpdbp, *t2dbp;
+	DB *tmpdbp;
 	DB_ENV *dbenv;
-	DB_FH *fhp;
-	DB_LOCK elock;
-	DB_LSN lsn;
-	DBT fiddbt, namedbt, tmpdbt;
 	DB_TXN *stxn;
 	char *back;
-	char *realback, *realnew, *realold;
 	int ret, t_ret;
-	size_t len;
 	u_int8_t mbuf[DBMETASIZE];
-	u_int32_t dflags, locker, stxnid;
+	u_int32_t locker;
 
 	dbenv = dbp->dbenv;
-	LOCK_INIT(elock);
-	realback = NULL;
-	realnew = NULL;
-	realold = NULL;
 	back = NULL;
 	stxn = NULL;
-	tmpdbp = t2dbp = NULL;
-	fhp = NULL;
-	dflags = F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0;
+	tmpdbp = NULL;
 
 	DB_ASSERT(txn != NULL);
 	locker = txn->txnid;
 
-	/* Begin sub transaction to encapsulate the rename. */
+	/*
+	 * Begin sub transaction to encapsulate the rename.  Note that we
+	 * expect the inmem_swap calls to complete the sub-transaction,
+	 * aborting on error and committing on success.
+	 */
 	if (TXN_ON(dbenv) && (ret = __txn_begin(dbenv, txn, &stxn, 0)) != 0)
 		goto err;
 
 	/* We need to create a dummy file as a place holder. */
 	if ((ret = __db_backup_name(dbenv, new, stxn, &back)) != 0)
 		goto err;
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, back, flags, NULL, &realback)) != 0)
-		goto err;
-	if ((ret = __fop_create(dbenv,
-	    stxn, NULL, back, DB_APP_DATA, 0, dflags)) != 0)
-		goto err;
-
-	memset(mbuf, 0, sizeof(mbuf));
-	if ((ret =
-	    __os_fileid(dbenv, realback, 1, ((DBMETA *)mbuf)->uid)) != 0)
-		goto err;
-	((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
-	if ((ret = __fop_write(dbenv, stxn, back,
-	    DB_APP_DATA, NULL, 0, 0, 0, mbuf, DBMETASIZE, 1, dflags)) != 0)
-		goto err;
-
 	/* Create a dummy dbp handle. */
 	if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
 		goto err;
-	memcpy(tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN);
 
-	/* Now, lock the name space while we initialize this file. */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, new, 0, NULL, &realnew)) != 0)
-		goto err;
-	GET_ENVLOCK(dbenv, locker, &elock);
-	if (__os_exists(realnew, NULL) == 0) {
-		/*
-		 * It is possible that the only reason this file exists is
-		 * because we've done a previous rename of it and we have
-		 * left a placeholder here.  We need to check for that case
-		 * and allow this rename to succeed if that's the case.
-		 */
-		if ((ret = db_create(&t2dbp, dbenv, 0)) != 0)
-			goto err;
-		if ((ret = __os_open(dbenv, realnew, 0, 0, &fhp)) != 0)
-			goto err;
-		if ((ret = __fop_read_meta(dbenv,
-		    realnew, mbuf, sizeof(mbuf), fhp, 0, &len)) != 0 ||
-		    (ret = __db_meta_setup(dbenv,
-		    t2dbp, realnew, (DBMETA *)mbuf, 0, 1)) != 0) {
-			ret = EEXIST;
-			goto err;
-		}
+	memset(mbuf, 0, sizeof(mbuf));
+	ret = F_ISSET(dbp, DB_AM_INMEM) ?
+	    __fop_inmem_dummy(tmpdbp, stxn, back, mbuf) :
+	    __fop_ondisk_dummy(tmpdbp, stxn, back, mbuf, flags);
 
-		/*
-		 * Now, try to acquire the handle lock.  If it's from our txn,
-		 * then we'll get the lock.  If it's not, then someone else has
-		 * it locked,  and we need to report this as an error.  If we
-		 * know we can get the lock, we can immediately release it,
-		 * which we need to do since this is a temporary handle.
-		 */
-		if ((ret = __fop_lock_handle(dbenv,
-		    t2dbp, locker, DB_LOCK_WRITE, NULL, DB_LOCK_NOWAIT)) != 0)
-			ret = EEXIST;
-		else {
-			(void)__lock_put(dbenv, &t2dbp->handle_lock, 0);
-			if (!F_ISSET(t2dbp, DB_AM_IN_RENAME))
-				ret = EEXIST;
-		}
-		if ((t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0)
-			ret = t_ret;
-		fhp = NULL;
-		if (ret != 0)
-			goto err;
-	}
-
-	/*
-	 * While we have the namespace locked, do the renames and then
-	 * swap for the handle lock.
-	 */
-	if ((ret = __fop_rename(dbenv,
-	    stxn, old, new, dbp->fileid, DB_APP_DATA, dflags)) != 0)
-		goto err;
-	if ((ret = __fop_rename(dbenv,
-	    stxn, back, old, tmpdbp->fileid, DB_APP_DATA, dflags)) != 0)
-		goto err;
-	if ((ret = __fop_lock_handle(dbenv,
-	    tmpdbp, locker, DB_LOCK_WRITE, &elock, NOWAIT_FLAG(txn))) != 0)
+	if (ret != 0)
 		goto err;
 
-	/*
-	 * We just acquired a transactional lock on the tmp handle.
-	 * We need to null out the tmp handle's lock so that it
-	 * doesn't create problems for us in the close path.
-	 */
-	LOCK_INIT(tmpdbp->handle_lock);
-
-	if (stxn != NULL) {
-		/* Commit the child. */
-		stxnid = stxn->txnid;
-		ret = __txn_commit(stxn, 0);
-		stxn = NULL;
-
-		/* Now log the child information in the parent. */
-		memset(&fiddbt, 0, sizeof(fiddbt));
-		memset(&tmpdbt, 0, sizeof(fiddbt));
-		memset(&namedbt, 0, sizeof(namedbt));
-		fiddbt.data = dbp->fileid;
-		fiddbt.size = DB_FILE_ID_LEN;
-		tmpdbt.data = tmpdbp->fileid;
-		tmpdbt.size = DB_FILE_ID_LEN;
-		namedbt.data = (void *)old;
-		namedbt.size = (u_int32_t)strlen(old) + 1;
-		if ((t_ret =
-		    __fop_file_remove_log(dbenv, txn, &lsn, 0, &fiddbt,
-		    &tmpdbt, &namedbt, DB_APP_DATA, stxnid)) != 0 && ret == 0)
-			ret = t_ret;
-	}
-
-	/* This is a delayed delete of the dummy file. */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, old, flags, NULL, &realold)) != 0)
-		goto err;
-	if ((ret = __txn_remevent(dbenv, txn, realold, NULL)) != 0)
+	ret = F_ISSET(dbp, DB_AM_INMEM) ?
+	    __fop_inmem_swap(dbp, tmpdbp, stxn, old, new, back, locker) :
+	    __fop_ondisk_swap(dbp, tmpdbp, stxn, old, new, back, locker, flags);
+	stxn = NULL;
+	if (ret != 0)
 		goto err;
 
-err:	(void)__ENV_LPUT(dbenv, elock, 0);
-	if (stxn != NULL)
+err:	if (stxn != NULL)
 		(void)__txn_abort(stxn);
 	if (tmpdbp != NULL &&
 	    (t_ret = __db_close(tmpdbp, NULL, 0)) != 0 && ret == 0)
 		ret = t_ret;
-	if (t2dbp != NULL &&
-	    (t_ret = __db_close(t2dbp, NULL, 0)) != 0 && ret == 0)
-		ret = t_ret;
-	if (fhp != NULL)
-		(void)__os_closehandle(dbenv, fhp);
-	if (realold != NULL)
-		__os_free(dbenv, realold);
-	if (realnew != NULL)
-		__os_free(dbenv, realnew);
-	if (realback != NULL)
-		__os_free(dbenv, realback);
 	if (back != NULL)
 		__os_free(dbenv, back);
 	return (ret);
@@ -1155,33 +1192,546 @@ __fop_dbrename(dbp, old, new)
 	real_old = NULL;
 	LOCK_INIT(elock);
 
-	/* Find the real newname of the file. */
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, new, 0, NULL, &real_new)) != 0)
-		goto err;
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
+		real_new = (char *)new;
+		real_old = (char *)old;
+	} else {
+		/* Get full names. */
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, new, 0, NULL, &real_new)) != 0)
+			goto err;
+
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, old, 0, NULL, &real_old)) != 0)
+			goto err;
+
+	}
 
 	/*
 	 * It is an error to rename a file over one that already exists,
-	 * as that wouldn't be transaction-safe.
+	 * as that wouldn't be transaction-safe.  We check explicitly
+	 * for ondisk files, but it's done by memp_nameop for in-memory ones.
 	 */
 	GET_ENVLOCK(dbenv, dbp->lid, &elock);
-	if (__os_exists(real_new, NULL) == 0) {
+	ret = F_ISSET(dbp, DB_AM_INMEM) ? ENOENT :
+	    __os_exists(real_new, NULL);
+
+	if (ret == 0) {
 		ret = EEXIST;
 		__db_err(dbenv, "rename: file %s exists", real_new);
 		goto err;
 	}
 
-	if ((ret = __db_appname(dbenv,
-	    DB_APP_DATA, old, 0, NULL, &real_old)) != 0)
-		goto err;
+	ret = __memp_nameop(dbenv,
+	    dbp->fileid, new, real_old, real_new, F_ISSET(dbp, DB_AM_INMEM));
 
-	ret = __memp_nameop(dbenv, dbp->fileid, new, real_old, real_new);
-
-err:	if ((t_ret = __ENV_LPUT(dbenv, elock, 0)) != 0 && ret == 0)
+err:	if ((t_ret = __ENV_LPUT(dbenv, elock)) != 0 && ret == 0)
 		ret = t_ret;
-	if (real_old != NULL)
+	if (!F_ISSET(dbp, DB_AM_INMEM) && real_old != NULL)
 		__os_free(dbenv, real_old);
-	if (real_new != NULL)
+	if (!F_ISSET(dbp, DB_AM_INMEM) && real_new != NULL)
 		__os_free(dbenv, real_new);
 	return (ret);
 }
+
+static int
+__fop_inmem_create(dbp, name, txn, flags)
+	DB *dbp;
+	const char *name;
+	DB_TXN *txn;
+	u_int32_t flags;
+{
+	DB_ENV *dbenv;
+	DB_LSN lsn;
+	DBT fid_dbt, name_dbt;
+	int ret;
+	int32_t lfid;
+	u_int32_t *p32;
+
+	dbenv = dbp->dbenv;
+
+	MAKE_INMEM(dbp);
+
+	/* Set the pagesize if it isn't yet set. */
+	if (dbp->pgsize == 0)
+		dbp->pgsize = DB_DEF_IOSIZE;
+
+	/*
+	 * Construct a file_id.
+	 *
+	 * If this file has no name, then we only need a fileid for locking.
+	 * If this file has a name, we need the fileid both for locking and
+	 * matching in the memory pool.  So, with unnamed in-memory databases,
+	 * use a lock_id.  For named in-memory files, we need to find a value
+	 * that we can use to uniquely identify a name/fid pair.  We use a
+	 * combination of a unique id (__os_unique_id) and a hash of the
+	 * original name.
+	 */
+	if (name == NULL) {
+		if (LOCKING_ON(dbenv) && (ret =
+		    __lock_id(dbenv, (u_int32_t *)dbp->fileid, NULL)) != 0)
+			goto err;
+	}  else {
+		p32 = (u_int32_t *)(&dbp->fileid[0]);
+		__os_unique_id(dbenv, p32);
+		p32++;
+		(void)strncpy(
+		    (char *)p32, name, DB_FILE_ID_LEN - sizeof(u_int32_t));
+		dbp->preserve_fid = 1;
+
+		if (DBENV_LOGGING(dbenv) && dbp->log_filename != NULL)
+			memcpy(dbp->log_filename->ufid,
+			    dbp->fileid, DB_FILE_ID_LEN);
+	}
+
+	/* Now, set the fileid. */
+	if ((ret = __memp_set_fileid(dbp->mpf, dbp->fileid)) != 0)
+		goto err;
+
+	if ((ret = __db_dbenv_mpool(dbp, name, flags)) != 0)
+		goto err;
+
+	if (name != NULL && DBENV_LOGGING(dbenv)) {
+		memset(&name_dbt, 0, sizeof(name_dbt));
+		name_dbt.data = (void *)name;
+		name_dbt.size = (u_int32_t)strlen(name) + 1;
+		memset(&fid_dbt, 0, sizeof(fid_dbt));
+		fid_dbt.data = dbp->fileid;
+		fid_dbt.size = DB_FILE_ID_LEN;
+		lfid = dbp->log_filename == NULL ?
+		    DB_LOGFILEID_INVALID : dbp->log_filename->id;
+		if ((ret = __crdel_inmem_create_log(dbenv, txn,
+		    &lsn, 0, lfid, &name_dbt, &fid_dbt, dbp->pgsize)) != 0)
+			goto err;
+	}
+
+	F_SET(dbp, DB_AM_CREATED);
+
+err:
+	return (ret);
+}
+
+static int
+__fop_inmem_read_meta(dbp, name, flags)
+	DB *dbp;
+	const char *name;
+	u_int32_t flags;
+{
+	DBMETA *metap;
+	db_pgno_t pgno;
+	int ret, t_ret;
+
+	pgno  = PGNO_BASE_MD;
+	if ((ret = __memp_fget(dbp->mpf, &pgno, 0, &metap)) != 0)
+		return (ret);
+	ret = __db_meta_setup(dbp->dbenv, dbp, name, metap, flags, 1);
+
+	if ((t_ret = __memp_fput(dbp->mpf, metap, 0)) && ret == 0)
+		ret = t_ret;
+
+	return (ret);
+}
+
+static int
+__fop_ondisk_dummy(dbp, txn, name, mbuf, flags)
+	DB *dbp;
+	DB_TXN *txn;
+	const char *name;
+	u_int8_t *mbuf;
+	u_int32_t flags;
+{
+	DB_ENV *dbenv;
+	int ret;
+	char *realname;
+	u_int32_t dflags;
+
+	realname = NULL;
+	dbenv = dbp->dbenv;
+	dflags = F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0;
+
+	if ((ret = __db_appname(dbenv,
+	    DB_APP_DATA, name, flags, NULL, &realname)) != 0)
+		goto err;
+
+	if ((ret = __fop_create(dbenv,
+	    txn, NULL, name, DB_APP_DATA, 0, dflags)) != 0)
+		goto err;
+
+	if ((ret =
+	    __os_fileid(dbenv, realname, 1, ((DBMETA *)mbuf)->uid)) != 0)
+		goto err;
+
+	((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
+	if ((ret = __fop_write(dbenv, txn, name,
+	    DB_APP_DATA, NULL, 0, 0, 0, mbuf, DBMETASIZE, 1, dflags)) != 0)
+		goto err;
+
+	memcpy(dbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN);
+
+err:	if (realname != NULL)
+		__os_free(dbenv, realname);
+
+	return (ret);
+}
+
+static int
+__fop_inmem_dummy(dbp, txn, name, mbuf)
+	DB *dbp;
+	DB_TXN *txn;
+	const char *name;
+	u_int8_t *mbuf;
+{
+	DBMETA *metap;
+	db_pgno_t pgno;
+	int ret, t_ret;
+
+	if ((ret = __fop_inmem_create(dbp, name, txn, DB_CREATE)) != 0)
+		return (ret);
+
+	pgno  = PGNO_BASE_MD;
+	if ((ret =
+	    __memp_fget(dbp->mpf, &pgno, DB_MPOOL_CREATE, &metap)) != 0)
+		return (ret);
+	/* Check file existed. */
+	if (metap->magic != 0)
+		ret = EEXIST;
+	else
+		metap->magic = DB_RENAMEMAGIC;
+
+	/* Copy the fileid onto the meta-data page. */
+	memcpy(metap->uid, dbp->fileid, DB_FILE_ID_LEN);
+
+	if ((t_ret = __memp_fput(dbp->mpf,
+	    metap, ret == 0 ? DB_MPOOL_DIRTY : DB_MPOOL_DISCARD)) != 0 &&
+	    ret == 0)
+		ret = t_ret;
+
+	if (ret != 0)
+		goto err;
+
+	((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
+
+err:	return (ret);
+}
+
+static int
+__fop_ondisk_swap(dbp, tmpdbp, txn, old, new, back, locker, flags)
+	DB *dbp, *tmpdbp;
+	DB_TXN *txn;
+	const char *old, *new, *back;
+	u_int32_t locker, flags;
+{
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+	DB_LOCK elock;
+	DB_LSN lsn;
+	DBT fiddbt, namedbt, tmpdbt;
+	DB_TXN *parent;
+	char *realold, *realnew;
+	int ret, t_ret;
+	u_int8_t mbuf[DBMETASIZE];
+	u_int32_t child_txnid, dflags;
+
+	DB_ASSERT(txn != NULL);
+	DB_ASSERT(old != NULL);
+
+	dbenv = dbp->dbenv;
+	realold = realnew = NULL;
+	LOCK_INIT(elock);
+	fhp = NULL;
+	dflags = F_ISSET(dbp, DB_AM_NOT_DURABLE) ? DB_LOG_NOT_DURABLE : 0;
+
+	if ((ret =
+	    __db_appname(dbenv, DB_APP_DATA, new, 0, NULL, &realnew)) != 0)
+		goto err;
+
+	/* Now, lock the name space while we initialize this file. */
+retry:	GET_ENVLOCK(dbenv, locker, &elock);
+	if (__os_exists(realnew, NULL) == 0) {
+		/*
+		 * It is possible that the only reason this file exists is
+		 * because we've done a previous rename of it and we have
+		 * left a placeholder here.  We need to check for that case
+		 * and allow this rename to succeed if that's the case.
+		 */
+		if ((ret = __os_open(dbenv, realnew, 0, 0, &fhp)) != 0)
+			goto err;
+		if ((ret = __fop_read_meta(dbenv,
+		    realnew, mbuf, sizeof(mbuf), fhp, 0, NULL)) != 0 ||
+		    (ret = __db_meta_setup(dbenv,
+		    tmpdbp, realnew, (DBMETA *)mbuf, 0, 1)) != 0) {
+			ret = EEXIST;
+			goto err;
+		}
+
+		/*
+		 * Now, try to acquire the handle lock.  If the handle is locked
+		 * by our current transaction, then we'll get it and life is
+		 * good.
+		 *
+		 * Alternately, it's not locked at all, we'll get the lock, but
+		 * we will realize it exists and consider this an error.
+		 *
+		 * However, if it's held by another transaction, then there
+		 * could be two different scenarios: 1) the file is in the
+		 * midst of being created or deleted and when that transaction
+		 * is over, we might be able to proceed. 2) the file is open
+		 * and exists and we should report an error. In order to
+		 * distinguish these two cases, we do the following. First, we
+		 * try to acquire a READLOCK.  If the handle is in the midst of
+		 * being created, then we'll block because a writelock is held.
+		 * In that case, we should request a blocking write, and when we
+		 * get the lock, we should then go back and check to see if the
+		 * object exists and start all over again.
+		 *
+		 * If we got the READLOCK, then either no one is holding the
+		 * lock or someone has an open handle and the fact that the file
+		 * exists is problematic.  So, in this case, we request the
+		 * WRITELOCK non-blocking -- if it succeeds, we're golden.  If
+		 * it fails, then the file exists and we return EEXIST.
+		 */
+		if ((ret = __fop_lock_handle(dbenv,
+		    tmpdbp, locker, DB_LOCK_READ, NULL, DB_LOCK_NOWAIT)) != 0) {
+			/*
+			 * Someone holds a writelock.  Try for the WRITELOCK
+			 * and after we get it, retry.
+			 */
+			if ((ret = __fop_lock_handle(dbenv, tmpdbp,
+			    locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+				goto err;
+
+			/*
+			 * We now have the write lock; release it and start
+			 * over.
+			 */
+			(void)__lock_put(dbenv, &tmpdbp->handle_lock);
+			(void)__db_refresh(tmpdbp, NULL, 0, NULL, 0);
+			goto retry;
+		} else {
+			/* We got the read lock; try to upgrade it. */
+			ret = __fop_lock_handle(dbenv,
+			    tmpdbp, locker, DB_LOCK_WRITE,
+			    NULL, DB_LOCK_UPGRADE | DB_LOCK_NOWAIT);
+			if (ret != 0) {
+				/*
+				 * We did not get the writelock, so someone
+				 * has the handle open.  This is an error.
+				 */
+				(void)__lock_put(dbenv, &tmpdbp->handle_lock);
+				ret = EEXIST;
+			} else  if (F_ISSET(tmpdbp, DB_AM_IN_RENAME))
+				/* We got the lock and are renaming it. */
+				ret = 0;
+			else { /* We got the lock, but the file exists. */
+				(void)__lock_put(dbenv, &tmpdbp->handle_lock);
+				ret = EEXIST;
+			}
+		}
+		if ((t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0)
+			ret = t_ret;
+		fhp = NULL;
+		if (ret != 0)
+			goto err;
+	}
+
+	/*
+	 * While we have the namespace locked, do the renames and then
+	 * swap for the handle lock.
+	 */
+	if ((ret = __fop_rename(dbenv,
+	    txn, old, new, dbp->fileid, DB_APP_DATA, dflags)) != 0)
+		goto err;
+	if ((ret = __fop_rename(dbenv,
+	    txn, back, old, tmpdbp->fileid, DB_APP_DATA, dflags)) != 0)
+		goto err;
+	if ((ret = __fop_lock_handle(dbenv,
+	    tmpdbp, locker, DB_LOCK_WRITE, &elock, NOWAIT_FLAG(txn))) != 0)
+		goto err;
+
+	/*
+	 * We just acquired a transactional lock on the tmp handle.
+	 * We need to null out the tmp handle's lock so that it
+	 * doesn't create problems for us in the close path.
+	 */
+	LOCK_INIT(tmpdbp->handle_lock);
+
+	/* Commit the child. */
+	child_txnid = txn->txnid;
+	parent = txn->parent;
+	ret = __txn_commit(txn, 0);
+	txn = NULL;
+
+	/* Now log the child information in the parent. */
+	memset(&fiddbt, 0, sizeof(fiddbt));
+	memset(&tmpdbt, 0, sizeof(fiddbt));
+	memset(&namedbt, 0, sizeof(namedbt));
+	fiddbt.data = dbp->fileid;
+	fiddbt.size = DB_FILE_ID_LEN;
+	tmpdbt.data = tmpdbp->fileid;
+	tmpdbt.size = DB_FILE_ID_LEN;
+	namedbt.data = (void *)old;
+	namedbt.size = (u_int32_t)strlen(old) + 1;
+	if ((t_ret = __fop_file_remove_log(dbenv,
+	    parent, &lsn, 0, &fiddbt, &tmpdbt, &namedbt,
+	    (u_int32_t)DB_APP_DATA, child_txnid)) != 0 && ret == 0)
+		ret = t_ret;
+
+	/* This is a delayed delete of the dummy file. */
+	if ((ret = __db_appname(dbenv,
+	    DB_APP_DATA, old, flags, NULL, &realold)) != 0)
+		goto err;
+
+	if ((ret = __txn_remevent(dbenv, parent, realold, NULL, 0)) != 0)
+		goto err;
+
+err:	if (txn != NULL)	/* Ret must already be set, so void abort. */
+		(void)__txn_abort(txn);
+
+	(void)__ENV_LPUT(dbenv, elock);
+	if (realnew != NULL)
+		__os_free(dbenv, realnew);
+	if (realold != NULL)
+		__os_free(dbenv, realold);
+	return (ret);
+}
+
+static int
+__fop_inmem_swap(olddbp, backdbp, txn, old, new, back, locker)
+	DB *olddbp, *backdbp;
+	DB_TXN *txn;
+	const char *old, *new, *back;
+	u_int32_t locker;
+{
+	DB_ENV *dbenv;
+	DB_LOCK elock;
+	DB_LSN lsn;
+	DB_TXN *parent;
+	DBT fid_dbt, n1_dbt, n2_dbt;
+	DB *tmpdbp;
+	int ret, t_ret;
+
+	dbenv = olddbp->dbenv;
+	parent = txn->parent;
+retry:	LOCK_INIT(elock);
+	if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+		return (ret);
+	MAKE_INMEM(tmpdbp);
+
+	GET_ENVLOCK(dbenv, locker, &elock);
+	if ((ret = __db_dbenv_mpool(tmpdbp, new, 0)) == 0) {
+		/*
+		 * It is possible that the only reason this database exists is
+		 * because we've done a previous rename of it and we have
+		 * left a placeholder here.  We need to check for that case
+		 * and allow this rename to succeed if that's the case.
+		 */
+
+		if ((ret = __fop_inmem_read_meta(tmpdbp, new, 0)) != 0) {
+			ret = EEXIST;
+			goto err;
+		}
+
+		/*
+		 * Now, try to acquire the handle lock.  If it's from our txn,
+		 * then we'll get the lock.  If it's not, then someone else has
+		 * it locked.  See the comments in __fop_ondisk_swap for
+		 * details.
+		 */
+		if ((ret = __fop_lock_handle(dbenv,
+		    tmpdbp, locker, DB_LOCK_READ, NULL, DB_LOCK_NOWAIT)) != 0) {
+			/*
+			 * Someone holds a writelock.  Try for the WRITELOCK
+			 * and after we get it, retry.
+			 */
+			if ((ret = __fop_lock_handle(dbenv, tmpdbp,
+			    locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+				goto err;
+
+			/* We now have the write lock; release it and start over. */
+			(void)__lock_put(dbenv, &tmpdbp->handle_lock);
+			(void)__db_close(tmpdbp, NULL, DB_NOSYNC);
+			(void)__ENV_LPUT(dbenv, elock);
+			goto retry;
+		} else {
+			(void)__lock_put(dbenv, &tmpdbp->handle_lock);
+			if (!F_ISSET(tmpdbp, DB_AM_IN_RENAME))
+				ret = EEXIST;
+		}
+		if (ret != 0)
+			goto err;
+	}
+
+	/* Log the renames. */
+	if (LOGGING_ON(dbenv)) {
+		/* Rename old to new. */
+		memset(&fid_dbt, 0, sizeof(fid_dbt));
+		fid_dbt.data = olddbp->fileid;
+		fid_dbt.size = DB_FILE_ID_LEN;
+		memset(&n1_dbt, 0, sizeof(n1_dbt));
+		n1_dbt.data = (void *)old;
+		n1_dbt.size = (u_int32_t)strlen(old) + 1;
+		memset(&n2_dbt, 0, sizeof(n2_dbt));
+		n2_dbt.data = (void *)new;
+		n2_dbt.size = (u_int32_t)strlen(new) + 1;
+		if ((ret = __crdel_inmem_rename_log(dbenv, txn, &lsn, 0,
+		    &n1_dbt, &n2_dbt, &fid_dbt)) != 0)
+			goto err;
+
+		/* Rename back to old */
+		fid_dbt.data = backdbp->fileid;
+		n2_dbt.data = (char *)back;
+		n2_dbt.size = (u_int32_t)strlen(back) + 1;
+		if ((ret = __crdel_inmem_rename_log(dbenv, txn, &lsn, 0,
+		    &n2_dbt, &n1_dbt, &fid_dbt)) != 0)
+			goto err;
+	}
+
+	/*
+	 * While we have the namespace locked, do the renames and then
+	 * swap for the handle lock.   If we ran into a file in the midst
+	 * of rename, then we need to delete it first, else nameop is
+	 * going to consider it an error.
+	 */
+	if (F_ISSET(tmpdbp, DB_AM_IN_RENAME)) {
+		if ((ret = __memp_nameop(dbenv,
+		    tmpdbp->fileid, NULL, new, NULL, 1)) != 0)
+			goto err;
+		__txn_remrem(dbenv, parent, new);
+	}
+
+	if ((ret = __memp_nameop(dbenv, olddbp->fileid, new, old, new, 1)) != 0)
+		goto err;
+	if ((ret =
+	    __memp_nameop(dbenv, backdbp->fileid, old, back, old, 1)) != 0)
+		goto err;
+
+	if ((ret = __fop_lock_handle(dbenv,
+	    tmpdbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+		goto err;
+
+	/*
+	 * We just acquired a transactional lock on the tmp handle.
+	 * We need to null out the tmp handle's lock so that it
+	 * doesn't create problems for us in the close path.
+	 */
+	LOCK_INIT(tmpdbp->handle_lock);
+
+	DB_ASSERT(txn != NULL);
+
+	/* Commit the child. */
+	ret = __txn_commit(txn, 0);
+	txn = NULL;
+
+	if ((ret = __db_inmem_remove(backdbp, parent, old)) != 0)
+		goto err;
+
+err:	(void)__ENV_LPUT(dbenv, elock);
+
+	if (txn != NULL)
+		(void)__txn_abort(txn);
+
+	if ((t_ret = __db_close(tmpdbp, NULL, 0)) != 0 && ret == 0)
+		ret = t_ret;
+
+	return (ret);
+}
diff --git a/storage/bdb/hash/hash.c b/storage/bdb/hash/hash.c
index 71c2f5e0690..2d622249699 100644
--- a/storage/bdb/hash/hash.c
+++ b/storage/bdb/hash/hash.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash.c,v 11.200 2004/10/14 18:11:36 bostic Exp $
+ * $Id: hash.c,v 12.10 2005/10/20 18:57:07 bostic Exp $
  */
 
 #include "db_config.h"
@@ -73,21 +73,8 @@ static int  __ham_overwrite __P((DBC *, DBT *, u_int32_t));
 
 /*
  * __ham_quick_delete --
- *	When performing a DB->del operation that does not involve secondary
- *	indices and is not removing an off-page duplicate tree, we can
- *	speed things up substantially by removing the entire duplicate
- *	set, if any is present, in one operation, rather than by conjuring
- *	up and deleting each of the items individually.  (All are stored
- *	in one big HKEYDATA structure.)  We don't bother to distinguish
- *	on-page duplicate sets from single, non-dup items;  they're deleted
- *	in exactly the same way.
- *
- *	This function is called by __db_delete when the appropriate
- *	conditions are met, and it performs the delete in the optimized way.
- *
- *	The cursor should be set to the first item in the duplicate
- *	set, or to the sole key/data pair when the key does not have a
- *	duplicate set, before the function is called.
+ *	This function is called by __db_del when the appropriate conditions
+ *	are met, and it performs the delete in the optimized way.
  *
  * PUBLIC: int __ham_quick_delete __P((DBC *));
  */
@@ -97,20 +84,33 @@ __ham_quick_delete(dbc)
 {
 	int ret, t_ret;
 
+	/*
+	 * When performing a DB->del operation not involving secondary indices
+	 * and not removing an off-page duplicate tree, we can speed things up
+	 * substantially by removing the entire duplicate set, if any is
+	 * present, in one operation, rather than by conjuring up and deleting
+	 * each of the items individually.  (All are stored in one big HKEYDATA
+	 * structure.)  We don't bother to distinguish on-page duplicate sets
+	 * from single, non-dup items;  they're deleted in exactly the same way.
+	 *
+	 * The cursor should be set to the first item in the duplicate set, or
+	 * to the sole key/data pair when the key does not have a duplicate set,
+	 * before the function is called.
+	 *
+	 * We do not need to call CDB_LOCKING_INIT, __db_del calls here with
+	 * a write cursor.
+	 *
+	 * Assert we're initialized, but not to an off-page duplicate.
+	 * Assert we're not using secondary indices.
+	 */
+	DB_ASSERT(IS_INITIALIZED(dbc));
+	DB_ASSERT(dbc->internal->opd == NULL);
+	DB_ASSERT(!F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+	DB_ASSERT(LIST_FIRST(&dbc->dbp->s_secondaries) == NULL);
+
 	if ((ret = __ham_get_meta(dbc)) != 0)
 		return (ret);
 
-	/* Assert that we're not using secondary indices. */
-	DB_ASSERT(!F_ISSET(dbc->dbp, DB_AM_SECONDARY));
-	/*
-	 * We should assert that we're not a primary either, but that
-	 * would require grabbing the dbp's mutex, so we don't bother.
-	 */
-
-	/* Assert that we're set, but not to an off-page duplicate. */
-	DB_ASSERT(IS_INITIALIZED(dbc));
-	DB_ASSERT(((HASH_CURSOR *)dbc->internal)->opd == NULL);
-
 	if ((ret = __ham_c_writelock(dbc)) == 0)
 		ret = __ham_del_pair(dbc, 1);
 
@@ -146,7 +146,7 @@ __ham_c_init(dbc)
 	}
 
 	dbc->internal = (DBC_INTERNAL *) new_curs;
-	dbc->c_close = __db_c_close;
+	dbc->c_close = __db_c_close_pp;
 	dbc->c_count = __db_c_count_pp;
 	dbc->c_del = __db_c_del_pp;
 	dbc->c_dup = __db_c_dup_pp;
@@ -195,7 +195,7 @@ __ham_c_close(dbc, root_pgno, rmroot)
 		lock_mode = DB_LOCK_READ;
 
 		/* To support dirty reads we must reget the write lock. */
-		if (F_ISSET(dbc->dbp, DB_AM_DIRTY) &&
+		if (F_ISSET(dbc->dbp, DB_AM_READ_UNCOMMITTED) &&
 		     F_ISSET((BTREE_CURSOR *)
 		     dbc->internal->opd->internal, C_DELETED))
 			lock_mode = DB_LOCK_WRITE;
@@ -1103,6 +1103,9 @@ done:	if (hcp->page != NULL) {
 	if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
 		ret = __ham_expand_table(dbc);
 		F_CLR(hcp, H_EXPAND);
+		/* If we are out of space, ignore the error. */
+		if (ret == ENOSPC && dbc->txn == NULL)
+			ret = 0;
 	}
 
 err2:	if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
@@ -1221,12 +1224,13 @@ __ham_expand_table(dbc)
 		 * that, we calculate the last pgno.
 		 */
 
-		hcp->hdr->spares[logn + 1] = pgno - new_bucket;
 		pgno += hcp->hdr->max_bucket;
 
 		if ((ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
 			goto err;
 
+		hcp->hdr->spares[logn + 1] =
+		    (pgno - new_bucket) - hcp->hdr->max_bucket;
 		mmeta->last_pgno = pgno;
 		mmeta->lsn = lsn;
 		dirty_meta = DB_MPOOL_DIRTY;
@@ -1442,27 +1446,31 @@ __ham_dup_return(dbc, val, flags)
 		 * duplicate which is itself a partial.
 		 */
 		memcpy(&tmp_val, val, sizeof(*val));
+
 		if (F_ISSET(&tmp_val, DB_DBT_PARTIAL)) {
 			/*
 			 * Take the user's length unless it would go
 			 * beyond the end of the duplicate.
 			 */
-			if (tmp_val.doff + hcp->dup_off > hcp->dup_len)
+			if (tmp_val.doff > hcp->dup_len)
 				tmp_val.dlen = 0;
-			else if (tmp_val.dlen + tmp_val.doff >
-			    hcp->dup_len)
-				tmp_val.dlen =
-				    hcp->dup_len - tmp_val.doff;
+			else if (tmp_val.dlen + tmp_val.doff > hcp->dup_len)
+				tmp_val.dlen = hcp->dup_len - tmp_val.doff;
 
-			/*
-			 * Calculate the new offset.
-			 */
-			tmp_val.doff += hcp->dup_off;
 		} else {
 			F_SET(&tmp_val, DB_DBT_PARTIAL);
 			tmp_val.dlen = hcp->dup_len;
-			tmp_val.doff = hcp->dup_off + sizeof(db_indx_t);
+			tmp_val.doff = 0;
 		}
+
+		/*
+		 * Set offset to the appropriate place within the
+		 * current duplicate -- need to take into account
+		 * both the dup_off and the current duplicate's
+		 * length.
+		 */
+		tmp_val.doff += hcp->dup_off + sizeof(db_indx_t);
+
 		myval = &tmp_val;
 	}
 
@@ -1856,7 +1864,7 @@ __ham_c_update(dbc, len, add, is_dup)
 	my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
 	found = 0;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 
 	/*
 	 * Calculate the order of this deleted record.
@@ -1869,7 +1877,7 @@ __ham_c_update(dbc, len, add, is_dup)
 		for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 		    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 		    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-			MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+			MUTEX_LOCK(dbenv, dbp->mutex);
 			for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 			    cp = TAILQ_NEXT(cp, links)) {
 				if (cp == dbc || cp->dbtype != DB_HASH)
@@ -1882,7 +1890,7 @@ __ham_c_update(dbc, len, add, is_dup)
 				    (!is_dup || hcp->dup_off == lcp->dup_off))
 					order = lcp->order + 1;
 			}
-			MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+			MUTEX_UNLOCK(dbenv, dbp->mutex);
 		}
 		hcp->order = order;
 	}
@@ -1890,7 +1898,7 @@ __ham_c_update(dbc, len, add, is_dup)
 	for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links)) {
 			if (cp == dbc || cp->dbtype != DB_HASH)
@@ -1992,9 +2000,9 @@ __ham_c_update(dbc, len, add, is_dup)
 				}
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	if (found != 0 && DBC_LOGGING(dbc)) {
 		if ((ret = __ham_curadj_log(dbp, my_txn, &lsn, 0, hcp->pgno,
@@ -2036,11 +2044,11 @@ __ham_get_clist(dbp, pgno, indx, listp)
 	*listp = NULL;
 	dbenv = dbp->dbenv;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links))
 			/*
@@ -2061,9 +2069,9 @@ __ham_get_clist(dbp, pgno, indx, listp)
 				(*listp)[nused++] = cp;
 			}
 
-		MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbp->dbenv, dbp->mutex);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	if (listp != NULL) {
 		if (nused >= nalloc) {
@@ -2076,8 +2084,8 @@ __ham_get_clist(dbp, pgno, indx, listp)
 	}
 	return (0);
 err:
-	MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbp->dbenv, dbp->mutex);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 	return (ret);
 }
 
diff --git a/storage/bdb/hash/hash.src b/storage/bdb/hash/hash.src
index 4acff5e5928..9c415a30418 100644
--- a/storage/bdb/hash/hash.src
+++ b/storage/bdb/hash/hash.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash.src,v 10.44 2004/06/17 17:35:21 bostic Exp $
+ * $Id: hash.src,v 12.1 2005/06/16 20:22:50 bostic Exp $
  */
 /*
  * Copyright (c) 1995, 1996
diff --git a/storage/bdb/hash/hash_conv.c b/storage/bdb/hash/hash_conv.c
index a90799c7b25..f84ec745744 100644
--- a/storage/bdb/hash/hash_conv.c
+++ b/storage/bdb/hash/hash_conv.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_conv.c,v 11.16 2004/03/24 20:37:38 bostic Exp $
+ * $Id: hash_conv.c,v 12.1 2005/06/16 20:22:50 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_dup.c b/storage/bdb/hash/hash_dup.c
index 93fc2b51f62..127c3d87a56 100644
--- a/storage/bdb/hash/hash_dup.c
+++ b/storage/bdb/hash/hash_dup.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -35,7 +35,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash_dup.c,v 11.85 2004/06/03 16:32:21 margo Exp $
+ * $Id: hash_dup.c,v 12.3 2005/07/20 16:51:41 bostic Exp $
  */
 
 #include "db_config.h"
@@ -499,7 +499,8 @@ __ham_check_move(dbc, add_len)
 	    HOFFDUP_SIZE - old_len <= P_FREESPACE(dbp, hcp->page)))
 		return (0);
 
-	if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(dbp, hcp->page))
+	if (!ISBIG(hcp, new_datalen) &&
+	    (new_datalen - old_len) <= P_FREESPACE(dbp, hcp->page))
 		return (0);
 
 	/*
@@ -842,11 +843,11 @@ __ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
 	my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
 	found = 0;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links)) {
 			if (cp == dbc || cp->dbtype != DB_HASH)
@@ -872,9 +873,9 @@ __ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
 					found = 1;
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	if (found != 0 && DBC_LOGGING(dbc)) {
 		if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, DB_HAM_CHGPG,
diff --git a/storage/bdb/hash/hash_func.c b/storage/bdb/hash/hash_func.c
index b117fcee323..c7094017d92 100644
--- a/storage/bdb/hash/hash_func.c
+++ b/storage/bdb/hash/hash_func.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash_func.c,v 11.15 2004/01/28 03:36:11 bostic Exp $
+ * $Id: hash_func.c,v 12.1 2005/06/16 20:22:52 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_meta.c b/storage/bdb/hash/hash_meta.c
index 6d700fcc1ea..010e0da3ddf 100644
--- a/storage/bdb/hash/hash_meta.c
+++ b/storage/bdb/hash/hash_meta.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_meta.c,v 11.31 2004/09/22 03:46:22 bostic Exp $
+ * $Id: hash_meta.c,v 12.1 2005/06/16 20:22:52 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_method.c b/storage/bdb/hash/hash_method.c
index 6b59787a672..96e3e5d0875 100644
--- a/storage/bdb/hash/hash_method.c
+++ b/storage/bdb/hash/hash_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_method.c,v 11.17 2004/01/28 03:36:11 bostic Exp $
+ * $Id: hash_method.c,v 12.1 2005/06/16 20:22:53 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_open.c b/storage/bdb/hash/hash_open.c
index 67b12e5eba5..91ad4b45a45 100644
--- a/storage/bdb/hash/hash_open.c
+++ b/storage/bdb/hash/hash_open.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash_open.c,v 11.191 2004/06/22 18:43:38 margo Exp $
+ * $Id: hash_open.c,v 12.7 2005/11/09 14:19:51 margo Exp $
  */
 
 #include "db_config.h"
@@ -58,7 +58,6 @@
 #include "dbinc/db_shash.h"
 #include "dbinc/lock.h"
 #include "dbinc/mp.h"
-#include "dbinc/db_swap.h"
 #include "dbinc/btree.h"
 #include "dbinc/fop.h"
 
@@ -118,6 +117,7 @@ __ham_open(dbp, txn, name, base_pgno, flags)
 			ret = EINVAL;
 			goto err2;
 		}
+		hashp->h_nelem = hcp->hdr->nelem;
 		if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
 			F_SET(dbp, DB_AM_DUP);
 		if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
@@ -291,6 +291,7 @@ __ham_init_meta(dbp, meta, pgno, lsnp)
 	meta->high_mask = nbuckets - 1;
 	meta->low_mask = (nbuckets >> 1) - 1;
 	meta->ffactor = hashp->h_ffactor;
+	meta->nelem = hashp->h_nelem;
 	meta->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
 	memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
 
@@ -357,7 +358,7 @@ __ham_new_file(dbp, txn, fhp, name)
 	page = NULL;
 	buf = NULL;
 
-	if (name == NULL) {
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
 		/* Build meta-data page. */
 		lpgno = PGNO_BASE_MD;
 		if ((ret =
@@ -366,6 +367,9 @@ __ham_new_file(dbp, txn, fhp, name)
 		LSN_NOT_LOGGED(lsn);
 		lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
 		meta->dbmeta.last_pgno = lpgno;
+		if ((ret = __db_log_page(dbp,
+		    txn, &lsn, meta->dbmeta.pgno, (PAGE *)meta)) != 0)
+			goto err;
 		ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY);
 		meta = NULL;
 		if (ret != 0)
@@ -378,6 +382,9 @@ __ham_new_file(dbp, txn, fhp, name)
 		P_INIT(page,
 		    dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
 		LSN_NOT_LOGGED(page->lsn);
+		if ((ret =
+		    __db_log_page(dbp, txn, &page->lsn, lpgno, page)) != 0)
+			goto err;
 		ret = __memp_fput(mpf, page, DB_MPOOL_DIRTY);
 		page = NULL;
 		if (ret != 0)
diff --git a/storage/bdb/hash/hash_page.c b/storage/bdb/hash/hash_page.c
index 636767f4ec5..769a874f4e0 100644
--- a/storage/bdb/hash/hash_page.c
+++ b/storage/bdb/hash/hash_page.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash_page.c,v 11.102 2004/09/22 21:14:56 ubell Exp $
+ * $Id: hash_page.c,v 12.7 2005/10/13 22:22:43 ubell Exp $
  */
 
 #include "db_config.h"
@@ -842,21 +842,26 @@ __ham_replpair(dbc, dbt, make_dup)
 	DB_LSN	new_lsn;
 	HASH_CURSOR *hcp;
 	u_int32_t change;
-	u_int32_t dup_flag, len, memsize;
+	u_int32_t dup_flag, len, memsize, newlen;
 	int beyond_eor, is_big, is_plus, ret, type;
 	u_int8_t *beg, *dest, *end, *hk, *src;
 	void *memp;
 
 	/*
-	 * Big item replacements are handled in generic code.
-	 * Items that fit on the current page fall into 4 classes.
-	 * 1. On-page element, same size
-	 * 2. On-page element, new is bigger (fits)
-	 * 3. On-page element, new is bigger (does not fit)
-	 * 4. On-page element, old is bigger
-	 * Numbers 1, 2, and 4 are essentially the same (and should
-	 * be the common case).  We handle case 3 as a delete and
-	 * add.
+	 * Items that were already offpage (ISBIG) were handled before
+	 * we get in here.  So, we need only handle cases where the old
+	 * key is on a regular page.  That leaves us 6 cases:
+	 * 1. Original data onpage; new data is smaller
+	 * 2. Original data onpage; new data is the same size
+	 * 3. Original data onpage; new data is bigger, but not ISBIG,
+	 *    fits on page
+	 * 4. Original data onpage; new data is bigger, but not ISBIG,
+	 *    does not fit on page
+	 * 5. Original data onpage; New data is an off-page item.
+	 * 6. Original data was offpage; new item is smaller.
+	 *
+	 * Cases 1-3 are essentially the same (and should be the common case).
+	 * We handle 4-6 as delete and add.
 	 */
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
@@ -891,7 +896,7 @@ __ham_replpair(dbc, dbt, make_dup)
 	beyond_eor = dbt->doff + dbt->dlen > len;
 	if (beyond_eor) {
 		/*
-		 * The change is beyond the end of file.  If change
+		 * The change is beyond the end of record.  If change
 		 * is a positive number, we can simply add the extension
 		 * to it.  However, if change is negative, then we need
 		 * to figure out if the extension is larger than the
@@ -907,10 +912,21 @@ __ham_replpair(dbc, dbt, make_dup)
 			change -= (dbt->doff + dbt->dlen - len);
 	}
 
-	if ((is_plus && change > P_FREESPACE(dbp, hcp->page)) ||
+	newlen = (is_plus ? len + change : len - change);
+	if (ISBIG(hcp, newlen) ||
+	    (is_plus && change > P_FREESPACE(dbp, hcp->page)) ||
 	    beyond_eor || is_big) {
+	    	/* 
+		 * If we are in cases 4 or 5 then is_plus will be true.
+		 * If we don't have a transaction then we cannot roll back,
+		 * make sure there is enough room for the new page.
+		 */
+		if (is_plus && dbc->txn == NULL &&
+		    dbp->mpf->mfp->maxpgno != 0 &&
+		    dbp->mpf->mfp->maxpgno == dbp->mpf->mfp->last_pgno)
+		    	return (__db_space_err(dbp));
 		/*
-		 * Case 3 -- two subcases.
+		 * Cases 4-6 -- two subcases.
 		 * A. This is not really a partial operation, but an overwrite.
 		 *    Simple del and add works.
 		 * B. This is a partial and we need to construct the data that
@@ -1344,7 +1360,8 @@ __ham_add_el(dbc, key, val, type)
 	HASH_CURSOR *hcp;
 	HOFFPAGE doff, koff;
 	db_pgno_t next_pgno, pgno;
-	u_int32_t data_size, key_size, pairsize, rectype;
+	u_int32_t data_size, key_size;
+	u_int32_t pages, pagespace, pairsize, rectype;
 	int do_expand, is_keybig, is_databig, ret;
 	int key_type, data_type;
 
@@ -1395,6 +1412,24 @@ __ham_add_el(dbc, key, val, type)
 		hcp->pgno = PGNO(hcp->page);
 	}
 
+	/*
+	 * If we don't have a transaction then make sure we will not
+	 * run out of file space before updating the key or data.
+	 */
+	if (dbc->txn == NULL &&
+	    dbp->mpf->mfp->maxpgno != 0 && (is_keybig || is_databig)) {
+		pagespace = P_MAXSPACE(dbp, dbp->pgsize);
+		pages = 0;
+		if (is_databig) 
+			pages = ((data_size - 1) / pagespace) + 1;
+		if (is_keybig) {
+			pages += ((key->size - 1) / pagespace) + 1;
+			if (pages >
+			    (dbp->mpf->mfp->maxpgno - dbp->mpf->mfp->last_pgno))
+				return (__db_space_err(dbp));
+		}
+	}
+
 	/*
 	 * Update cursor.
 	 */
@@ -1612,7 +1647,8 @@ __ham_get_cpage(dbc, mode)
 		 */
 		if ((LOCK_ISSET(hcp->lock) &&
 		    ((hcp->lock_mode == DB_LOCK_READ ||
-		    F_ISSET(dbp, DB_AM_DIRTY)) && mode == DB_LOCK_WRITE))) {
+		    F_ISSET(dbp, DB_AM_READ_UNCOMMITTED)) &&
+		    mode == DB_LOCK_WRITE))) {
 			/* Case 3. */
 			tmp_lock = hcp->lock;
 			LOCK_INIT(hcp->lock);
@@ -1628,7 +1664,7 @@ __ham_get_cpage(dbc, mode)
 			hcp->lock_mode = mode;
 			hcp->lbucket = hcp->bucket;
 			/* Case 3: release the original lock. */
-			if ((ret = __ENV_LPUT(dbp->dbenv, tmp_lock, 0)) != 0)
+			if ((ret = __ENV_LPUT(dbp->dbenv, tmp_lock)) != 0)
 				return (ret);
 		} else if (LOCK_ISSET(tmp_lock))
 			hcp->lock = tmp_lock;
@@ -1829,7 +1865,7 @@ __ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
 	my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
 	found = 0;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	/*
 	 * Find the highest order of any cursor our movement
 	 * may collide with.
@@ -1838,7 +1874,7 @@ __ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
 	for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links)) {
 			if (cp == dbc || cp->dbtype != DB_HASH)
@@ -1855,13 +1891,13 @@ __ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
 				    F_ISSET(hcp, H_DELETED)));
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 	}
 
 	for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links)) {
 			if (cp == dbc || cp->dbtype != DB_HASH)
@@ -1906,9 +1942,9 @@ __ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
 					found = 1;
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 	if (found != 0 && DBC_LOGGING(dbc)) {
 		if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, op,
diff --git a/storage/bdb/hash/hash_rec.c b/storage/bdb/hash/hash_rec.c
index 2934f19192b..d8db8690590 100644
--- a/storage/bdb/hash/hash_rec.c
+++ b/storage/bdb/hash/hash_rec.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hash_rec.c,v 11.83 2004/10/25 16:52:13 ubell Exp $
+ * $Id: hash_rec.c,v 12.7 2005/09/28 17:44:52 margo Exp $
  */
 
 #include "db_config.h"
@@ -86,7 +86,7 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
 	COMPQUIET(info, NULL);
 
 	REC_PRINT(__ham_insdel_print);
-	REC_INTRO(__ham_insdel_read, 1);
+	REC_INTRO(__ham_insdel_read, 1, 0);
 
 	if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
 		if (DB_UNDO(op)) {
@@ -116,7 +116,7 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 	/*
 	 * Two possible things going on:
 	 * redo a delete/undo a put: delete the item from the page.
@@ -207,7 +207,7 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
 	COMPQUIET(info, NULL);
 
 	REC_PRINT(__ham_newpage_print);
-	REC_INTRO(__ham_newpage_read, 1);
+	REC_INTRO(__ham_newpage_read, 1, 0);
 
 	REC_FGET(mpf, argp->new_pgno, &pagep, ppage);
 
@@ -219,7 +219,7 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 
 	flags = 0;
 	if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
@@ -250,7 +250,7 @@ ppage:	if (argp->prev_pgno != PGNO_INVALID) {
 
 		cmp_n = log_compare(lsnp, &LSN(pagep));
 		cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
-		CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+		CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->prevlsn);
 		flags = 0;
 
 		if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
@@ -280,7 +280,7 @@ npage:	if (argp->next_pgno != PGNO_INVALID) {
 
 		cmp_n = log_compare(lsnp, &LSN(pagep));
 		cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
-		CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+		CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->nextlsn);
 		flags = 0;
 
 		if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
@@ -343,13 +343,13 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info)
 	COMPQUIET(info, NULL);
 
 	REC_PRINT(__ham_replace_print);
-	REC_INTRO(__ham_replace_read, 1);
+	REC_INTRO(__ham_replace_read, 1, 0);
 
 	REC_FGET(mpf, argp->pgno, &pagep, done);
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 
 	memset(&dbt, 0, sizeof(dbt));
 	flags = 0;
@@ -439,7 +439,7 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
 	COMPQUIET(info, NULL);
 
 	REC_PRINT(__ham_splitdata_print);
-	REC_INTRO(__ham_splitdata_read, 1);
+	REC_INTRO(__ham_splitdata_read, 1, 0);
 
 	if ((ret = __memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
 		if (DB_UNDO(op)) {
@@ -469,7 +469,7 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 
 	/*
 	 * There are two types of log messages here, one for the old page
@@ -539,7 +539,7 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
 	COMPQUIET(info, NULL);
 
 	REC_PRINT(__ham_copypage_print);
-	REC_INTRO(__ham_copypage_read, 1);
+	REC_INTRO(__ham_copypage_read, 1, 0);
 
 	flags = 0;
 
@@ -548,7 +548,7 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 
 	if (cmp_p == 0 && DB_REDO(op)) {
 		/* Need to redo update described. */
@@ -574,7 +574,7 @@ donext:	/* Now fix up the "next" page. */
 	/* For REDO just update the LSN. For UNDO copy page back. */
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->nextlsn);
 	flags = 0;
 	if (cmp_p == 0 && DB_REDO(op)) {
 		LSN(pagep) = *lsnp;
@@ -596,7 +596,7 @@ do_nn:	if (argp->nnext_pgno == PGNO_INVALID)
 
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nnextlsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->nnextlsn);
 
 	flags = 0;
 	if (cmp_p == 0 && DB_REDO(op)) {
@@ -646,13 +646,14 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
 	PAGE *pagep;
 	db_pgno_t pgno;
 	u_int32_t flags, mmeta_flags;
-	int cmp_n, cmp_p, did_recover, groupgrow, ret;
+	int cmp_n, cmp_p, did_alloc, did_recover, groupgrow, ret;
 
 	COMPQUIET(info, NULL);
 	mmeta_flags = 0;
+	did_alloc = 0;
 	mmeta = NULL;
 	REC_PRINT(__ham_metagroup_print);
-	REC_INTRO(__ham_metagroup_read, 1);
+	REC_INTRO(__ham_metagroup_read, 1, 1);
 
 	/*
 	 * This logs the virtual create of pages pgno to pgno + bucket
@@ -664,13 +665,15 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
 	 * are rolling backward.  If the file has not been extended
 	 * then the metapage could not have been updated.
 	 * The log record contains:
-	 * bucket: new bucket being allocated.
+	 * bucket: old maximum bucket
 	 * pgno: page number of the new bucket.
-	 * if bucket is a power of 2, then we allocated a whole batch of
-	 * pages; if it's not, then we simply allocated one new page.
+	 * We round up on log calculations, so we can figure out if we are
+	 * about to double the hash table if argp->bucket+1 is a power of 2.
+	 * If it is, then we are allocating an entire doubling of pages,
+	 * otherwise, we are simply allocated one new page.
 	 */
-	groupgrow = (u_int32_t)(1 << __db_log2(argp->bucket + 1)) ==
-	    argp->bucket + 1;
+	groupgrow =
+	    (u_int32_t)(1 << __db_log2(argp->bucket + 1)) == argp->bucket + 1;
 	pgno = argp->pgno;
 	if (argp->newalloc)
 		pgno += argp->bucket;
@@ -686,29 +689,32 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
 	/* If we are undoing, then we don't want to create the page. */
 	if (ret != 0 && DB_REDO(op))
 		ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep);
-	else if (ret == DB_PAGE_NOTFOUND) {
-		groupgrow = 0;
+	else if (ret == DB_PAGE_NOTFOUND)
 		goto do_meta;
-	}
 #endif
 	if (ret != 0) {
 		if (ret != ENOSPC)
 			goto out;
 		pgno = 0;
-		groupgrow = 0;
 		goto do_meta;
 	}
 
+	/*
+	 * When we get here then either we did not grow the file
+	 * (groupgrow == 0) or we did grow the file and the allocation
+	 * of those new pages succeeded.
+	 */
+	did_alloc = groupgrow;
+
 	cmp_n = log_compare(lsnp, &LSN(pagep));
 	cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
-	CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(pagep), &argp->pagelsn);
 
 	flags = 0;
 	if (cmp_p == 0 && DB_REDO(op)) {
 		pagep->lsn = *lsnp;
 		flags = DB_MPOOL_DIRTY;
-	}
-	else if (cmp_n == 0 && DB_UNDO(op)) {
+	} else if (cmp_n == 0 && DB_UNDO(op)) {
 #ifdef HAVE_FTRUNCATE
 		/* If this record allocated the pages give them back. */
 		if (argp->newalloc) {
@@ -739,7 +745,7 @@ do_meta:
 		goto out;
 	cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
 	cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
-	CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
+	CHECK_LSN(dbenv, op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
 	did_recover = 0;
 	if (cmp_p == 0 && DB_REDO(op)) {
 		/* Redo the actual updating of bucket counts. */
@@ -753,9 +759,9 @@ do_meta:
 		did_recover = 1;
 	} else if (cmp_n == 0 && DB_UNDO(op)) {
 		/* Undo the actual updating of bucket counts. */
-		--hcp->hdr->max_bucket;
+		hcp->hdr->max_bucket = argp->bucket;
 		if (groupgrow) {
-			hcp->hdr->high_mask = hcp->hdr->low_mask;
+			hcp->hdr->high_mask = argp->bucket;
 			hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
 		}
 		hcp->hdr->dbmeta.lsn = argp->metalsn;
@@ -767,10 +773,10 @@ do_meta:
 	 * spares array indicates the beginning page number for the
 	 * indicated doubling.  We need to fill this in whenever the
 	 * spares array is invalid, if we never reclaim pages then
-	 * we have to allocate the pages to the
-	 * spares array in both the redo and undo cases.
+	 * we have to allocate the pages to the spares array in both
+	 * the redo and undo cases.
 	 */
-	if (groupgrow &&
+	if (did_alloc &&
 #ifdef HAVE_FTRUNCATE
 	    !DB_UNDO(op) &&
 #endif
@@ -862,7 +868,7 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
 	mmeta = NULL;
 	modified = 0;
 	REC_PRINT(__ham_groupalloc_print);
-	REC_INTRO(__ham_groupalloc_read, 0);
+	REC_INTRO(__ham_groupalloc_read, 0, 0);
 
 	pgno = PGNO_BASE_MD;
 	if ((ret = __memp_fget(mpf, &pgno, 0, &mmeta)) != 0) {
@@ -875,7 +881,7 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
 
 	cmp_n = log_compare(lsnp, &LSN(mmeta));
 	cmp_p = log_compare(&LSN(mmeta), &argp->meta_lsn);
-	CHECK_LSN(op, cmp_p, &LSN(mmeta), &argp->meta_lsn);
+	CHECK_LSN(dbenv, op, cmp_p, &LSN(mmeta), &argp->meta_lsn);
 
 	/*
 	 * Basically, we used mpool to allocate a chunk of pages.
@@ -1054,7 +1060,7 @@ __ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
 
 	COMPQUIET(info, NULL);
 	REC_PRINT(__ham_curadj_print);
-	REC_INTRO(__ham_curadj_read, 0);
+	REC_INTRO(__ham_curadj_read, 0, 1);
 
 	if (op != DB_TXN_ABORT)
 		goto done;
@@ -1104,7 +1110,7 @@ __ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
 
 	COMPQUIET(info, NULL);
 	REC_PRINT(__ham_chgpg_print);
-	REC_INTRO(__ham_chgpg_read, 0);
+	REC_INTRO(__ham_chgpg_read, 0, 0);
 
 	if (op != DB_TXN_ABORT)
 		goto done;
@@ -1113,11 +1119,11 @@ __ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
 	indx = argp->old_indx;
 	order = argp->new_indx;
 
-	MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_LOCK(dbenv, dbenv->mtx_dblist);
 	for (ldbp = __dblist_get(dbenv, file_dbp->adj_fileid);
 	    ldbp != NULL && ldbp->adj_fileid == file_dbp->adj_fileid;
 	    ldbp = LIST_NEXT(ldbp, dblistlinks)) {
-		MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+		MUTEX_LOCK(dbenv, file_dbp->mutex);
 
 		for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
 		    cp = TAILQ_NEXT(cp, links)) {
@@ -1183,17 +1189,17 @@ __ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
 				 * the cursor we're adjusting can't be closed
 				 * under us.
 				 */
-				MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+				MUTEX_UNLOCK(dbenv, file_dbp->mutex);
 				if ((ret = __db_c_close(lcp->opd)) != 0)
 					goto out;
-				MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+				MUTEX_LOCK(dbenv, file_dbp->mutex);
 				lcp->opd = NULL;
 				break;
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, file_dbp->mutex);
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+	MUTEX_UNLOCK(dbenv, dbenv->mtx_dblist);
 
 done:	*lsnp = argp->prev_lsn;
 out:	REC_CLOSE;
diff --git a/storage/bdb/hash/hash_reclaim.c b/storage/bdb/hash/hash_reclaim.c
index f0adba7c67c..20f354d9838 100644
--- a/storage/bdb/hash/hash_reclaim.c
+++ b/storage/bdb/hash/hash_reclaim.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_reclaim.c,v 11.17 2004/06/22 18:43:38 margo Exp $
+ * $Id: hash_reclaim.c,v 12.2 2005/06/16 20:22:53 bostic Exp $
  */
 
 #include "db_config.h"
@@ -88,6 +88,7 @@ __ham_truncate(dbc, countp)
 	if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
 		ret = t_ret;
 
-	*countp = trunc.count;
+	if (countp != NULL)
+		*countp = trunc.count;
 	return (ret);
 }
diff --git a/storage/bdb/hash/hash_stat.c b/storage/bdb/hash/hash_stat.c
index a50e383af0d..1771c2a2296 100644
--- a/storage/bdb/hash/hash_stat.c
+++ b/storage/bdb/hash/hash_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_stat.c,v 11.66 2004/09/22 03:46:22 bostic Exp $
+ * $Id: hash_stat.c,v 12.1 2005/06/16 20:22:53 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_stub.c b/storage/bdb/hash/hash_stub.c
index 7bbe925c7d2..da6791bc5c5 100644
--- a/storage/bdb/hash/hash_stub.c
+++ b/storage/bdb/hash/hash_stub.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_stub.c,v 1.10 2004/09/29 15:35:14 bostic Exp $
+ * $Id: hash_stub.c,v 12.1 2005/06/16 20:22:54 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_upgrade.c b/storage/bdb/hash/hash_upgrade.c
index b626138efd5..4d7f705b178 100644
--- a/storage/bdb/hash/hash_upgrade.c
+++ b/storage/bdb/hash/hash_upgrade.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_upgrade.c,v 11.35 2004/04/06 12:38:08 bostic Exp $
+ * $Id: hash_upgrade.c,v 12.1 2005/06/16 20:22:54 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hash/hash_verify.c b/storage/bdb/hash/hash_verify.c
index b9caab27601..de82103430a 100644
--- a/storage/bdb/hash/hash_verify.c
+++ b/storage/bdb/hash/hash_verify.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: hash_verify.c,v 1.63 2004/10/14 18:11:36 bostic Exp $
+ * $Id: hash_verify.c,v 12.8 2005/06/16 20:22:54 bostic Exp $
  */
 
 #include "db_config.h"
@@ -110,8 +110,6 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
 	 * max_bucket, high_mask and low_mask: high_mask must be one
 	 * less than the next power of two above max_bucket, and
 	 * low_mask must be one less than the power of two below it.
-	 *
-	 *
 	 */
 	pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
 	if (m->high_mask != pwr - 1) {
@@ -795,10 +793,10 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
 	DBT dbt, unkdbt;
 	db_pgno_t dpgno;
 	int ret, err_ret, t_ret;
-	u_int32_t himark, tlen;
-	u_int8_t *hk;
+	u_int32_t himark, i;
+	u_int8_t *hk, *p;
 	void *buf;
-	u_int32_t dlen, len, i;
+	db_indx_t dlen, len, tlen;
 
 	memset(&dbt, 0, sizeof(DBT));
 	dbt.flags = DB_DBT_REALLOC;
@@ -830,28 +828,25 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
 			break;
 
 		if (ret == 0) {
+			/* Set len to total entry length. */
+			len = LEN_HITEM(dbp, h, dbp->pgsize, i);
 			hk = P_ENTRY(dbp, h, i);
-			len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
-			if ((u_int32_t)(hk + len - (u_int8_t *)h) >
+			if (len == 0 || len > dbp->pgsize ||
+			    (u_int32_t)(hk + len - (u_int8_t *)h) >
 			    dbp->pgsize) {
-				/*
-				 * Item is unsafely large;  either continue
-				 * or set it to the whole page, depending on
-				 * aggressiveness.
-				 */
-				if (!LF_ISSET(DB_AGGRESSIVE))
-					continue;
-				len = dbp->pgsize -
-				    (u_int32_t)(hk - (u_int8_t *)h);
+				/* Item is unsafely large; skip it. */
 				err_ret = DB_VERIFY_BAD;
+				continue;
 			}
 			switch (HPAGE_PTYPE(hk)) {
 			default:
 				if (!LF_ISSET(DB_AGGRESSIVE))
 					break;
 				err_ret = DB_VERIFY_BAD;
-				/* FALLTHROUGH */
+				break;
 			case H_KEYDATA:
+				/* Update len to size of item. */
+				len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
 keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
 				dbt.size = len;
 				dbt.data = buf;
@@ -878,12 +873,12 @@ keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
 					err_ret = ret;
 				break;
 			case H_OFFDUP:
-				if (len < HOFFPAGE_SIZE) {
+				if (len < HOFFDUP_SIZE) {
 					err_ret = DB_VERIFY_BAD;
 					continue;
 				}
 				memcpy(&dpgno,
-				    HOFFPAGE_PGNO(hk), sizeof(dpgno));
+				    HOFFDUP_PGNO(hk), sizeof(dpgno));
 				/* UNKNOWN iff pgno is bad or we're a key. */
 				if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
 					if ((ret =
@@ -896,6 +891,7 @@ keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
 					err_ret = ret;
 				break;
 			case H_DUPLICATE:
+				len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
 				/*
 				 * We're a key;  printing dups will seriously
 				 * foul the output.  If we're being aggressive,
@@ -909,7 +905,12 @@ keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
 					break;
 				}
 
-				/* Too small to have any data. */
+				/*
+				 * Check if too small to have any data.
+				 * But first, we have to update the len to
+				 * reflect the size of the data not the
+				 * size of the on-page entry.
+				 */
 				if (len <
 				    HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
 					err_ret = DB_VERIFY_BAD;
@@ -919,15 +920,17 @@ keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
 				/* Loop until we hit the total length. */
 				for (tlen = 0; tlen + sizeof(db_indx_t) < len;
 				    tlen += dlen) {
+					p = HKEYDATA_DATA(hk) + tlen;
 					tlen += sizeof(db_indx_t);
-					memcpy(&dlen, hk, sizeof(db_indx_t));
+					memcpy(&dlen, p, sizeof(db_indx_t));
+					p += sizeof(db_indx_t);
 					/*
 					 * If dlen is too long, print all the
 					 * rest of the dup set in a chunk.
 					 */
 					if (dlen + tlen > len)
 						dlen = len - tlen;
-					memcpy(buf, hk + tlen, dlen);
+					memcpy(buf, p, dlen);
 					dbt.size = dlen;
 					dbt.data = buf;
 					if ((ret = __db_vrfy_prdbt(&dbt, 0, " ",
diff --git a/storage/bdb/hmac/hmac.c b/storage/bdb/hmac/hmac.c
index bb2da5eb472..b2874f74ee8 100644
--- a/storage/bdb/hmac/hmac.c
+++ b/storage/bdb/hmac/hmac.c
@@ -1,13 +1,13 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
  * Some parts of this code originally written by Adam Stubblefield,
  * -- astubble@rice.edu.
  *
- * $Id: hmac.c,v 1.27 2004/01/28 03:36:11 bostic Exp $
+ * $Id: hmac.c,v 12.1 2005/06/16 20:22:55 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hmac/sha1.c b/storage/bdb/hmac/sha1.c
index 8824796f099..839b97b8355 100644
--- a/storage/bdb/hmac/sha1.c
+++ b/storage/bdb/hmac/sha1.c
@@ -1,5 +1,5 @@
 /*
- * $Id: sha1.c,v 1.14 2004/01/28 03:36:11 bostic Exp $
+ * $Id: sha1.c,v 12.0 2004/11/17 03:43:56 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/hsearch/hsearch.c b/storage/bdb/hsearch/hsearch.c
index 5bcbe93d386..f9cd03e66a1 100644
--- a/storage/bdb/hsearch/hsearch.c
+++ b/storage/bdb/hsearch/hsearch.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  */
 /*
@@ -39,7 +39,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: hsearch.c,v 11.14 2004/01/28 03:36:11 bostic Exp $
+ * $Id: hsearch.c,v 12.2 2005/06/16 20:22:56 bostic Exp $
  */
 
 #include "db_config.h"
@@ -82,8 +82,8 @@ __db_hcreate(nel)
 	if ((ret = dbp->set_pagesize(dbp, 512)) != 0 ||
 	    (ret = dbp->set_h_ffactor(dbp, 16)) != 0 ||
 	    (ret = dbp->set_h_nelem(dbp, (u_int32_t)nel)) != 0 ||
-	    (ret = dbp->open(dbp,
-	    NULL, NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
+	    (ret = dbp->open(dbp, NULL,
+	    NULL, NULL, DB_HASH, DB_CREATE, __db_omode(OWNER_RW))) != 0)
 		__os_set_errno(ret);
 
 	/*
diff --git a/storage/bdb/libdb_java/checkapi.prl b/storage/bdb/libdb_java/checkapi.prl
deleted file mode 100644
index a27b8ffd107..00000000000
--- a/storage/bdb/libdb_java/checkapi.prl
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-# Released to public domain by Donald Anderson  dda@world.std.com
-# No warranties.
-#
-# Perl script to check for matching of JNI interfaces to implementation.
-# We check all .cpp arguments and .h arguments and make sure that for
-# each .h declaration (marked by JNIEXPORT keyword), there is a .cpp
-# definition for the same function (also marked by JNIEXPORT keyword),
-# and vice versa.  Definitions and declarations are determined solely
-# by whether they are in a .h or .cpp file - we don't do any further
-# analysis.
-#
-# Some additions made to help with Berkeley DB sources:
-#
-#   Berkeley DB Java sources use JAVADB_*_ACCESS #defines
-#   to quickly define routine access functions.
-
-foreach $file (<@ARGV>) {      # glob allows direct use from Win* makefiles
-    open (FILE, $file) || die "$file: cannot open\n";
-    $dot_h = 0;
-    if ($file =~ /.*[hH]$/) {
-        $dot_h = 1;
-    }
-    $in_def = 0;
-nextline:
-    while () {
-        chop;
-        if (/JNIEXPORT/ || /^JAVADB_.*_ACCESS/) {
-            $in_def = 1;
-            $def = "";
-        }
-        if ($in_def == 1) {
-            $def .= " $_";
-        }
-        if (/\)/) {
-            $line = "";
-            $in_def = 0;
-            if ($def eq "") {
-                next nextline;
-            }
-            $_ = $def;
-            # remove comments
-            s@/\*[^*]*\*/@@g;
-            s@[ 	][ 	]*@ @g;
-            s@^[ 	]@@g;
-            s@[ 	]$@@g;
-            s@JNIEnv *\* *@JNIEnv @g;
-            s@([,*()]) @\1@g;
-            s@ ([,*()])@\1@g;
-
-            s/JAVADB_WO_ACCESS_METHOD/JAVADB_WO_ACCESS/;
-
-            if (/^JAVADB_.*_ACCESS/) {
-                s@  *@ @g;
-                s@_ACCESS_STRING\(([^,]*),@_ACCESS(\1,jstring,@;
-                s@_ACCESS_BEFORE_APPINIT@_ACCESS@;
-                s@_ACCESS\(@,normal,@;
-                s@JAVADB_@@;
-                s@\)@,@;
-                @vars = split(/,/);
-                $get = 0;
-                $set = 0;
-                if (@vars[0] eq "RW") {
-                    $get = 1;
-                    $set = 1;
-                }
-                if (@vars[0] eq "RO") {
-                    $get = 1;
-                }
-                if (@vars[0] eq "WO") {
-                    $set = 1;
-                }
-                if ($get == 0 && $set == 0) {
-                    print "Invalid use of JAVADB_ macro\n";
-                }
-                if ($set == 1) {
-                    $line = "JNIEXPORT void JNICALL Java_com_sleepycat_db_@vars[2]_set_1@vars[4](JNIEnv,jobject,@vars[3])";
-                }
-                if ($get == 1) {
-                    $line2 = "JNIEXPORT @vars[3] JNICALL Java_com_sleepycat_db_@vars[2]_get_1@vars[4](JNIEnv,jobject)";
-                }
-            }
-            else {
-                s@([,(][a-zA-Z0-9_]*) [a-zA-Z0-9_]*@\1@g;
-                s@;$@@g;
-                $line = $_;
-            }
-
-            $def = "";
-
-            if ($line ne "") {
-                if ($lines{$line} eq "") {
-                    $lines{$line} = 0;
-                }
-                if ($dot_h == 1) {
-                    $lines{$line} += 1;
-                }
-                else {
-                    $lines{$line} -= 1;
-                }
-                $line = "";
-            }
-            if ($line2 ne "") {
-                if ($lines{$line2} eq "") {
-                    $lines{$line2} = 0;
-                }
-                if ($dot_h == 1) {
-                    $lines{$line2} += 1;
-                }
-                else {
-                    $lines{$line2} -= 1;
-                }
-                $line2 = "";
-            }
-        }
-    }
-    close (FILE);
-}
-
-$status = 0;
-foreach $key (sort keys %lines) {
-    if ($lines{$key} != 0) {
-        if ($lines{$key} > 0) {
-            print "Missing .cpp implementation: $lines${key}\n";
-            $status = 1;
-        }
-        else {
-            print "Missing .h declaration: $lines${key}\n";
-            $status = 1;
-        }
-    }
-}
-
-exit ($status);
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Db.h b/storage/bdb/libdb_java/com_sleepycat_db_Db.h
deleted file mode 100644
index 0787ae87aed..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_Db.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_Db */
-
-#ifndef _Included_com_sleepycat_db_Db
-#define _Included_com_sleepycat_db_Db
-#ifdef __cplusplus
-extern "C" {
-#endif
-#undef com_sleepycat_db_Db_DB_BTREE
-#define com_sleepycat_db_Db_DB_BTREE 1L
-#undef com_sleepycat_db_Db_DB_DONOTINDEX
-#define com_sleepycat_db_Db_DB_DONOTINDEX -30999L
-#undef com_sleepycat_db_Db_DB_HASH
-#define com_sleepycat_db_Db_DB_HASH 2L
-#undef com_sleepycat_db_Db_DB_KEYEMPTY
-#define com_sleepycat_db_Db_DB_KEYEMPTY -30998L
-#undef com_sleepycat_db_Db_DB_KEYEXIST
-#define com_sleepycat_db_Db_DB_KEYEXIST -30997L
-#undef com_sleepycat_db_Db_DB_LOCK_DEADLOCK
-#define com_sleepycat_db_Db_DB_LOCK_DEADLOCK -30996L
-#undef com_sleepycat_db_Db_DB_LOCK_NOTGRANTED
-#define com_sleepycat_db_Db_DB_LOCK_NOTGRANTED -30995L
-#undef com_sleepycat_db_Db_DB_NOSERVER
-#define com_sleepycat_db_Db_DB_NOSERVER -30994L
-#undef com_sleepycat_db_Db_DB_NOSERVER_HOME
-#define com_sleepycat_db_Db_DB_NOSERVER_HOME -30993L
-#undef com_sleepycat_db_Db_DB_NOSERVER_ID
-#define com_sleepycat_db_Db_DB_NOSERVER_ID -30992L
-#undef com_sleepycat_db_Db_DB_NOTFOUND
-#define com_sleepycat_db_Db_DB_NOTFOUND -30991L
-#undef com_sleepycat_db_Db_DB_OLD_VERSION
-#define com_sleepycat_db_Db_DB_OLD_VERSION -30990L
-#undef com_sleepycat_db_Db_DB_PAGE_NOTFOUND
-#define com_sleepycat_db_Db_DB_PAGE_NOTFOUND -30989L
-#undef com_sleepycat_db_Db_DB_QUEUE
-#define com_sleepycat_db_Db_DB_QUEUE 4L
-#undef com_sleepycat_db_Db_DB_RECNO
-#define com_sleepycat_db_Db_DB_RECNO 3L
-#undef com_sleepycat_db_Db_DB_REP_DUPMASTER
-#define com_sleepycat_db_Db_DB_REP_DUPMASTER -30988L
-#undef com_sleepycat_db_Db_DB_REP_HOLDELECTION
-#define com_sleepycat_db_Db_DB_REP_HOLDELECTION -30987L
-#undef com_sleepycat_db_Db_DB_REP_NEWMASTER
-#define com_sleepycat_db_Db_DB_REP_NEWMASTER -30986L
-#undef com_sleepycat_db_Db_DB_REP_NEWSITE
-#define com_sleepycat_db_Db_DB_REP_NEWSITE -30985L
-#undef com_sleepycat_db_Db_DB_REP_OUTDATED
-#define com_sleepycat_db_Db_DB_REP_OUTDATED -30984L
-#undef com_sleepycat_db_Db_DB_RUNRECOVERY
-#define com_sleepycat_db_Db_DB_RUNRECOVERY -30982L
-#undef com_sleepycat_db_Db_DB_SECONDARY_BAD
-#define com_sleepycat_db_Db_DB_SECONDARY_BAD -30981L
-#undef com_sleepycat_db_Db_DB_TXN_ABORT
-#define com_sleepycat_db_Db_DB_TXN_ABORT 0L
-#undef com_sleepycat_db_Db_DB_TXN_APPLY
-#define com_sleepycat_db_Db_DB_TXN_APPLY 1L
-#undef com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL
-#define com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL 3L
-#undef com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL
-#define com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL 4L
-#undef com_sleepycat_db_Db_DB_TXN_PRINT
-#define com_sleepycat_db_Db_DB_TXN_PRINT 8L
-#undef com_sleepycat_db_Db_DB_UNKNOWN
-#define com_sleepycat_db_Db_DB_UNKNOWN 5L
-#undef com_sleepycat_db_Db_DB_VERIFY_BAD
-#define com_sleepycat_db_Db_DB_VERIFY_BAD -30980L
-/* Inaccessible static: DB_AFTER */
-/* Inaccessible static: DB_AGGRESSIVE */
-/* Inaccessible static: DB_APPEND */
-/* Inaccessible static: DB_ARCH_ABS */
-/* Inaccessible static: DB_ARCH_DATA */
-/* Inaccessible static: DB_ARCH_LOG */
-/* Inaccessible static: DB_AUTO_COMMIT */
-/* Inaccessible static: DB_BEFORE */
-/* Inaccessible static: DB_CACHED_COUNTS */
-/* Inaccessible static: DB_CDB_ALLDB */
-/* Inaccessible static: DB_CHKSUM_SHA1 */
-/* Inaccessible static: DB_CLIENT */
-/* Inaccessible static: DB_CONSUME */
-/* Inaccessible static: DB_CONSUME_WAIT */
-/* Inaccessible static: DB_CREATE */
-/* Inaccessible static: DB_CURRENT */
-/* Inaccessible static: DB_CXX_NO_EXCEPTIONS */
-/* Inaccessible static: DB_DBT_MALLOC */
-/* Inaccessible static: DB_DBT_PARTIAL */
-/* Inaccessible static: DB_DBT_REALLOC */
-/* Inaccessible static: DB_DBT_USERMEM */
-/* Inaccessible static: DB_DIRECT */
-/* Inaccessible static: DB_DIRECT_DB */
-/* Inaccessible static: DB_DIRECT_LOG */
-/* Inaccessible static: DB_DIRTY_READ */
-/* Inaccessible static: DB_DUP */
-/* Inaccessible static: DB_DUPSORT */
-/* Inaccessible static: DB_EID_BROADCAST */
-/* Inaccessible static: DB_EID_INVALID */
-/* Inaccessible static: DB_ENCRYPT */
-/* Inaccessible static: DB_ENCRYPT_AES */
-/* Inaccessible static: DB_EXCL */
-/* Inaccessible static: DB_FAST_STAT */
-/* Inaccessible static: DB_FIRST */
-/* Inaccessible static: DB_FLUSH */
-/* Inaccessible static: DB_FORCE */
-/* Inaccessible static: DB_GET_BOTH */
-/* Inaccessible static: DB_GET_BOTH_RANGE */
-/* Inaccessible static: DB_GET_RECNO */
-/* Inaccessible static: DB_INIT_CDB */
-/* Inaccessible static: DB_INIT_LOCK */
-/* Inaccessible static: DB_INIT_LOG */
-/* Inaccessible static: DB_INIT_MPOOL */
-/* Inaccessible static: DB_INIT_TXN */
-/* Inaccessible static: DB_JOINENV */
-/* Inaccessible static: DB_JOIN_ITEM */
-/* Inaccessible static: DB_JOIN_NOSORT */
-/* Inaccessible static: DB_KEYFIRST */
-/* Inaccessible static: DB_KEYLAST */
-/* Inaccessible static: DB_LAST */
-/* Inaccessible static: DB_LOCKDOWN */
-/* Inaccessible static: DB_LOCK_DEFAULT */
-/* Inaccessible static: DB_LOCK_EXPIRE */
-/* Inaccessible static: DB_LOCK_GET */
-/* Inaccessible static: DB_LOCK_GET_TIMEOUT */
-/* Inaccessible static: DB_LOCK_IREAD */
-/* Inaccessible static: DB_LOCK_IWR */
-/* Inaccessible static: DB_LOCK_IWRITE */
-/* Inaccessible static: DB_LOCK_MAXLOCKS */
-/* Inaccessible static: DB_LOCK_MINLOCKS */
-/* Inaccessible static: DB_LOCK_MINWRITE */
-/* Inaccessible static: DB_LOCK_NOWAIT */
-/* Inaccessible static: DB_LOCK_OLDEST */
-/* Inaccessible static: DB_LOCK_PUT */
-/* Inaccessible static: DB_LOCK_PUT_ALL */
-/* Inaccessible static: DB_LOCK_PUT_OBJ */
-/* Inaccessible static: DB_LOCK_RANDOM */
-/* Inaccessible static: DB_LOCK_READ */
-/* Inaccessible static: DB_LOCK_TIMEOUT */
-/* Inaccessible static: DB_LOCK_WRITE */
-/* Inaccessible static: DB_LOCK_YOUNGEST */
-/* Inaccessible static: DB_MULTIPLE */
-/* Inaccessible static: DB_MULTIPLE_KEY */
-/* Inaccessible static: DB_NEXT */
-/* Inaccessible static: DB_NEXT_DUP */
-/* Inaccessible static: DB_NEXT_NODUP */
-/* Inaccessible static: DB_NODUPDATA */
-/* Inaccessible static: DB_NOLOCKING */
-/* Inaccessible static: DB_NOMMAP */
-/* Inaccessible static: DB_NOORDERCHK */
-/* Inaccessible static: DB_NOOVERWRITE */
-/* Inaccessible static: DB_NOPANIC */
-/* Inaccessible static: DB_NOSYNC */
-/* Inaccessible static: DB_ODDFILESIZE */
-/* Inaccessible static: DB_ORDERCHKONLY */
-/* Inaccessible static: DB_OVERWRITE */
-/* Inaccessible static: DB_PANIC_ENVIRONMENT */
-/* Inaccessible static: DB_POSITION */
-/* Inaccessible static: DB_PREV */
-/* Inaccessible static: DB_PREV_NODUP */
-/* Inaccessible static: DB_PRINTABLE */
-/* Inaccessible static: DB_PRIORITY_DEFAULT */
-/* Inaccessible static: DB_PRIORITY_HIGH */
-/* Inaccessible static: DB_PRIORITY_LOW */
-/* Inaccessible static: DB_PRIORITY_VERY_HIGH */
-/* Inaccessible static: DB_PRIORITY_VERY_LOW */
-/* Inaccessible static: DB_PRIVATE */
-/* Inaccessible static: DB_RDONLY */
-/* Inaccessible static: DB_RECNUM */
-/* Inaccessible static: DB_RECORDCOUNT */
-/* Inaccessible static: DB_RECOVER */
-/* Inaccessible static: DB_RECOVER_FATAL */
-/* Inaccessible static: DB_REGION_INIT */
-/* Inaccessible static: DB_RENUMBER */
-/* Inaccessible static: DB_REP_CLIENT */
-/* Inaccessible static: DB_REP_LOGSONLY */
-/* Inaccessible static: DB_REP_MASTER */
-/* Inaccessible static: DB_REP_PERMANENT */
-/* Inaccessible static: DB_REP_UNAVAIL */
-/* Inaccessible static: DB_REVSPLITOFF */
-/* Inaccessible static: DB_RMW */
-/* Inaccessible static: DB_SALVAGE */
-/* Inaccessible static: DB_SET */
-/* Inaccessible static: DB_SET_LOCK_TIMEOUT */
-/* Inaccessible static: DB_SET_RANGE */
-/* Inaccessible static: DB_SET_RECNO */
-/* Inaccessible static: DB_SET_TXN_TIMEOUT */
-/* Inaccessible static: DB_SNAPSHOT */
-/* Inaccessible static: DB_STAT_CLEAR */
-/* Inaccessible static: DB_SYSTEM_MEM */
-/* Inaccessible static: DB_THREAD */
-/* Inaccessible static: DB_TRUNCATE */
-/* Inaccessible static: DB_TXN_NOSYNC */
-/* Inaccessible static: DB_TXN_NOWAIT */
-/* Inaccessible static: DB_TXN_SYNC */
-/* Inaccessible static: DB_TXN_WRITE_NOSYNC */
-/* Inaccessible static: DB_UPGRADE */
-/* Inaccessible static: DB_USE_ENVIRON */
-/* Inaccessible static: DB_USE_ENVIRON_ROOT */
-/* Inaccessible static: DB_VERB_CHKPOINT */
-/* Inaccessible static: DB_VERB_DEADLOCK */
-/* Inaccessible static: DB_VERB_RECOVERY */
-/* Inaccessible static: DB_VERB_REPLICATION */
-/* Inaccessible static: DB_VERB_WAITSFOR */
-/* Inaccessible static: DB_VERIFY */
-/* Inaccessible static: DB_VERSION_MAJOR */
-/* Inaccessible static: DB_VERSION_MINOR */
-/* Inaccessible static: DB_VERSION_PATCH */
-/* Inaccessible static: DB_WRITECURSOR */
-/* Inaccessible static: DB_XA_CREATE */
-/* Inaccessible static: DB_XIDDATASIZE */
-/* Inaccessible static: DB_YIELDCPU */
-/* Inaccessible static: already_loaded_ */
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _init
- * Signature: (Lcom/sleepycat/db/DbEnv;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _notify_internal
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _associate
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Db;Lcom/sleepycat/db/DbSecondaryKeyCreate;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
-  (JNIEnv *, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _close
- * Signature: (I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    cursor
- * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/Dbc;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    del
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
-  (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    err
- * Signature: (ILjava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
-  (JNIEnv *, jobject, jint, jstring);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    errx
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    fd
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _finalize
- * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
-  (JNIEnv *, jobject, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    get
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
-  (JNIEnv *, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    get_byteswapped
- * Signature: ()Z
- */
-JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    get_type
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    join
- * Signature: ([Lcom/sleepycat/db/Dbc;I)Lcom/sleepycat/db/Dbc;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
-  (JNIEnv *, jobject, jobjectArray, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    key_range
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbKeyRange;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
-  (JNIEnv *, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _open
- * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;III)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
-  (JNIEnv *, jobject, jobject, jstring, jstring, jint, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    pget
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
-  (JNIEnv *, jobject, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    put
- * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
-  (JNIEnv *, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _rename
- * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
-  (JNIEnv *, jobject, jstring, jstring, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    _remove
- * Signature: (Ljava/lang/String;Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
-  (JNIEnv *, jobject, jstring, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    append_recno_changed
- * Signature: (Lcom/sleepycat/db/DbAppendRecno;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    bt_compare_changed
- * Signature: (Lcom/sleepycat/db/DbBtreeCompare;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_bt_maxkey
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1maxkey
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_bt_minkey
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1minkey
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    bt_prefix_changed
- * Signature: (Lcom/sleepycat/db/DbBtreePrefix;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_cachesize
- * Signature: (III)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cachesize
-  (JNIEnv *, jobject, jint, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_cache_priority
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cache_1priority
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    dup_compare_changed
- * Signature: (Lcom/sleepycat/db/DbDupCompare;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_encrypt
- * Signature: (Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
-  (JNIEnv *, jobject, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    feedback_changed
- * Signature: (Lcom/sleepycat/db/DbFeedback;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_flags
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1flags
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    get_flags_raw
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1flags_1raw
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_h_ffactor
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1ffactor
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    hash_changed
- * Signature: (Lcom/sleepycat/db/DbHash;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_h_nelem
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1nelem
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_lorder
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1lorder
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_pagesize
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1pagesize
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_re_delim
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1delim
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_re_len
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1len
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_re_pad
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1pad
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_re_source
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1source
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    set_q_extentsize
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1q_1extentsize
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    stat
- * Signature: (I)Ljava/lang/Object;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    sync
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_sync
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    truncate
- * Signature: (Lcom/sleepycat/db/DbTxn;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    upgrade
- * Signature: (Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
-  (JNIEnv *, jobject, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    verify
- * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/io/OutputStream;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
-  (JNIEnv *, jobject, jstring, jstring, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Db
- * Method:    one_time_init
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
-  (JNIEnv *, jclass);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h b/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
deleted file mode 100644
index f239dfc7593..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
+++ /dev/null
@@ -1,581 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbEnv */
-
-#ifndef _Included_com_sleepycat_db_DbEnv
-#define _Included_com_sleepycat_db_DbEnv
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _close
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    dbremove
- * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
-  (JNIEnv *, jobject, jobject, jstring, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    dbrename
- * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
-  (JNIEnv *, jobject, jobject, jstring, jstring, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    err
- * Signature: (ILjava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
-  (JNIEnv *, jobject, jint, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    errx
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _finalize
- * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
-  (JNIEnv *, jobject, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _init
- * Signature: (Lcom/sleepycat/db/DbErrcall;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _init_using_db
- * Signature: (Lcom/sleepycat/db/DbErrcall;Lcom/sleepycat/db/Db;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
-  (JNIEnv *, jobject, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _init_using_xa
- * Signature: (Lcom/sleepycat/db/DbErrcall;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1xa
-  (JNIEnv *, jobject, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _notify_db_close
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    open
- * Signature: (Ljava/lang/String;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
-  (JNIEnv *, jobject, jstring, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    remove
- * Signature: (Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
-  (JNIEnv *, jobject, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_cachesize
- * Signature: (III)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1cachesize
-  (JNIEnv *, jobject, jint, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_encrypt
- * Signature: (Ljava/lang/String;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
-  (JNIEnv *, jobject, jstring, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _set_errcall
- * Signature: (Lcom/sleepycat/db/DbErrcall;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _set_errpfx
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    feedback_changed
- * Signature: (Lcom/sleepycat/db/DbEnvFeedback;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_verbose
- * Signature: (IZ)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1verbose
-  (JNIEnv *, jobject, jint, jboolean);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_data_dir
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1data_1dir
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lg_bsize
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1bsize
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lg_dir
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1dir
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lg_max
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1max
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lg_regionmax
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1regionmax
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_conflicts
- * Signature: ([[B)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
-  (JNIEnv *, jobject, jobjectArray);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_detect
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1detect
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_max
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_max_lockers
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1lockers
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_max_locks
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1locks
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_lk_max_objects
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1objects
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_mp_mmapsize
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_flags
- * Signature: (IZ)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
-  (JNIEnv *, jobject, jint, jboolean);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_rep_limit
- * Signature: (II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rep_1limit
-  (JNIEnv *, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    rep_transport_changed
- * Signature: (ILcom/sleepycat/db/DbRepTransport;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
-  (JNIEnv *, jobject, jint, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_rpc_server
- * Signature: (Lcom/sleepycat/db/DbClient;Ljava/lang/String;JJI)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rpc_1server
-  (JNIEnv *, jobject, jobject, jstring, jlong, jlong, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_shm_key
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1shm_1key
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_tas_spins
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tas_1spins
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_timeout
- * Signature: (JI)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1timeout
-  (JNIEnv *, jobject, jlong, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_tmp_dir
- * Signature: (Ljava/lang/String;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tmp_1dir
-  (JNIEnv *, jobject, jstring);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    app_dispatch_changed
- * Signature: (Lcom/sleepycat/db/DbAppDispatch;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    set_tx_max
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tx_1max
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    _set_tx_timestamp
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    get_version_major
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
-  (JNIEnv *, jclass);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    get_version_minor
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
-  (JNIEnv *, jclass);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    get_version_patch
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
-  (JNIEnv *, jclass);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    get_version_string
- * Signature: ()Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
-  (JNIEnv *, jclass);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    strerror
- * Signature: (I)Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
-  (JNIEnv *, jclass, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_detect
- * Signature: (II)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
-  (JNIEnv *, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_get
- * Signature: (IILcom/sleepycat/db/Dbt;I)Lcom/sleepycat/db/DbLock;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
-  (JNIEnv *, jobject, jint, jint, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_put
- * Signature: (Lcom/sleepycat/db/DbLock;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_id
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_id_free
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1id_1free
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_stat
- * Signature: (I)Lcom/sleepycat/db/DbLockStat;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    lock_vec
- * Signature: (II[Lcom/sleepycat/db/DbLockRequest;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
-  (JNIEnv *, jobject, jint, jint, jobjectArray, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_archive
- * Signature: (I)[Ljava/lang/String;
- */
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_compare
- * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/DbLsn;)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
-  (JNIEnv *, jclass, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_cursor
- * Signature: (I)Lcom/sleepycat/db/DbLogc;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_file
- * Signature: (Lcom/sleepycat/db/DbLsn;)Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_flush
- * Signature: (Lcom/sleepycat/db/DbLsn;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_put
- * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
-  (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    log_stat
- * Signature: (I)Lcom/sleepycat/db/DbLogStat;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    memp_stat
- * Signature: (I)Lcom/sleepycat/db/DbMpoolStat;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    memp_fstat
- * Signature: (I)[Lcom/sleepycat/db/DbMpoolFStat;
- */
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    memp_trickle
- * Signature: (I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    rep_elect
- * Signature: (III)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1elect
-  (JNIEnv *, jobject, jint, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    rep_process_message
- * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbEnv$RepProcessMessage;)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1process_1message
-  (JNIEnv *, jobject, jobject, jobject, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    rep_start
- * Signature: (Lcom/sleepycat/db/Dbt;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1start
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    rep_stat
- * Signature: (I)Lcom/sleepycat/db/DbRepStat;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    txn_begin
- * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/DbTxn;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    txn_checkpoint
- * Signature: (III)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
-  (JNIEnv *, jobject, jint, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    txn_recover
- * Signature: (II)[Lcom/sleepycat/db/DbPreplist;
- */
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
-  (JNIEnv *, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_DbEnv
- * Method:    txn_stat
- * Signature: (I)Lcom/sleepycat/db/DbTxnStat;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
-  (JNIEnv *, jobject, jint);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
deleted file mode 100644
index 9f3d77d44bc..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbLock */
-
-#ifndef _Included_com_sleepycat_db_DbLock
-#define _Included_com_sleepycat_db_DbLock
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_DbLock
- * Method:    finalize
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
-  (JNIEnv *, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
deleted file mode 100644
index 8d029c761ba..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbLogc */
-
-#ifndef _Included_com_sleepycat_db_DbLogc
-#define _Included_com_sleepycat_db_DbLogc
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_DbLogc
- * Method:    close
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbLogc
- * Method:    get
- * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
-  (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbLogc
- * Method:    finalize
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
-  (JNIEnv *, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
deleted file mode 100644
index 080fa0a8758..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbLsn */
-
-#ifndef _Included_com_sleepycat_db_DbLsn
-#define _Included_com_sleepycat_db_DbLsn
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_DbLsn
- * Method:    finalize
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbLsn
- * Method:    init_lsn
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
-  (JNIEnv *, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
deleted file mode 100644
index 59641c041a4..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbTxn */
-
-#ifndef _Included_com_sleepycat_db_DbTxn
-#define _Included_com_sleepycat_db_DbTxn
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    abort
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    commit
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    discard
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_discard
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    id
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    prepare
- * Signature: ([B)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
-  (JNIEnv *, jobject, jbyteArray);
-
-/*
- * Class:     com_sleepycat_db_DbTxn
- * Method:    set_timeout
- * Signature: (JI)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_set_1timeout
-  (JNIEnv *, jobject, jlong, jint);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h b/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
deleted file mode 100644
index 7f8495590c0..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_DbUtil */
-
-#ifndef _Included_com_sleepycat_db_DbUtil
-#define _Included_com_sleepycat_db_DbUtil
-#ifdef __cplusplus
-extern "C" {
-#endif
-/* Inaccessible static: big_endian */
-/*
- * Class:     com_sleepycat_db_DbUtil
- * Method:    is_big_endian
- * Signature: ()Z
- */
-JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_DbUtil_is_1big_1endian
-  (JNIEnv *, jclass);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
deleted file mode 100644
index 447ab234844..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_Dbc */
-
-#ifndef _Included_com_sleepycat_db_Dbc
-#define _Included_com_sleepycat_db_Dbc
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    close
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    count
- * Signature: (I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    del
- * Signature: (I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    dup
- * Signature: (I)Lcom/sleepycat/db/Dbc;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
-  (JNIEnv *, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    get
- * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
-  (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    pget
- * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
-  (JNIEnv *, jobject, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    put
- * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
-  (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_Dbc
- * Method:    finalize
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
-  (JNIEnv *, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
deleted file mode 100644
index c09bd8e6131..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_Dbt */
-
-#ifndef _Included_com_sleepycat_db_Dbt
-#define _Included_com_sleepycat_db_Dbt
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     com_sleepycat_db_Dbt
- * Method:    finalize
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Dbt
- * Method:    init
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
-  (JNIEnv *, jobject);
-
-/*
- * Class:     com_sleepycat_db_Dbt
- * Method:    create_data
- * Signature: ()[B
- */
-JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
-  (JNIEnv *, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h b/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
deleted file mode 100644
index 00e9e2e6893..00000000000
--- a/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include 
-/* Header for class com_sleepycat_db_xa_DbXAResource */
-
-#ifndef _Included_com_sleepycat_db_xa_DbXAResource
-#define _Included_com_sleepycat_db_xa_DbXAResource
-#ifdef __cplusplus
-extern "C" {
-#endif
-/* Inaccessible static: unique_rmid */
-/* Inaccessible static: class_00024com_00024sleepycat_00024db_00024xa_00024DbXAResource */
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _init
- * Signature: (Ljava/lang/String;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
-  (JNIEnv *, jobject, jstring, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _close
- * Signature: (Ljava/lang/String;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
-  (JNIEnv *, jobject, jstring, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _commit
- * Signature: (Ljavax/transaction/xa/Xid;IZ)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
-  (JNIEnv *, jobject, jobject, jint, jboolean);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _end
- * Signature: (Ljavax/transaction/xa/Xid;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
-  (JNIEnv *, jobject, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _forget
- * Signature: (Ljavax/transaction/xa/Xid;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _prepare
- * Signature: (Ljavax/transaction/xa/Xid;I)I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _recover
- * Signature: (II)[Ljavax/transaction/xa/Xid;
- */
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
-  (JNIEnv *, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _rollback
- * Signature: (Ljavax/transaction/xa/Xid;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
-  (JNIEnv *, jobject, jobject, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    _start
- * Signature: (Ljavax/transaction/xa/Xid;II)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
-  (JNIEnv *, jobject, jobject, jint, jint);
-
-/*
- * Class:     com_sleepycat_db_xa_DbXAResource
- * Method:    xa_attach
- * Signature: (Ljavax/transaction/xa/Xid;Ljava/lang/Integer;)Lcom/sleepycat/db/xa/DbXAResource$DbAttach;
- */
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
-  (JNIEnv *, jclass, jobject, jobject);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/storage/bdb/libdb_java/java_Db.c b/storage/bdb/libdb_java/java_Db.c
deleted file mode 100644
index 465c40f7d5a..00000000000
--- a/storage/bdb/libdb_java/java_Db.c
+++ /dev/null
@@ -1,982 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_Db.c,v 11.80 2002/08/29 14:22:23 margo Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "dbinc/db_page.h"
-#include "dbinc/btree.h"
-#include "dbinc_auto/db_ext.h"
-#include "java_util.h"
-#include "java_stat_auto.h"
-#include "com_sleepycat_db_Db.h"
-
-/* This struct is used in Db.verify and its callback */
-struct verify_callback_struct {
-	JNIEnv *env;
-	jobject streamobj;
-	jbyteArray bytes;
-	int nbytes;
-	jmethodID writemid;
-};
-
-JAVADB_GET_FLD(Db, jint, flags_1raw, DB, flags)
-
-JAVADB_SET_METH(Db, jint, flags, DB, flags)
-JAVADB_SET_METH(Db, jint, h_1ffactor, DB, h_ffactor)
-JAVADB_SET_METH(Db, jint, h_1nelem, DB, h_nelem)
-JAVADB_SET_METH(Db, jint, lorder, DB, lorder)
-JAVADB_SET_METH(Db, jint, re_1delim, DB, re_delim)
-JAVADB_SET_METH(Db, jint, re_1len, DB, re_len)
-JAVADB_SET_METH(Db, jint, re_1pad, DB, re_pad)
-JAVADB_SET_METH(Db, jint, q_1extentsize, DB, q_extentsize)
-JAVADB_SET_METH(Db, jint, bt_1maxkey, DB, bt_maxkey)
-JAVADB_SET_METH(Db, jint, bt_1minkey, DB, bt_minkey)
-
-/*
- * This only gets called once ever, at the beginning of execution
- * and can be used to initialize unchanging methodIds, fieldIds, etc.
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
-  (JNIEnv *jnienv,  /*Db.class*/ jclass jthisclass)
-{
-	COMPQUIET(jthisclass, NULL);
-
-	one_time_init(jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbEnv*/ jobject jdbenv, jint flags)
-{
-	int err;
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-	DB_ENV *dbenv;
-
-	dbenv = get_DB_ENV(jnienv, jdbenv);
-	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
-	DB_ASSERT(dbinfo == NULL);
-
-	err = db_create(&db, dbenv, flags);
-	if (verify_return(jnienv, err, 0)) {
-		set_private_dbobj(jnienv, name_DB, jthis, db);
-		dbinfo = dbji_construct(jnienv, jthis, flags);
-		set_private_info(jnienv, name_DB, jthis, dbinfo);
-		db->api_internal = dbinfo;
-	}
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
-    (JNIEnv *jnienv, /*Db*/ jobject jthis, /* DbTxn */ jobject jtxn,
-     /*Db*/ jobject jsecondary, /*DbSecondaryKeyCreate*/ jobject jcallback,
-     jint flags)
-{
-	DB *db, *secondary;
-	DB_JAVAINFO *second_info;
-	DB_TXN *txn;
-
-	db = get_DB(jnienv, jthis);
-	txn = get_DB_TXN(jnienv, jtxn);
-	secondary = get_DB(jnienv, jsecondary);
-
-	second_info = (DB_JAVAINFO*)secondary->api_internal;
-	dbji_set_assoc_object(second_info, jnienv, db, txn, secondary,
-			      jcallback, flags);
-
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
-{
-	int err;
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return (0);
-
-	/*
-	 * Null out the private data to indicate the DB is invalid.
-	 * We do this in advance to help guard against multithreading
-	 * issues.
-	 */
-	set_private_dbobj(jnienv, name_DB, jthis, 0);
-
-	err = db->close(db, flags);
-	verify_return(jnienv, err, 0);
-	dbji_dealloc(dbinfo, jnienv);
-
-	return (err);
-}
-
-/*
- * We are being notified that the parent DbEnv has closed.
- * Zero out the pointer to the DB, since it is no longer
- * valid, to prevent mistakes.  The user will get a null
- * pointer exception if they try to use this Db again.
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
-  (JNIEnv *jnienv, /*Db*/ jobject jthis)
-{
-	set_private_dbobj(jnienv, name_DB, jthis, 0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbAppendRecno*/ jobject jcallback)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_append_recno_object(dbinfo, jnienv, db, jcallback);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreeCompare*/ jobject jbtcompare)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_bt_compare_object(dbinfo, jnienv, db, jbtcompare);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreePrefix*/ jobject jbtprefix)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_bt_prefix_object(dbinfo, jnienv, db, jbtprefix);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid, jint flags)
-{
-	int err;
-	DBC *dbc;
-	DB *db = get_DB(jnienv, jthis);
-	DB_TXN *dbtxnid = get_DB_TXN(jnienv, txnid);
-
-	if (!verify_non_null(jnienv, db))
-		return (NULL);
-	err = db->cursor(db, dbtxnid, &dbc, flags);
-	verify_return(jnienv, err, 0);
-	return (get_Dbc(jnienv, dbc));
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   /*Dbt*/ jobject key, jint dbflags)
-{
-	int err;
-	DB_TXN *dbtxnid;
-	DB *db;
-	LOCKED_DBT lkey;
-
-	err = 0;
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return (0);
-
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-	if (locked_dbt_get(&lkey, jnienv, db->dbenv, key, inOp) != 0)
-		goto out;
-
-	err = db->del(db, dbtxnid, &lkey.javainfo->dbt, dbflags);
-	if (!DB_RETOK_DBDEL(err))
-		verify_return(jnienv, err, 0);
-
- out:
-	locked_dbt_put(&lkey, jnienv, db->dbenv);
-	return (err);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbDupCompare*/ jobject jdupcompare)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_dup_compare_object(dbinfo, jnienv, db, jdupcompare);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jint ecode, jstring msg)
-{
-	DB *db;
-	LOCKED_STRING ls_msg;
-
-	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
-		goto out;
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		goto out;
-
-	db->err(db, ecode, "%s", ls_msg.string);
-
- out:
-	locked_string_put(&ls_msg, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring msg)
-{
-	LOCKED_STRING ls_msg;
-	DB *db = get_DB(jnienv, jthis);
-
-	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
-		goto out;
-	if (!verify_non_null(jnienv, db))
-		goto out;
-
-	db->errx(db, "%s", ls_msg.string);
-
- out:
-	locked_string_put(&ls_msg, jnienv);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
-  (JNIEnv *jnienv, /*Db*/ jobject jthis)
-{
-	int err;
-	int return_value = 0;
-	DB *db = get_DB(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, db))
-		return (0);
-
-	err = db->fd(db, &return_value);
-	verify_return(jnienv, err, 0);
-
-	return (return_value);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring jpasswd, jint flags)
-{
-	int err;
-	DB *db;
-	LOCKED_STRING ls_passwd;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-	if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
-		goto out;
-
-	err = db->set_encrypt(db, ls_passwd.string, flags);
-	verify_return(jnienv, err, 0);
-
-out:	locked_string_put(&ls_passwd, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_feedback_object(dbinfo, jnienv, db, jfeedback);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
-{
-	int err, op_flags, retry;
-	DB *db;
-	DB_ENV *dbenv;
-	OpKind keyop, dataop;
-	DB_TXN *dbtxnid;
-	LOCKED_DBT lkey, ldata;
-
-	err = 0;
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		goto out3;
-	dbenv = db->dbenv;
-
-	/* Depending on flags, the key may be input/output. */
-	keyop = inOp;
-	dataop = outOp;
-	op_flags = flags & DB_OPFLAGS_MASK;
-	if (op_flags == DB_SET_RECNO) {
-		keyop = inOutOp;
-	}
-	else if (op_flags == DB_GET_BOTH) {
-		keyop = inOutOp;
-		dataop = inOutOp;
-	}
-
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
-		goto out1;
-	for (retry = 0; retry < 3; retry++) {
-		err = db->get(db,
-		    dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-
-		/*
-		 * If we failed due to lack of memory in our DBT arrays,
-		 * retry.
-		 */
-		if (err != ENOMEM)
-			break;
-		if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
-		    !locked_dbt_realloc(&ldata, jnienv, dbenv))
-			break;
-	}
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lkey, jnienv, dbenv);
- out3:
-	if (!DB_RETOK_DBGET(err)) {
-		if (verify_dbt(jnienv, err, &lkey) &&
-		    verify_dbt(jnienv, err, &ldata))
-			verify_return(jnienv, err, 0);
-	}
-	return (err);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbHash*/ jobject jhash)
-{
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-
-	dbinfo = (DB_JAVAINFO*)db->api_internal;
-	dbji_set_h_hash_object(dbinfo, jnienv, db, jhash);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*Dbc[]*/ jobjectArray curslist,
-   jint flags)
-{
-	int err;
-	DB *db;
-	int count;
-	DBC **newlist;
-	DBC *dbc;
-	int i;
-	int size;
-
-	db = get_DB(jnienv, jthis);
-	count = (*jnienv)->GetArrayLength(jnienv, curslist);
-	size = sizeof(DBC *) * (count+1);
-	if ((err = __os_malloc(db->dbenv, size, &newlist)) != 0) {
-		if (!verify_return(jnienv, err, 0))
-			return (NULL);
-	}
-
-	/* Convert the java array of Dbc's to a C array of DBC's. */
-	for (i = 0; i < count; i++) {
-		jobject jobj =
-		    (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
-		if (jobj == 0) {
-			/*
-			 * An embedded null in the array is treated
-			 * as an endpoint.
-			 */
-			newlist[i] = 0;
-			break;
-		}
-		else {
-			newlist[i] = get_DBC(jnienv, jobj);
-		}
-	}
-	newlist[count] = 0;
-
-	if (!verify_non_null(jnienv, db))
-		return (NULL);
-
-	err = db->join(db, newlist, &dbc, flags);
-	verify_return(jnienv, err, 0);
-	__os_free(db->dbenv, newlist);
-
-	return (get_Dbc(jnienv, dbc));
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   /*Dbt*/ jobject jkey, jobject /*DbKeyRange*/ range, jint flags)
-{
-	int err;
-	DB *db;
-	DB_TXN *dbtxnid;
-	LOCKED_DBT lkey;
-	DB_KEY_RANGE result;
-	jfieldID fid;
-	jclass krclass;
-
-	db = get_DB(jnienv, jthis);
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-	if (!verify_non_null(jnienv, db))
-		return;
-	if (!verify_non_null(jnienv, range))
-		return;
-	if (locked_dbt_get(&lkey, jnienv, db->dbenv, jkey, inOp) != 0)
-		goto out;
-	err = db->key_range(db, dbtxnid, &lkey.javainfo->dbt, &result, flags);
-	if (verify_return(jnienv, err, 0)) {
-		/* fill in the values of the DbKeyRange structure */
-		if ((krclass = get_class(jnienv, "DbKeyRange")) == NULL)
-			return;	/* An exception has been posted. */
-		fid = (*jnienv)->GetFieldID(jnienv, krclass, "less", "D");
-		(*jnienv)->SetDoubleField(jnienv, range, fid, result.less);
-		fid = (*jnienv)->GetFieldID(jnienv, krclass, "equal", "D");
-		(*jnienv)->SetDoubleField(jnienv, range, fid, result.equal);
-		fid = (*jnienv)->GetFieldID(jnienv, krclass, "greater", "D");
-		(*jnienv)->SetDoubleField(jnienv, range, fid, result.greater);
-	}
- out:
-	locked_dbt_put(&lkey, jnienv, db->dbenv);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject rkey, /*Dbt*/ jobject data, jint flags)
-{
-	int err, op_flags, retry;
-	DB *db;
-	DB_ENV *dbenv;
-	OpKind keyop, rkeyop, dataop;
-	DB_TXN *dbtxnid;
-	LOCKED_DBT lkey, lrkey, ldata;
-
-	err = 0;
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		goto out4;
-	dbenv = db->dbenv;
-
-	/* Depending on flags, the key may be input/output. */
-	keyop = inOp;
-	rkeyop = outOp;
-	dataop = outOp;
-	op_flags = flags & DB_OPFLAGS_MASK;
-	if (op_flags == DB_SET_RECNO) {
-		keyop = inOutOp;
-	}
-	else if (op_flags == DB_GET_BOTH) {
-		keyop = inOutOp;
-		rkeyop = inOutOp;
-		dataop = inOutOp;
-	}
-
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out3;
-	if (locked_dbt_get(&lrkey, jnienv, dbenv, rkey, rkeyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
-		goto out1;
-	for (retry = 0; retry < 3; retry++) {
-		err = db->pget(db, dbtxnid, &lkey.javainfo->dbt,
-		    &lrkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-
-		/*
-		 * If we failed due to lack of memory in our DBT arrays,
-		 * retry.
-		 */
-		if (err != ENOMEM)
-			break;
-		if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
-		    !locked_dbt_realloc(&lrkey, jnienv, dbenv) &&
-		    !locked_dbt_realloc(&ldata, jnienv, dbenv))
-			break;
-	}
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lrkey, jnienv, dbenv);
- out3:
-	locked_dbt_put(&lkey, jnienv, dbenv);
- out4:
-	if (!DB_RETOK_DBGET(err)) {
-		if (verify_dbt(jnienv, err, &lkey) &&
-		    verify_dbt(jnienv, err, &lrkey) &&
-		    verify_dbt(jnienv, err, &ldata))
-			verify_return(jnienv, err, 0);
-	}
-	return (err);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
-{
-	int err;
-	DB *db;
-	DB_ENV *dbenv;
-	DB_TXN *dbtxnid;
-	LOCKED_DBT lkey, ldata;
-	OpKind keyop;
-
-	err = 0;
-	db = get_DB(jnienv, jthis);
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-	if (!verify_non_null(jnienv, db))
-		return (0);   /* error will be thrown, retval doesn't matter */
-	dbenv = db->dbenv;
-
-	/*
-	 * For DB_APPEND, the key may be output-only;  for all other flags,
-	 * it's input-only.
-	 */
-	if ((flags & DB_OPFLAGS_MASK) == DB_APPEND)
-		keyop = outOp;
-	else
-		keyop = inOp;
-
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
-		goto out1;
-
-	if (!verify_non_null(jnienv, db))
-		goto out1;
-
-	err = db->put(db,
-	    dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-	if (!DB_RETOK_DBPUT(err))
-		verify_return(jnienv, err, 0);
-
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lkey, jnienv, dbenv);
-	return (err);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
-  (JNIEnv *jnienv, /*Db*/ jobject jthis,
-   jstring file, jstring database, jint flags)
-{
-	int err;
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-	LOCKED_STRING ls_file;
-	LOCKED_STRING ls_database;
-
-	db = get_DB(jnienv, jthis);
-	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, db))
-		return;
-	if (locked_string_get(&ls_file, jnienv, file) != 0)
-		goto out2;
-	if (locked_string_get(&ls_database, jnienv, database) != 0)
-		goto out1;
-	err = db->remove(db, ls_file.string, ls_database.string, flags);
-
-	set_private_dbobj(jnienv, name_DB, jthis, 0);
-	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
-
- out1:
-	locked_string_put(&ls_database, jnienv);
- out2:
-	locked_string_put(&ls_file, jnienv);
-
-	dbji_dealloc(dbinfo, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
-  (JNIEnv *jnienv, /*Db*/ jobject jthis,
-   jstring file, jstring database, jstring newname, jint flags)
-{
-	int err;
-	DB *db;
-	DB_JAVAINFO *dbinfo;
-	LOCKED_STRING ls_file;
-	LOCKED_STRING ls_database;
-	LOCKED_STRING ls_newname;
-
-	db = get_DB(jnienv, jthis);
-	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-	if (locked_string_get(&ls_file, jnienv, file) != 0)
-		goto out3;
-	if (locked_string_get(&ls_database, jnienv, database) != 0)
-		goto out2;
-	if (locked_string_get(&ls_newname, jnienv, newname) != 0)
-		goto out1;
-
-	err = db->rename(db, ls_file.string, ls_database.string,
-			 ls_newname.string, flags);
-
-	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
-	set_private_dbobj(jnienv, name_DB, jthis, 0);
-
- out1:
-	locked_string_put(&ls_newname, jnienv);
- out2:
-	locked_string_put(&ls_database, jnienv);
- out3:
-	locked_string_put(&ls_file, jnienv);
-
-	dbji_dealloc(dbinfo, jnienv);
-}
-
-JAVADB_METHOD(Db_set_1pagesize, (JAVADB_ARGS, jlong pagesize), DB,
-    set_pagesize, (c_this, (u_int32_t)pagesize))
-JAVADB_METHOD(Db_set_1cachesize,
-    (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB,
-    set_cachesize, (c_this, gbytes, bytes, ncaches))
-JAVADB_METHOD(Db_set_1cache_1priority, (JAVADB_ARGS, jint priority), DB,
-    set_cache_priority, (c_this, (DB_CACHE_PRIORITY)priority))
-
-JNIEXPORT void JNICALL
-  Java_com_sleepycat_db_Db_set_1re_1source
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring re_source)
-{
-	int err;
-	DB *db;
-
-	db = get_DB(jnienv, jthis);
-	if (verify_non_null(jnienv, db)) {
-
-		/* XXX does the string from get_c_string ever get freed? */
-		if (re_source != NULL)
-			err = db->set_re_source(db,
-			    get_c_string(jnienv, re_source));
-		else
-			err = db->set_re_source(db, 0);
-
-		verify_return(jnienv, err, 0);
-	}
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
-  (JNIEnv *jnienv, jobject jthis, jint flags)
-{
-	DB *db;
-	DB_BTREE_STAT *bstp;
-	DB_HASH_STAT *hstp;
-	DB_QUEUE_STAT *qstp;
-	DBTYPE dbtype;
-	jobject retval;
-	jclass dbclass;
-	size_t bytesize;
-	void *statp;
-
-	bytesize = 0;
-	retval = NULL;
-	statp = NULL;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return (NULL);
-
-	if (verify_return(jnienv, db->stat(db, &statp, flags), 0) &&
-	    verify_return(jnienv, db->get_type(db, &dbtype), 0)) {
-		switch (dbtype) {
-			/* Btree and recno share the same stat structure */
-		case DB_BTREE:
-		case DB_RECNO:
-			bstp = (DB_BTREE_STAT *)statp;
-			bytesize = sizeof(DB_BTREE_STAT);
-			retval = create_default_object(jnienv,
-						       name_DB_BTREE_STAT);
-			if ((dbclass =
-			    get_class(jnienv, name_DB_BTREE_STAT)) == NULL)
-				break;	/* An exception has been posted. */
-
-			__jv_fill_bt_stat(jnienv, dbclass, retval, bstp);
-			break;
-
-			/* Hash stat structure */
-		case DB_HASH:
-			hstp = (DB_HASH_STAT *)statp;
-			bytesize = sizeof(DB_HASH_STAT);
-			retval = create_default_object(jnienv,
-						       name_DB_HASH_STAT);
-			if ((dbclass =
-			    get_class(jnienv, name_DB_HASH_STAT)) == NULL)
-				break;	/* An exception has been posted. */
-
-			__jv_fill_h_stat(jnienv, dbclass, retval, hstp);
-			break;
-
-		case DB_QUEUE:
-			qstp = (DB_QUEUE_STAT *)statp;
-			bytesize = sizeof(DB_QUEUE_STAT);
-			retval = create_default_object(jnienv,
-						       name_DB_QUEUE_STAT);
-			if ((dbclass =
-			    get_class(jnienv, name_DB_QUEUE_STAT)) == NULL)
-				break;	/* An exception has been posted. */
-
-			__jv_fill_qam_stat(jnienv, dbclass, retval, qstp);
-			break;
-
-			/* That's all the database types we're aware of! */
-		default:
-			report_exception(jnienv,
-					 "Db.stat not implemented for types"
-					 " other than BTREE, HASH, QUEUE,"
-					 " and RECNO",
-					 EINVAL, 0);
-			break;
-		}
-		if (bytesize != 0)
-			__os_ufree(db->dbenv, statp);
-	}
-	return (retval);
-}
-
-JAVADB_METHOD(Db_sync, (JAVADB_ARGS, jint flags), DB,
-    sync, (c_this, flags))
-
-JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
-  (JNIEnv *jnienv, /*Db*/ jobject jthis)
-{
-	DB *db;
-	int err, isbyteswapped;
-
-	/* This value should never be seen, because of the exception. */
-	isbyteswapped = 0;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return (0);
-
-	err = db->get_byteswapped(db, &isbyteswapped);
-	(void)verify_return(jnienv, err, 0);
-
-	return ((jboolean)isbyteswapped);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
-  (JNIEnv *jnienv, /*Db*/ jobject jthis)
-{
-	DB *db;
-	int err;
-	DBTYPE dbtype;
-
-	/* This value should never be seen, because of the exception. */
-	dbtype = DB_UNKNOWN;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return (0);
-
-	err = db->get_type(db, &dbtype);
-	(void)verify_return(jnienv, err, 0);
-
-	return ((jint)dbtype);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
-   jstring file, jstring database, jint type, jint flags, jint mode)
-{
-	int err;
-	DB *db;
-	DB_TXN *dbtxnid;
-	LOCKED_STRING ls_file;
-	LOCKED_STRING ls_database;
-
-	/* Java is assumed to be threaded */
-	flags |= DB_THREAD;
-
-	db = get_DB(jnienv, jthis);
-
-	dbtxnid = get_DB_TXN(jnienv, txnid);
-	if (locked_string_get(&ls_file, jnienv, file) != 0)
-		goto out2;
-	if (locked_string_get(&ls_database, jnienv, database) != 0)
-		goto out1;
-	if (verify_non_null(jnienv, db)) {
-		err = db->open(db, dbtxnid, ls_file.string, ls_database.string,
-			       (DBTYPE)type, flags, mode);
-		verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
-	}
- out1:
-	locked_string_put(&ls_database, jnienv);
- out2:
-	locked_string_put(&ls_file, jnienv);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxnid, jint flags)
-{
-	int err;
-	DB *db;
-	u_int32_t count;
-	DB_TXN *dbtxnid;
-
-	db = get_DB(jnienv, jthis);
-	dbtxnid = get_DB_TXN(jnienv, jtxnid);
-	count = 0;
-	if (verify_non_null(jnienv, db)) {
-		err = db->truncate(db, dbtxnid, &count, flags);
-		verify_return(jnienv, err, 0);
-	}
-	return (jint)count;
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
-   jint flags)
-{
-	int err;
-	DB *db = get_DB(jnienv, jthis);
-	LOCKED_STRING ls_name;
-
-	if (verify_non_null(jnienv, db)) {
-		if (locked_string_get(&ls_name, jnienv, name) != 0)
-			goto out;
-		err = db->upgrade(db, ls_name.string, flags);
-		verify_return(jnienv, err, 0);
-	}
- out:
-	locked_string_put(&ls_name, jnienv);
-}
-
-static int java_verify_callback(void *handle, const void *str_arg)
-{
-	char *str;
-	struct verify_callback_struct *vc;
-	int len;
-	JNIEnv *jnienv;
-
-	str = (char *)str_arg;
-	vc = (struct verify_callback_struct *)handle;
-	jnienv = vc->env;
-	len = strlen(str)+1;
-	if (len > vc->nbytes) {
-		vc->nbytes = len;
-		vc->bytes = (*jnienv)->NewByteArray(jnienv, len);
-	}
-
-	if (vc->bytes != NULL) {
-		(*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len,
-		    (jbyte*)str);
-		(*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
-		    vc->writemid, vc->bytes, 0, len-1);
-	}
-
-	if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
-		return (EIO);
-
-	return (0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
-  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
-   jstring subdb, jobject stream, jint flags)
-{
-	int err;
-	DB *db;
-	LOCKED_STRING ls_name;
-	LOCKED_STRING ls_subdb;
-	struct verify_callback_struct vcs;
-	jclass streamclass;
-
-	db = get_DB(jnienv, jthis);
-	if (!verify_non_null(jnienv, db))
-		return;
-	if (locked_string_get(&ls_name, jnienv, name) != 0)
-		goto out2;
-	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
-		goto out1;
-
-	/* set up everything we need for the callbacks */
-	vcs.env = jnienv;
-	vcs.streamobj = stream;
-	vcs.nbytes = 100;
-	if ((vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes)) == NULL)
-		goto out1;
-
-	/* get the method ID for OutputStream.write(byte[], int, int); */
-	streamclass = (*jnienv)->FindClass(jnienv, "java/io/OutputStream");
-	vcs.writemid = (*jnienv)->GetMethodID(jnienv, streamclass,
-					      "write", "([BII)V");
-
-	/* invoke verify - this will invoke the callback repeatedly. */
-	err = __db_verify_internal(db, ls_name.string, ls_subdb.string,
-				   &vcs, java_verify_callback, flags);
-	verify_return(jnienv, err, 0);
-
-out1:
-	locked_string_put(&ls_subdb, jnienv);
-out2:
-	locked_string_put(&ls_name, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
-    (JNIEnv *jnienv, jobject jthis,
-     jobject /*DbErrcall*/ errcall, jstring errpfx)
-{
-	DB_JAVAINFO *dbinfo;
-	DB *db;
-
-	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
-	db = get_DB(jnienv, jthis);
-	DB_ASSERT(dbinfo != NULL);
-
-	/*
-	 * Note: We can never be sure if the underlying DB is attached to
-	 * a DB_ENV that was already closed.  Sure, that's a user error,
-	 * but it shouldn't crash the VM.  Therefore, we cannot just
-	 * automatically close if the handle indicates we are not yet
-	 * closed.  The best we can do is detect this and report it.
-	 */
-	if (db != NULL) {
-		/* If this error occurs, this object was never closed. */
-		report_errcall(jnienv, errcall, errpfx,
-			       "Db.finalize: open Db object destroyed");
-	}
-
-	/* Shouldn't see this object again, but just in case */
-	set_private_dbobj(jnienv, name_DB, jthis, 0);
-	set_private_info(jnienv, name_DB, jthis, 0);
-
-	dbji_destroy(dbinfo, jnienv);
-}
diff --git a/storage/bdb/libdb_java/java_DbEnv.c b/storage/bdb/libdb_java/java_DbEnv.c
deleted file mode 100644
index 651c38a0e3d..00000000000
--- a/storage/bdb/libdb_java/java_DbEnv.c
+++ /dev/null
@@ -1,1450 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbEnv.c,v 11.105 2002/08/29 14:22:23 margo Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-#include "java_stat_auto.h"
-#include "com_sleepycat_db_DbEnv.h"
-
-/* We keep these lined up, and alphabetical by field name,
- * for comparison with C++'s list.
- */
-JAVADB_SET_METH_STR(DbEnv,   data_1dir, DB_ENV, data_dir)
-JAVADB_SET_METH(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
-JAVADB_SET_METH_STR(DbEnv,   lg_1dir, DB_ENV, lg_dir)
-JAVADB_SET_METH(DbEnv, jint, lg_1max, DB_ENV, lg_max)
-JAVADB_SET_METH(DbEnv, jint, lg_1regionmax, DB_ENV, lg_regionmax)
-JAVADB_SET_METH(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
-JAVADB_SET_METH(DbEnv, jint, lk_1max, DB_ENV, lk_max)
-JAVADB_SET_METH(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
-JAVADB_SET_METH(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
-JAVADB_SET_METH(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
-/* mp_mmapsize is declared below, it needs an extra cast */
-JAVADB_SET_METH_STR(DbEnv,   tmp_1dir, DB_ENV, tmp_dir)
-JAVADB_SET_METH(DbEnv, jint, tx_1max, DB_ENV, tx_max)
-
-static void DbEnv_errcall_callback(const char *prefix, char *message)
-{
-	JNIEnv *jnienv;
-	DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
-	jstring pre;
-
-	/*
-	 * Note: these error cases are "impossible", and would
-	 * normally warrant an exception.  However, without
-	 * a jnienv, we cannot throw an exception...
-	 * We don't want to trap or exit, since the point of
-	 * this facility is for the user to completely control
-	 * error situations.
-	 */
-	if (envinfo == NULL) {
-		/*
-		 * Something is *really* wrong here, the
-		 * prefix is set in every environment created.
-		 */
-		fprintf(stderr, "Error callback failed!\n");
-		fprintf(stderr, "error: %s\n", message);
-		return;
-	}
-
-	/* Should always succeed... */
-	jnienv = dbjie_get_jnienv(envinfo);
-
-	if (jnienv == NULL) {
-
-		/* But just in case... */
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		fprintf(stderr, "error: %s\n", message);
-		return;
-	}
-
-	pre = dbjie_get_errpfx(envinfo, jnienv);
-	report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
-}
-
-static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
-			     /*DbEnv*/ jobject jenv,
-			     /*DbErrcall*/ jobject jerrcall,
-			     int is_dbopen)
-{
-	DB_ENV_JAVAINFO *envinfo;
-
-	envinfo = get_DB_ENV_JAVAINFO(jnienv, jenv);
-	DB_ASSERT(envinfo == NULL);
-	envinfo = dbjie_construct(jnienv, jenv, jerrcall, is_dbopen);
-	set_private_info(jnienv, name_DB_ENV, jenv, envinfo);
-	dbenv->set_errpfx(dbenv, (const char*)envinfo);
-	dbenv->set_errcall(dbenv, DbEnv_errcall_callback);
-	dbenv->api2_internal = envinfo;
-	set_private_dbobj(jnienv, name_DB_ENV, jenv, dbenv);
-}
-
-/*
- * This is called when this DbEnv was made on behalf of a Db
- * created directly (without a parent DbEnv), and the Db is
- * being closed.  We'll zero out the pointer to the DB_ENV,
- * since it is no longer valid, to prevent mistakes.
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
-{
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (dbenvinfo != NULL)
-		dbjie_dealloc(dbenvinfo, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
-   /*DbEnvFeedback*/ jobject jfeedback)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv) ||
-	    !verify_non_null(jnienv, dbenvinfo))
-		return;
-
-	dbjie_set_feedback_object(dbenvinfo, jnienv, dbenv, jfeedback);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
-   jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-
-	err = db_env_create(&dbenv, flags);
-	if (verify_return(jnienv, err, 0))
-		DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
-   /*Db*/ jobject jdb)
-{
-	DB_ENV *dbenv;
-	DB *db;
-
-	db = get_DB(jnienv, jdb);
-	dbenv = db->dbenv;
-	DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home,
-   jint flags, jint mode)
-{
-	int err;
-	DB_ENV *dbenv;
-	LOCKED_STRING ls_home;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv) ||
-	    !verify_non_null(jnienv, dbenvinfo))
-		return;
-	if (locked_string_get(&ls_home, jnienv, db_home) != 0)
-		goto out;
-
-	/* Java is assumed to be threaded. */
-	flags |= DB_THREAD;
-
-	err = dbenv->open(dbenv, ls_home.string, flags, mode);
-	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
- out:
-	locked_string_put(&ls_home, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home, jint flags)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-	LOCKED_STRING ls_home;
-	int err = 0;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-	if (locked_string_get(&ls_home, jnienv, db_home) != 0)
-		goto out;
-
-	err = dbenv->remove(dbenv, ls_home.string, flags);
-	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
-
-	verify_return(jnienv, err, 0);
- out:
-	locked_string_put(&ls_home, jnienv);
-
-	if (dbenvinfo != NULL)
-		dbjie_dealloc(dbenvinfo, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-
-	err = dbenv->close(dbenv, flags);
-	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
-
-	if (dbenvinfo != NULL)
-		dbjie_dealloc(dbenvinfo, jnienv);
-
-	/* Throw an exception if the close failed. */
-	verify_return(jnienv, err, 0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
-   jstring name, jstring subdb, jint flags)
-{
-	LOCKED_STRING ls_name, ls_subdb;
-	DB_ENV *dbenv;
-	DB_TXN *txn;
-	int err;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-	txn = get_DB_TXN(jnienv, jtxn);
-	if (locked_string_get(&ls_name, jnienv, name) != 0)
-		return;
-	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
-		goto err1;
-
-	err = dbenv->dbremove(dbenv, txn, ls_name.string, ls_subdb.string,
-	    flags);
-
-	/* Throw an exception if the dbremove failed. */
-	verify_return(jnienv, err, 0);
-
-	locked_string_put(&ls_subdb, jnienv);
-err1:	locked_string_put(&ls_name, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
-   jstring name, jstring subdb, jstring newname, jint flags)
-{
-	LOCKED_STRING ls_name, ls_subdb, ls_newname;
-	DB_ENV *dbenv;
-	DB_TXN *txn;
-	int err;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-	txn = get_DB_TXN(jnienv, jtxn);
-	if (locked_string_get(&ls_name, jnienv, name) != 0)
-		return;
-	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
-		goto err2;
-	if (locked_string_get(&ls_newname, jnienv, newname) != 0)
-		goto err1;
-
-	err = dbenv->dbrename(dbenv, txn, ls_name.string, ls_subdb.string,
-	    ls_newname.string, flags);
-
-	/* Throw an exception if the dbrename failed. */
-	verify_return(jnienv, err, 0);
-
-	locked_string_put(&ls_newname, jnienv);
-err1:	locked_string_put(&ls_subdb, jnienv);
-err2:	locked_string_put(&ls_name, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint ecode, jstring msg)
-{
-	LOCKED_STRING ls_msg;
-	DB_ENV *dbenv;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-
-	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
-		goto out;
-
-	dbenv->err(dbenv, ecode, "%s", ls_msg.string);
- out:
-	locked_string_put(&ls_msg, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring msg)
-{
-	LOCKED_STRING ls_msg;
-	DB_ENV *dbenv;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-
-	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
-		goto out;
-
-	dbenv->errx(dbenv, "%s", ls_msg.string);
- out:
-	locked_string_put(&ls_msg, jnienv);
-}
-
-/*static*/
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
-  (JNIEnv *jnienv, jclass jthis_class, jint ecode)
-{
-	const char *message;
-
-	COMPQUIET(jthis_class, NULL);
-	message = db_strerror(ecode);
-	return (get_java_string(jnienv, message));
-}
-
-JAVADB_METHOD(DbEnv_set_1cachesize,
-    (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB_ENV,
-    set_cachesize, (c_this, gbytes, bytes, ncaches))
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jpasswd, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	LOCKED_STRING ls_passwd;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-	if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
-		goto out;
-
-	err = dbenv->set_encrypt(dbenv, ls_passwd.string, flags);
-	verify_return(jnienv, err, 0);
-
-out:	locked_string_put(&ls_passwd, jnienv);
-}
-
-JAVADB_METHOD(DbEnv_set_1flags,
-    (JAVADB_ARGS, jint flags, jboolean onoff), DB_ENV,
-    set_flags, (c_this, flags, onoff ? 1 : 0))
-
-JAVADB_METHOD(DbEnv_set_1mp_1mmapsize, (JAVADB_ARGS, jlong value), DB_ENV,
-    set_mp_mmapsize, (c_this, (size_t)value))
-
-JAVADB_METHOD(DbEnv_set_1tas_1spins, (JAVADB_ARGS, jint spins), DB_ENV,
-    set_tas_spins, (c_this, (u_int32_t)spins))
-
-JAVADB_METHOD(DbEnv_set_1timeout,
-    (JAVADB_ARGS, jlong timeout, jint flags), DB_ENV,
-    set_timeout, (c_this, (u_int32_t)timeout, flags))
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-	int err;
-	jsize i, len;
-	u_char *newarr;
-	int bytesize;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv) ||
-	    !verify_non_null(jnienv, dbenvinfo))
-		return;
-
-	len = (*jnienv)->GetArrayLength(jnienv, array);
-	bytesize = sizeof(u_char) * len * len;
-
-	if ((err = __os_malloc(dbenv, bytesize, &newarr)) != 0) {
-		if (!verify_return(jnienv, err, 0))
-			return;
-	}
-
-	for (i=0; iGetObjectArrayElement(jnienv, array, i);
-		(*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
-					      0, len,
-					      (jbyte *)&newarr[i*len]);
-	}
-	dbjie_set_conflict(dbenvinfo, newarr, bytesize);
-	err = dbenv->set_lk_conflicts(dbenv, newarr, len);
-	verify_return(jnienv, err, 0);
-}
-
-JNIEXPORT jint JNICALL
-  Java_com_sleepycat_db_DbEnv_rep_1elect
-  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint nsites, jint pri,
-   jint timeout)
-{
-	DB_ENV *dbenv;
-	int err, id;
-
-	if (!verify_non_null(jnienv, jthis))
-		return (DB_EID_INVALID);
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-
-	err = dbenv->rep_elect(dbenv, (int)nsites,
-	    (int)pri, (u_int32_t)timeout, &id);
-	verify_return(jnienv, err, 0);
-
-	return ((jint)id);
-}
-
-JNIEXPORT jint JNICALL
-  Java_com_sleepycat_db_DbEnv_rep_1process_1message
-  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject control,
-  /* Dbt */ jobject rec, /* RepProcessMessage */ jobject result)
-{
-	DB_ENV *dbenv;
-	LOCKED_DBT cdbt, rdbt;
-	int err, envid;
-
-	if (!verify_non_null(jnienv, jthis) || !verify_non_null(jnienv, result))
-		return (-1);
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	err = 0;
-
-	/* The DBTs are always inputs. */
-	if (locked_dbt_get(&cdbt, jnienv, dbenv, control, inOp) != 0)
-		goto out2;
-	if (locked_dbt_get(&rdbt, jnienv, dbenv, rec, inOp) != 0)
-		goto out1;
-
-	envid = (*jnienv)->GetIntField(jnienv,
-	    result, fid_RepProcessMessage_envid);
-
-	err = dbenv->rep_process_message(dbenv, &cdbt.javainfo->dbt,
-	    &rdbt.javainfo->dbt, &envid);
-
-	if (err == DB_REP_NEWMASTER)
-		(*jnienv)->SetIntField(jnienv,
-		    result, fid_RepProcessMessage_envid, envid);
-	else if (!DB_RETOK_REPPMSG(err))
-		verify_return(jnienv, err, 0);
-
-out1:	locked_dbt_put(&rdbt, jnienv, dbenv);
-out2:	locked_dbt_put(&cdbt, jnienv, dbenv);
-
-	return (err);
-}
-
-JNIEXPORT void JNICALL
-  Java_com_sleepycat_db_DbEnv_rep_1start
-  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject cookie,
-   jint flags)
-{
-	DB_ENV *dbenv;
-	DBT *dbtp;
-	LOCKED_DBT ldbt;
-	int err;
-
-	if (!verify_non_null(jnienv, jthis))
-		return;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-
-	/* The Dbt cookie may be null;  if so, pass in a NULL DBT. */
-	if (cookie != NULL) {
-		if (locked_dbt_get(&ldbt, jnienv, dbenv, cookie, inOp) != 0)
-			goto out;
-		dbtp = &ldbt.javainfo->dbt;
-	} else
-		dbtp = NULL;
-
-	err = dbenv->rep_start(dbenv, dbtp, flags);
-	verify_return(jnienv, err, 0);
-
-out:	if (cookie != NULL)
-		locked_dbt_put(&ldbt, jnienv, dbenv);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	DB_REP_STAT *statp = NULL;
-	jobject retval = NULL;
-	jclass dbclass;
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->rep_stat(dbenv, &statp, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		if ((dbclass = get_class(jnienv, name_DB_REP_STAT)) == NULL ||
-		    (retval =
-		      create_default_object(jnienv, name_DB_REP_STAT)) == NULL)
-			goto err;	/* An exception has been posted. */
-
-		__jv_fill_rep_stat(jnienv, dbclass, retval, statp);
-
-err:		__os_ufree(dbenv, statp);
-	}
-	return (retval);
-}
-
-JNIEXPORT void JNICALL
-Java_com_sleepycat_db_DbEnv_set_1rep_1limit
-  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint gbytes, jint bytes)
-{
-	DB_ENV *dbenv;
-	int err;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-
-	if (verify_non_null(jnienv, dbenv)) {
-		err = dbenv->set_rep_limit(dbenv,
-		    (u_int32_t)gbytes, (u_int32_t)bytes);
-		verify_return(jnienv, err, 0);
-	}
-}
-
-JNIEXPORT void JNICALL
-  Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint envid,
-   /* DbRepTransport */ jobject jreptransport)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv) ||
-	    !verify_non_null(jnienv, dbenvinfo) ||
-	    !verify_non_null(jnienv, jreptransport))
-		return;
-
-	dbjie_set_rep_transport_object(dbenvinfo,
-	    jnienv, dbenv, envid, jreptransport);
-}
-
-JNIEXPORT void JNICALL
-  Java_com_sleepycat_db_DbEnv_set_1rpc_1server
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbClient*/ jobject jclient,
-   jstring jhost, jlong tsec, jlong ssec, jint flags)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	const char *host = (*jnienv)->GetStringUTFChars(jnienv, jhost, NULL);
-
-	if (jclient != NULL) {
-		report_exception(jnienv, "DbEnv.set_rpc_server client arg "
-				 "must be null; reserved for future use",
-				 EINVAL, 0);
-		return;
-	}
-	if (verify_non_null(jnienv, dbenv)) {
-		err = dbenv->set_rpc_server(dbenv, NULL, host,
-					(long)tsec, (long)ssec, flags);
-
-		/* Throw an exception if the call failed. */
-		verify_return(jnienv, err, 0);
-	}
-}
-
-JAVADB_METHOD(DbEnv_set_1shm_1key, (JAVADB_ARGS, jlong shm_key), DB_ENV,
-    set_shm_key, (c_this, (long)shm_key))
-
-JNIEXPORT void JNICALL
-  Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong seconds)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	time_t time = seconds;
-
-	if (verify_non_null(jnienv, dbenv)) {
-		err = dbenv->set_tx_timestamp(dbenv, &time);
-
-		/* Throw an exception if the call failed. */
-		verify_return(jnienv, err, 0);
-	}
-}
-
-JAVADB_METHOD(DbEnv_set_1verbose,
-    (JAVADB_ARGS, jint which, jboolean onoff), DB_ENV,
-    set_verbose, (c_this, which, onoff ? 1 : 0))
-
-/*static*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
-  (JNIEnv * jnienv, jclass this_class)
-{
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(this_class, NULL);
-
-	return (DB_VERSION_MAJOR);
-}
-
-/*static*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
-  (JNIEnv * jnienv, jclass this_class)
-{
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(this_class, NULL);
-
-	return (DB_VERSION_MINOR);
-}
-
-/*static*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
-  (JNIEnv * jnienv, jclass this_class)
-{
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(this_class, NULL);
-
-	return (DB_VERSION_PATCH);
-}
-
-/*static*/
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
-  (JNIEnv *jnienv, jclass this_class)
-{
-	COMPQUIET(this_class, NULL);
-
-	return ((*jnienv)->NewStringUTF(jnienv, DB_VERSION_STRING));
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
-{
-	int err;
-	u_int32_t id;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (-1);
-	err = dbenv->lock_id(dbenv, &id);
-	verify_return(jnienv, err, 0);
-	return (id);
-}
-
-JAVADB_METHOD(DbEnv_lock_1id_1free, (JAVADB_ARGS, jint id), DB_ENV,
-    lock_id_free, (c_this, id))
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	DB_LOCK_STAT *statp = NULL;
-	jobject retval = NULL;
-	jclass dbclass;
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->lock_stat(dbenv, &statp, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		if ((dbclass = get_class(jnienv, name_DB_LOCK_STAT)) == NULL ||
-		    (retval =
-		      create_default_object(jnienv, name_DB_LOCK_STAT)) == NULL)
-			goto err;	/* An exception has been posted. */
-
-		__jv_fill_lock_stat(jnienv, dbclass, retval, statp);
-
-err:		__os_ufree(dbenv, statp);
-	}
-	return (retval);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint atype, jint flags)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	int aborted;
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (0);
-	err = dbenv->lock_detect(dbenv, atype, flags, &aborted);
-	verify_return(jnienv, err, 0);
-	return (aborted);
-}
-
-JNIEXPORT /*DbLock*/ jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
-   jint flags, /*const Dbt*/ jobject obj, /*db_lockmode_t*/ jint lock_mode)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_LOCK *dblock;
-	LOCKED_DBT lobj;
-	/*DbLock*/ jobject retval;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	if ((err = __os_malloc(dbenv, sizeof(DB_LOCK), &dblock)) != 0)
-		if (!verify_return(jnienv, err, 0))
-			return (NULL);
-
-	memset(dblock, 0, sizeof(DB_LOCK));
-	err = 0;
-	retval = NULL;
-	if (locked_dbt_get(&lobj, jnienv, dbenv, obj, inOp) != 0)
-		goto out;
-
-	err = dbenv->lock_get(dbenv, locker, flags, &lobj.javainfo->dbt,
-		       (db_lockmode_t)lock_mode, dblock);
-
-	if (err == DB_LOCK_NOTGRANTED)
-		report_notgranted_exception(jnienv,
-					    "DbEnv.lock_get not granted",
-					    DB_LOCK_GET, lock_mode, obj,
-					    NULL, -1);
-	else if (verify_return(jnienv, err, 0)) {
-		retval = create_default_object(jnienv, name_DB_LOCK);
-		set_private_dbobj(jnienv, name_DB_LOCK, retval, dblock);
-	}
-
- out:
-	locked_dbt_put(&lobj, jnienv, dbenv);
-	return (retval);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
-   jint flags, /*const Dbt*/ jobjectArray list, jint offset, jint count)
-{
-	DB_ENV *dbenv;
-	DB_LOCKREQ *lockreq;
-	DB_LOCKREQ *prereq;	/* preprocessed requests */
-	DB_LOCKREQ *failedreq;
-	DB_LOCK *lockp;
-	LOCKED_DBT *locked_dbts;
-	int err;
-	int alloc_err;
-	int i;
-	size_t bytesize;
-	size_t ldbtsize;
-	jobject jlockreq;
-	db_lockop_t op;
-	jobject jobj;
-	jobject jlock;
-	int completed;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		goto out0;
-
-	if ((*jnienv)->GetArrayLength(jnienv, list) < offset + count) {
-		report_exception(jnienv,
-				 "DbEnv.lock_vec array not large enough",
-				 0, 0);
-		goto out0;
-	}
-
-	bytesize = sizeof(DB_LOCKREQ) * count;
-	if ((err = __os_malloc(dbenv, bytesize, &lockreq)) != 0) {
-		verify_return(jnienv, err, 0);
-		goto out0;
-	}
-	memset(lockreq, 0, bytesize);
-
-	ldbtsize = sizeof(LOCKED_DBT) * count;
-	if ((err = __os_malloc(dbenv, ldbtsize, &locked_dbts)) != 0) {
-		verify_return(jnienv, err, 0);
-		goto out1;
-	}
-	memset(lockreq, 0, ldbtsize);
-	prereq = &lockreq[0];
-
-	/* fill in the lockreq array */
-	for (i = 0, prereq = &lockreq[0]; i < count; i++, prereq++) {
-		jlockreq = (*jnienv)->GetObjectArrayElement(jnienv, list,
-		    offset + i);
-		if (jlockreq == NULL) {
-			report_exception(jnienv,
-					 "DbEnv.lock_vec list entry is null",
-					 0, 0);
-			goto out2;
-		}
-		op = (*jnienv)->GetIntField(jnienv, jlockreq,
-		    fid_DbLockRequest_op);
-		prereq->op = op;
-
-		switch (op) {
-		case DB_LOCK_GET_TIMEOUT:
-			/* Needed: mode, timeout, obj.  Returned: lock. */
-			prereq->op = (*jnienv)->GetIntField(jnienv, jlockreq,
-			    fid_DbLockRequest_timeout);
-			/* FALLTHROUGH */
-		case DB_LOCK_GET:
-			/* Needed: mode, obj.  Returned: lock. */
-			prereq->mode = (*jnienv)->GetIntField(jnienv, jlockreq,
-			    fid_DbLockRequest_mode);
-			jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
-			    fid_DbLockRequest_obj);
-			if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
-			    dbenv, jobj, inOp)) != 0)
-				goto out2;
-			prereq->obj = &locked_dbts[i].javainfo->dbt;
-			break;
-		case DB_LOCK_PUT:
-			/* Needed: lock.  Ignored: mode, obj. */
-			jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
-				fid_DbLockRequest_lock);
-			if (!verify_non_null(jnienv, jlock))
-				goto out2;
-			lockp = get_DB_LOCK(jnienv, jlock);
-			if (!verify_non_null(jnienv, lockp))
-				goto out2;
-
-			prereq->lock = *lockp;
-			break;
-		case DB_LOCK_PUT_ALL:
-		case DB_LOCK_TIMEOUT:
-			/* Needed: (none).  Ignored: lock, mode, obj. */
-			break;
-		case DB_LOCK_PUT_OBJ:
-			/* Needed: obj.  Ignored: lock, mode. */
-			jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
-				fid_DbLockRequest_obj);
-			if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
-					   dbenv, jobj, inOp)) != 0)
-				goto out2;
-			prereq->obj = &locked_dbts[i].javainfo->dbt;
-			break;
-		default:
-			report_exception(jnienv,
-					 "DbEnv.lock_vec bad op value",
-					 0, 0);
-			goto out2;
-		}
-	}
-
-	err = dbenv->lock_vec(dbenv, locker, flags, lockreq, count, &failedreq);
-	if (err == 0)
-		completed = count;
-	else
-		completed = failedreq - lockreq;
-
-	/* do post processing for any and all requests that completed */
-	for (i = 0; i < completed; i++) {
-		op = lockreq[i].op;
-		if (op == DB_LOCK_PUT) {
-			/*
-			 * After a successful put, the DbLock can no longer
-			 * be used, so we release the storage related to it.
-			 */
-			jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
-			    list, i + offset);
-			jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
-				fid_DbLockRequest_lock);
-			lockp = get_DB_LOCK(jnienv, jlock);
-			__os_free(NULL, lockp);
-			set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
-		}
-		else if (op == DB_LOCK_GET) {
-			/*
-			 * Store the lock that was obtained.
-			 * We need to create storage for it since
-			 * the lockreq array only exists during this
-			 * method call.
-			 */
-			alloc_err = __os_malloc(dbenv, sizeof(DB_LOCK), &lockp);
-			if (!verify_return(jnienv, alloc_err, 0))
-				goto out2;
-
-			*lockp = lockreq[i].lock;
-
-			jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
-			    list, i + offset);
-			jlock = create_default_object(jnienv, name_DB_LOCK);
-			set_private_dbobj(jnienv, name_DB_LOCK, jlock, lockp);
-			(*jnienv)->SetObjectField(jnienv, jlockreq,
-						  fid_DbLockRequest_lock,
-						  jlock);
-		}
-	}
-
-	/* If one of the locks was not granted, build the exception now. */
-	if (err == DB_LOCK_NOTGRANTED && i < count) {
-		jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
-							    list, i + offset);
-		jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
-						 fid_DbLockRequest_obj);
-		jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
-						  fid_DbLockRequest_lock);
-		report_notgranted_exception(jnienv,
-					    "DbEnv.lock_vec incomplete",
-					    lockreq[i].op,
-					    lockreq[i].mode,
-					    jobj,
-					    jlock,
-					    i);
-	}
-	else
-		verify_return(jnienv, err, 0);
-
- out2:
-	/* Free the dbts that we have locked */
-	for (i = 0 ; i < (prereq - lockreq); i++) {
-		if ((op = lockreq[i].op) == DB_LOCK_GET ||
-		    op == DB_LOCK_PUT_OBJ)
-			locked_dbt_put(&locked_dbts[i], jnienv, dbenv);
-	}
-	__os_free(dbenv, locked_dbts);
-
- out1:
-	__os_free(dbenv, lockreq);
-
- out0:
-	return;
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
-  (JNIEnv *jnienv, jobject jthis, /*DbLock*/ jobject jlock)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_LOCK *dblock;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-
-	dblock = get_DB_LOCK(jnienv, jlock);
-	if (!verify_non_null(jnienv, dblock))
-		return;
-
-	err = dbenv->lock_put(dbenv, dblock);
-	if (verify_return(jnienv, err, 0)) {
-		/*
-		 * After a successful put, the DbLock can no longer
-		 * be used, so we release the storage related to it
-		 * (allocated in DbEnv.lock_get()).
-		 */
-		__os_free(NULL, dblock);
-
-		set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
-	}
-}
-
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err, len, i;
-	char** ret;
-	jclass stringClass;
-	jobjectArray strarray;
-	DB_ENV *dbenv;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	strarray = NULL;
-	if (!verify_non_null(jnienv, dbenv))
-		return (0);
-	err = dbenv->log_archive(dbenv, &ret, flags);
-	if (!verify_return(jnienv, err, 0))
-		return (0);
-
-	if (ret != NULL) {
-		len = 0;
-		while (ret[len] != NULL)
-			len++;
-		stringClass = (*jnienv)->FindClass(jnienv, "java/lang/String");
-		if ((strarray = (*jnienv)->NewObjectArray(jnienv,
-		    len, stringClass, 0)) == NULL)
-			goto out;
-		for (i=0; iNewStringUTF(jnienv, ret[i]);
-			(*jnienv)->SetObjectArrayElement(jnienv, strarray,
-			     i, str);
-		}
-	}
-out:	return (strarray);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
-  (JNIEnv *jnienv, jclass jthis_class,
-   /*DbLsn*/ jobject lsn0, /*DbLsn*/ jobject lsn1)
-{
-	DB_LSN *dblsn0;
-	DB_LSN *dblsn1;
-
-	COMPQUIET(jthis_class, NULL);
-	dblsn0 = get_DB_LSN(jnienv, lsn0);
-	dblsn1 = get_DB_LSN(jnienv, lsn1);
-
-	return (log_compare(dblsn0, dblsn1));
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_LOGC *dblogc;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-	err = dbenv->log_cursor(dbenv, &dblogc, flags);
-	verify_return(jnienv, err, 0);
-	return (get_DbLogc(jnienv, dblogc));
-}
-
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
-	char filename[FILENAME_MAX+1] = "";
-
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->log_file(dbenv, dblsn, filename, FILENAME_MAX);
-	verify_return(jnienv, err, 0);
-	filename[FILENAME_MAX] = '\0'; /* just to be sure */
-	return (get_java_string(jnienv, filename));
-}
-
-JAVADB_METHOD(DbEnv_log_1flush,
-    (JAVADB_ARGS, /*DbLsn*/ jobject lsn), DB_ENV,
-    log_flush, (c_this, get_DB_LSN(jnienv, lsn)))
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
-   /*DbDbt*/ jobject data, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_LSN *dblsn;
-	LOCKED_DBT ldata;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dblsn = get_DB_LSN(jnienv, lsn);
-	if (!verify_non_null(jnienv, dbenv))
-		return;
-
-	/* log_put's DB_LSN argument may not be NULL. */
-	if (!verify_non_null(jnienv, dblsn))
-		return;
-
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
-		goto out;
-
-	err = dbenv->log_put(dbenv, dblsn, &ldata.javainfo->dbt, flags);
-	verify_return(jnienv, err, 0);
- out:
-	locked_dbt_put(&ldata, jnienv, dbenv);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_LOG_STAT *statp;
-	jobject retval;
-	jclass dbclass;
-
-	retval = NULL;
-	statp = NULL;
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->log_stat(dbenv, &statp, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		if ((dbclass = get_class(jnienv, name_DB_LOG_STAT)) == NULL ||
-		    (retval =
-		       create_default_object(jnienv, name_DB_LOG_STAT)) == NULL)
-			goto err;	/* An exception has been posted. */
-
-		__jv_fill_log_stat(jnienv, dbclass, retval, statp);
-
-err:		__os_ufree(dbenv, statp);
-	}
-	return (retval);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	jclass dbclass;
-	DB_ENV *dbenv;
-	DB_MPOOL_STAT *statp;
-	jobject retval;
-
-	retval = NULL;
-	statp = NULL;
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->memp_stat(dbenv, &statp, 0, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		if ((dbclass = get_class(jnienv, name_DB_MPOOL_STAT)) == NULL ||
-		    (retval =
-		     create_default_object(jnienv, name_DB_MPOOL_STAT)) == NULL)
-			goto err;	/* An exception has been posted. */
-
-		__jv_fill_mpool_stat(jnienv, dbclass, retval, statp);
-
-err:		__os_ufree(dbenv, statp);
-	}
-	return (retval);
-}
-
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err, i, len;
-	jclass fstat_class;
-	DB_ENV *dbenv;
-	DB_MPOOL_FSTAT **fstatp;
-	jobjectArray retval;
-	jfieldID filename_id;
-	jstring jfilename;
-
-	fstatp = NULL;
-	retval = NULL;
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->memp_stat(dbenv, 0, &fstatp, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		len = 0;
-		while (fstatp[len] != NULL)
-			len++;
-		if ((fstat_class =
-			get_class(jnienv, name_DB_MPOOL_FSTAT)) == NULL ||
-		    (retval = (*jnienv)->NewObjectArray(jnienv, len,
-			fstat_class, 0)) == NULL)
-			goto err;
-		for (i=0; iSetObjectArrayElement(jnienv, retval,
-			    i, obj);
-
-			/* Set the string field. */
-			filename_id = (*jnienv)->GetFieldID(jnienv,
-			    fstat_class, "file_name", string_signature);
-			jfilename = get_java_string(jnienv,
-			    fstatp[i]->file_name);
-			(*jnienv)->SetObjectField(jnienv, obj,
-			    filename_id, jfilename);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_pagesize", fstatp[i]->st_pagesize);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_cache_hit", fstatp[i]->st_cache_hit);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_cache_miss", fstatp[i]->st_cache_miss);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_map", fstatp[i]->st_map);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_page_create", fstatp[i]->st_page_create);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_page_in", fstatp[i]->st_page_in);
-			set_int_field(jnienv, fstat_class, obj,
-			    "st_page_out", fstatp[i]->st_page_out);
-			__os_ufree(dbenv, fstatp[i]);
-		}
-err:		__os_ufree(dbenv, fstatp);
-	}
-	return (retval);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint pct)
-{
-	int err;
-	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-	int result = 0;
-
-	if (verify_non_null(jnienv, dbenv)) {
-		err = dbenv->memp_trickle(dbenv, pct, &result);
-		verify_return(jnienv, err, 0);
-	}
-	return (result);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject pid, jint flags)
-{
-	int err;
-	DB_TXN *dbpid, *result;
-	DB_ENV *dbenv;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (0);
-
-	dbpid = get_DB_TXN(jnienv, pid);
-	result = 0;
-
-	err = dbenv->txn_begin(dbenv, dbpid, &result, flags);
-	if (!verify_return(jnienv, err, 0))
-		return (0);
-	return (get_DbTxn(jnienv, result));
-}
-
-JAVADB_METHOD(DbEnv_txn_1checkpoint,
-    (JAVADB_ARGS, jint kbyte, jint min, jint flags), DB_ENV,
-    txn_checkpoint, (c_this, kbyte, min, flags))
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jappdispatch)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv) ||
-	    !verify_non_null(jnienv, dbenvinfo))
-		return;
-
-	dbjie_set_app_dispatch_object(dbenvinfo, jnienv, dbenv, jappdispatch);
-}
-
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint count, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_PREPLIST *preps;
-	long retcount;
-	int i;
-	char signature[128];
-	size_t bytesize;
-	jobject retval;
-	jobject obj;
-	jobject txnobj;
-	jbyteArray bytearr;
-	jclass preplist_class;
-	jfieldID txn_fieldid;
-	jfieldID gid_fieldid;
-
-	retval = NULL;
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	/*
-	 * We need to allocate some local storage for the
-	 * returned preplist, and that requires us to do
-	 * our own argument validation.
-	 */
-	if (count <= 0) {
-		verify_return(jnienv, EINVAL, 0);
-		goto out;
-	}
-
-	bytesize = sizeof(DB_PREPLIST) * count;
-	if ((err = __os_malloc(dbenv, bytesize, &preps)) != 0) {
-		verify_return(jnienv, err, 0);
-		goto out;
-	}
-
-	err = dbenv->txn_recover(dbenv, preps, count, &retcount, flags);
-
-	if (verify_return(jnienv, err, 0)) {
-		if ((preplist_class =
-		    get_class(jnienv, name_DB_PREPLIST)) == NULL ||
-		    (retval = (*jnienv)->NewObjectArray(jnienv, retcount,
-		    preplist_class, 0)) == NULL)
-			goto err;
-
-		(void)snprintf(signature, sizeof(signature),
-		    "L%s%s;", DB_PACKAGE_NAME, name_DB_TXN);
-		txn_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
-						    "txn", signature);
-		gid_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
-						    "gid", "[B");
-
-		for (i=0; iSetObjectArrayElement(jnienv,
-			    retval, i, obj);
-
-			/* Set the txn field. */
-			txnobj = get_DbTxn(jnienv, preps[i].txn);
-			(*jnienv)->SetObjectField(jnienv,
-			    obj, txn_fieldid, txnobj);
-
-			/* Build the gid array and set the field. */
-			if ((bytearr = (*jnienv)->NewByteArray(jnienv,
-			    sizeof(preps[i].gid))) == NULL)
-				goto err;
-			(*jnienv)->SetByteArrayRegion(jnienv, bytearr, 0,
-			    sizeof(preps[i].gid), (jbyte *)&preps[i].gid[0]);
-			(*jnienv)->SetObjectField(jnienv, obj,
-			    gid_fieldid, bytearr);
-		}
-	}
-err:	__os_free(dbenv, preps);
-out:	return (retval);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
-{
-	int err;
-	DB_ENV *dbenv;
-	DB_TXN_STAT *statp;
-	jobject retval, obj;
-	jclass dbclass, active_class;
-	char active_signature[512];
-	jfieldID arrid;
-	jobjectArray actives;
-	unsigned int i;
-
-	retval = NULL;
-	statp = NULL;
-	dbenv = get_DB_ENV(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbenv))
-		return (NULL);
-
-	err = dbenv->txn_stat(dbenv, &statp, (u_int32_t)flags);
-	if (verify_return(jnienv, err, 0)) {
-		if ((dbclass = get_class(jnienv, name_DB_TXN_STAT)) == NULL ||
-		    (retval =
-		       create_default_object(jnienv, name_DB_TXN_STAT)) == NULL)
-			goto err;
-
-		/* Set the individual fields */
-		__jv_fill_txn_stat(jnienv, dbclass, retval, statp);
-
-		if ((active_class =
-		    get_class(jnienv, name_DB_TXN_STAT_ACTIVE)) == NULL ||
-		    (actives = (*jnienv)->NewObjectArray(jnienv,
-		    statp->st_nactive, active_class, 0)) == NULL)
-			goto err;
-
-		/*
-		 * Set the st_txnarray field.  This is a little more involved
-		 * than other fields, since the type is an array, so none
-		 * of our utility functions help.
-		 */
-		(void)snprintf(active_signature, sizeof(active_signature),
-		    "[L%s%s;", DB_PACKAGE_NAME, name_DB_TXN_STAT_ACTIVE);
-
-		arrid = (*jnienv)->GetFieldID(jnienv, dbclass, "st_txnarray",
-					      active_signature);
-		(*jnienv)->SetObjectField(jnienv, retval, arrid, actives);
-
-		/* Now fill the in the elements of st_txnarray. */
-		for (i=0; ist_nactive; i++) {
-			obj = create_default_object(jnienv,
-						name_DB_TXN_STAT_ACTIVE);
-			(*jnienv)->SetObjectArrayElement(jnienv,
-						actives, i, obj);
-
-			set_int_field(jnienv, active_class, obj,
-				      "txnid", statp->st_txnarray[i].txnid);
-			set_int_field(jnienv, active_class, obj, "parentid",
-				      statp->st_txnarray[i].parentid);
-			set_lsn_field(jnienv, active_class, obj,
-				      "lsn", statp->st_txnarray[i].lsn);
-		}
-
-err:		__os_ufree(dbenv, statp);
-	}
-	return (retval);
-}
-
-/* See discussion on errpfx, errcall in DB_ENV_JAVAINFO */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject errcall)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-
-	if (verify_non_null(jnienv, dbenv) &&
-	    verify_non_null(jnienv, dbenvinfo)) {
-		dbjie_set_errcall(dbenvinfo, jnienv, errcall);
-	}
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
-  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring str)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *dbenvinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-
-	if (verify_non_null(jnienv, dbenv) &&
-	    verify_non_null(jnienv, dbenvinfo)) {
-		dbjie_set_errpfx(dbenvinfo, jnienv, str);
-	}
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
-    (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
-     jobject /*DbErrcall*/ errcall, jstring errpfx)
-{
-	DB_ENV *dbenv;
-	DB_ENV_JAVAINFO *envinfo;
-
-	dbenv = get_DB_ENV(jnienv, jthis);
-	envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
-	DB_ASSERT(envinfo != NULL);
-
-	/* Note:  We detect and report unclosed DbEnvs. */
-	if (dbenv != NULL && envinfo != NULL && !dbjie_is_dbopen(envinfo)) {
-
-		/* If this error occurs, this object was never closed. */
-		report_errcall(jnienv, errcall, errpfx,
-			       "DbEnv.finalize: open DbEnv object destroyed");
-	}
-
-	/* Shouldn't see this object again, but just in case */
-	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
-	set_private_info(jnienv, name_DB_ENV, jthis, 0);
-
-	dbjie_destroy(envinfo, jnienv);
-}
diff --git a/storage/bdb/libdb_java/java_DbLock.c b/storage/bdb/libdb_java/java_DbLock.c
deleted file mode 100644
index 00a9836bfa0..00000000000
--- a/storage/bdb/libdb_java/java_DbLock.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbLock.c,v 11.12 2002/02/28 21:27:38 ubell Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_DbLock.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
-  (JNIEnv *jnienv, jobject jthis)
-{
-	DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
-	if (dblock) {
-		/* Free any data related to DB_LOCK here */
-		__os_free(NULL, dblock);
-	}
-	set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0); /* paranoia */
-}
diff --git a/storage/bdb/libdb_java/java_DbLogc.c b/storage/bdb/libdb_java/java_DbLogc.c
deleted file mode 100644
index 69294d9baac..00000000000
--- a/storage/bdb/libdb_java/java_DbLogc.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbLogc.c,v 11.6 2002/07/02 12:03:03 mjc Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-#ifdef DIAGNOSTIC
-#include 
-#endif
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_DbLogc.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
-  (JNIEnv *jnienv, jobject jthis, jint flags)
-{
-	int err;
-	DB_LOGC *dblogc = get_DB_LOGC(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, dblogc))
-		return;
-	err = dblogc->close(dblogc, flags);
-	if (verify_return(jnienv, err, 0)) {
-		set_private_dbobj(jnienv, name_DB_LOGC, jthis, 0);
-	}
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
-  (JNIEnv *jnienv, jobject jthis,
-   /*DbLsn*/ jobject lsn, /*Dbt*/ jobject data, jint flags)
-{
-	int err, retry;
-	DB_LOGC *dblogc;
-	DB_LSN *dblsn;
-	LOCKED_DBT ldata;
-	OpKind dataop;
-
-	/*
-	 * Depending on flags, the user may be supplying the key,
-	 * or else we may have to retrieve it.
-	 */
-	err = 0;
-	dataop = outOp;
-
-	dblogc = get_DB_LOGC(jnienv, jthis);
-	dblsn = get_DB_LSN(jnienv, lsn);
-	if (locked_dbt_get(&ldata, jnienv, dblogc->dbenv, data, dataop) != 0)
-		goto out1;
-
-	if (!verify_non_null(jnienv, dblogc))
-		goto out1;
-
-	for (retry = 0; retry < 3; retry++) {
-		err = dblogc->get(dblogc, dblsn, &ldata.javainfo->dbt, flags);
-
-		/*
-		 * If we failed due to lack of memory in our DBT arrays,
-		 * retry.
-		 */
-		if (err != ENOMEM)
-			break;
-		if (!locked_dbt_realloc(&ldata, jnienv, dblogc->dbenv))
-			break;
-	}
- out1:
-	locked_dbt_put(&ldata, jnienv, dblogc->dbenv);
-	if (!DB_RETOK_LGGET(err)) {
-		if (verify_dbt(jnienv, err, &ldata))
-			verify_return(jnienv, err, 0);
-	}
-	return (err);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
-  (JNIEnv *jnienv, jobject jthis)
-{
-	/*
-	 * Free any data related to DB_LOGC here.
-	 * If we ever have java-only data embedded in the DB_LOGC
-	 * and need to do this, we'll have to track DbLogc's
-	 * according to which DbEnv owns them, just as
-	 * we track Db's according to which DbEnv owns them.
-	 * That's necessary to avoid double freeing that
-	 * comes about when closes interact with GC.
-	 */
-
-#ifdef DIAGNOSTIC
-	DB_LOGC *dblogc;
-
-	dblogc = get_DB_LOGC(jnienv, jthis);
-	if (dblogc != NULL)
-		fprintf(stderr, "Java API: DbLogc has not been closed\n");
-#else
-
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(jthis, NULL);
-
-#endif
-}
diff --git a/storage/bdb/libdb_java/java_DbLsn.c b/storage/bdb/libdb_java/java_DbLsn.c
deleted file mode 100644
index d53082826f4..00000000000
--- a/storage/bdb/libdb_java/java_DbLsn.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbLsn.c,v 11.12 2002/05/07 16:12:41 dda Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include               /* needed for FILENAME_MAX */
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_DbLsn.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
-  (JNIEnv *jnienv, /*DbLsn*/ jobject jthis)
-{
-	/*
-	 * Note: the DB_LSN object stored in the private_dbobj_
-	 * is allocated in get_DbLsn() or get_DB_LSN().
-	 */
-
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(jthis, NULL);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
-  (JNIEnv *jnienv, jobject jthis)
-{
-	DB_LSN *dblsn;
-
-	dblsn = get_DB_LSN(jnienv, jthis);
-	if (dblsn) {
-		(void)__os_free(NULL, dblsn);
-	}
-}
diff --git a/storage/bdb/libdb_java/java_DbTxn.c b/storage/bdb/libdb_java/java_DbTxn.c
deleted file mode 100644
index 51195501b77..00000000000
--- a/storage/bdb/libdb_java/java_DbTxn.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbTxn.c,v 11.16 2002/08/06 05:19:05 bostic Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_DbTxn.h"
-
-JAVADB_METHOD(DbTxn_abort, (JAVADB_ARGS), DB_TXN,
-    abort, (c_this))
-JAVADB_METHOD(DbTxn_commit, (JAVADB_ARGS, jint flags), DB_TXN,
-    commit, (c_this, flags))
-JAVADB_METHOD(DbTxn_discard, (JAVADB_ARGS, jint flags), DB_TXN,
-    discard, (c_this, flags))
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
-  (JNIEnv *jnienv, jobject jthis)
-{
-	int retval = 0;
-	DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbtxn))
-		return (-1);
-
-	/* No error to check for from DB_TXN->id */
-	retval = dbtxn->id(dbtxn);
-	return (retval);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
-  (JNIEnv *jnienv, jobject jthis, jbyteArray gid)
-{
-	int err;
-	DB_TXN *dbtxn;
-	jbyte *c_array;
-
-	dbtxn = get_DB_TXN(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbtxn))
-		return;
-
-	if (gid == NULL ||
-	    (*jnienv)->GetArrayLength(jnienv, gid) < DB_XIDDATASIZE) {
-		report_exception(jnienv, "DbTxn.prepare gid array "
-				 "must be >= 128 bytes", EINVAL, 0);
-		return;
-	}
-	c_array = (*jnienv)->GetByteArrayElements(jnienv, gid, NULL);
-	err = dbtxn->prepare(dbtxn, (u_int8_t *)c_array);
-	(*jnienv)->ReleaseByteArrayElements(jnienv, gid, c_array, 0);
-	verify_return(jnienv, err, 0);
-}
-
-JAVADB_METHOD(DbTxn_set_1timeout,
-    (JAVADB_ARGS, jlong timeout, jint flags), DB_TXN,
-    set_timeout, (c_this, (u_int32_t)timeout, flags))
diff --git a/storage/bdb/libdb_java/java_DbUtil.c b/storage/bdb/libdb_java/java_DbUtil.c
deleted file mode 100644
index edcbc6d9f15..00000000000
--- a/storage/bdb/libdb_java/java_DbUtil.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2001-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbUtil.c,v 1.5 2002/01/11 15:52:44 bostic Exp $";
-#endif /* not lint */
-
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_DbUtil.h"
-
-JNIEXPORT jboolean JNICALL
-Java_com_sleepycat_db_DbUtil_is_1big_1endian (JNIEnv *jnienv,
-    jclass jthis_class)
-{
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(jthis_class, NULL);
-
-	return (__db_isbigendian() ? JNI_TRUE : JNI_FALSE);
-}
diff --git a/storage/bdb/libdb_java/java_DbXAResource.c b/storage/bdb/libdb_java/java_DbXAResource.c
deleted file mode 100644
index 609529bfe83..00000000000
--- a/storage/bdb/libdb_java/java_DbXAResource.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2001
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_DbXAResource.c,v 11.6 2002/08/06 05:19:06 bostic Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-#ifdef DIAGNOSTIC
-#include 
-#endif
-
-#include "db_int.h"
-#include "java_util.h"
-#include "dbinc/xa.h"
-#include "dbinc_auto/xa_ext.h"
-#include "com_sleepycat_db_xa_DbXAResource.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
-  (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
-{
-	int err;
-	LOCKED_STRING ls_home;
-	jclass cl;
-	jmethodID mid;
-
-	COMPQUIET(jthis, NULL);
-	if (locked_string_get(&ls_home, jnienv, home) != 0)
-		goto out;
-	if ((err = __db_xa_open((char *)ls_home.string,
-				rmid, flags)) != XA_OK) {
-		verify_return(jnienv, err, EXCEPTION_XA);
-	}
-
-	/*
-	 * Now create the DbEnv object, it will get attached
-	 * to the DB_ENV just made in __db_xa_open.
-	 */
-	if ((cl = get_class(jnienv, name_DB_ENV)) == NULL)
-		goto out;
-
-	mid = (*jnienv)->GetStaticMethodID(jnienv, cl,
-					   "_create_DbEnv_for_XA", "(II)V");
-	(*jnienv)->CallStaticVoidMethod(jnienv, cl, mid, 0, rmid);
-
- out:
-	locked_string_put(&ls_home, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
-  (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
-{
-	int err;
-	LOCKED_STRING ls_home;
-
-	COMPQUIET(jthis, NULL);
-	if (locked_string_get(&ls_home, jnienv, home) != 0)
-		goto out;
-	if ((err = __db_xa_close((char *)ls_home.string,
-				 rmid, flags)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
- out:
-	locked_string_put(&ls_home, jnienv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid,
-   jboolean onePhase)
-{
-	XID xid;
-	long flags;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return;
-	flags = 0;
-	if (onePhase == JNI_TRUE)
-		flags |= TMONEPHASE;
-	if ((err = __db_xa_commit(&xid, rmid, flags)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
-{
-	XID xid;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return;
-	if ((err = __db_xa_end(&xid, rmid, flags)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
-{
-	XID xid;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return;
-	if ((err = __db_xa_forget(&xid, rmid, 0)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
-{
-	XID xid;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return (0);
-	err = __db_xa_prepare(&xid, rmid, 0);
-	if (err != XA_OK && err != XA_RDONLY)
-		verify_return(jnienv, err, EXCEPTION_XA);
-
-	return (err);
-}
-
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
-  (JNIEnv *jnienv, jobject jthis, jint rmid, jint flags)
-{
-	XID *xids;
-	int err;
-	int total;
-	int cnt;
-	int i;
-	int curflags;
-	size_t nbytes;
-	jclass xid_class;
-	jmethodID mid;
-	jobject obj;
-	jobjectArray retval;
-
-	COMPQUIET(jthis, NULL);
-	total = 0;
-	cnt = 0;
-	xids = NULL;
-	flags &= ~(DB_FIRST | DB_LAST | DB_NEXT);
-
-	/* Repeatedly call __db_xa_recover to fill up an array of XIDs */
-	curflags = flags | DB_FIRST;
-	do {
-		total += cnt;
-		nbytes = sizeof(XID) * (total + 10);
-		if ((err = __os_realloc(NULL, nbytes, &xids)) != 0) {
-			if (xids != NULL)
-				__os_free(NULL, xids);
-			verify_return(jnienv, XAER_NOTA, EXCEPTION_XA);
-			return (NULL);
-		}
-		cnt = __db_xa_recover(&xids[total], 10, rmid, curflags);
-		curflags = flags | DB_NEXT;
-	} while (cnt > 0);
-
-	if (xids != NULL)
-		__os_free(NULL, xids);
-
-	if (cnt < 0) {
-		verify_return(jnienv, cnt, EXCEPTION_XA);
-		return (NULL);
-	}
-
-	/* Create the java DbXid array and fill it up */
-	if ((xid_class = get_class(jnienv, name_DB_XID)) == NULL)
-		return (NULL);
-	mid = (*jnienv)->GetMethodID(jnienv, xid_class, "",
-				     "(I[B[B)V");
-	if ((retval = (*jnienv)->NewObjectArray(jnienv, total, xid_class, 0))
-	    == NULL)
-		goto out;
-
-	for (i = 0; i < total; i++) {
-		jobject gtrid;
-		jobject bqual;
-		jsize gtrid_len;
-		jsize bqual_len;
-
-		gtrid_len = (jsize)xids[i].gtrid_length;
-		bqual_len = (jsize)xids[i].bqual_length;
-		gtrid = (*jnienv)->NewByteArray(jnienv, gtrid_len);
-		bqual = (*jnienv)->NewByteArray(jnienv, bqual_len);
-		if (gtrid == NULL || bqual == NULL)
-			goto out;
-		(*jnienv)->SetByteArrayRegion(jnienv, gtrid, 0, gtrid_len,
-		    (jbyte *)&xids[i].data[0]);
-		(*jnienv)->SetByteArrayRegion(jnienv, bqual, 0, bqual_len,
-		    (jbyte *)&xids[i].data[gtrid_len]);
-		if ((obj = (*jnienv)->NewObject(jnienv, xid_class, mid,
-		    (jint)xids[i].formatID, gtrid, bqual)) == NULL)
-			goto out;
-		(*jnienv)->SetObjectArrayElement(jnienv, retval, i, obj);
-	}
-out:	return (retval);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
-{
-	XID xid;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return;
-	if ((err = __db_xa_rollback(&xid, rmid, 0)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
-  (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
-{
-	XID xid;
-	int err;
-
-	COMPQUIET(jthis, NULL);
-	if (!get_XID(jnienv, jxid, &xid))
-		return;
-
-	if ((err = __db_xa_start(&xid, rmid, flags)) != XA_OK)
-		verify_return(jnienv, err, EXCEPTION_XA);
-}
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
-  (JNIEnv *jnienv, jclass jthisclass, jobject jxid, jobject jrmid)
-{
-	XID xid;
-	XID *xidp;
-	int ret;
-	DB_ENV *env;
-	DB_TXN *txn;
-	int rmid;
-	int *rmidp;
-	jobject jtxn;
-	jobject jenv;
-	jclass cl;
-	jmethodID mid;
-
-	COMPQUIET(jthisclass, NULL);
-	if (jxid == NULL) {
-		xidp = NULL;
-	}
-	else {
-		xidp = &xid;
-		if (!get_XID(jnienv, jxid, &xid))
-			return (NULL);
-	}
-	if (jrmid == NULL) {
-		rmidp = NULL;
-	}
-	else {
-		rmidp = &rmid;
-		rmid = (int)(*jnienv)->CallIntMethod(jnienv, jrmid,
-						     mid_Integer_intValue);
-	}
-
-	if ((ret = db_env_xa_attach(rmidp, xidp, &env, &txn)) != 0) {
-		/*
-		 * DB_NOTFOUND is a normal return, it means we
-		 * have no current transaction,
-		 */
-		if (ret != DB_NOTFOUND)
-			verify_return(jnienv, ret, 0);
-		return (NULL);
-	}
-
-	jenv = ((DB_ENV_JAVAINFO *)env->api2_internal)->jenvref;
-	jtxn = get_DbTxn(jnienv, txn);
-	if ((cl = get_class(jnienv, name_DB_XAATTACH)) == NULL)
-		return (NULL);
-	mid = (*jnienv)->GetMethodID(jnienv, cl, "",
-		     "(Lcom/sleepycat/db/DbEnv;Lcom/sleepycat/db/DbTxn;)V");
-	return (*jnienv)->NewObject(jnienv, cl, mid, jenv, jtxn);
-}
diff --git a/storage/bdb/libdb_java/java_Dbc.c b/storage/bdb/libdb_java/java_Dbc.c
deleted file mode 100644
index 63ab368fc03..00000000000
--- a/storage/bdb/libdb_java/java_Dbc.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_Dbc.c,v 11.23 2002/08/06 05:19:06 bostic Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-#ifdef DIAGNOSTIC
-#include 
-#endif
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_Dbc.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
-  (JNIEnv *jnienv, jobject jthis)
-{
-	int err;
-	DBC *dbc = get_DBC(jnienv, jthis);
-
-	if (!verify_non_null(jnienv, dbc))
-		return;
-	err = dbc->c_close(dbc);
-	if (verify_return(jnienv, err, 0)) {
-		set_private_dbobj(jnienv, name_DBC, jthis, 0);
-	}
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
-  (JNIEnv *jnienv, jobject jthis, jint flags)
-{
-	int err;
-	DBC *dbc = get_DBC(jnienv, jthis);
-	db_recno_t count;
-
-	if (!verify_non_null(jnienv, dbc))
-		return (0);
-	err = dbc->c_count(dbc, &count, flags);
-	verify_return(jnienv, err, 0);
-	return (count);
-}
-
-JAVADB_METHOD_INT(Dbc_del, (JAVADB_ARGS, jint flags), DBC,
-    c_del, (c_this, flags), DB_RETOK_DBCDEL)
-
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
-  (JNIEnv *jnienv, jobject jthis, jint flags)
-{
-	int err;
-	DBC *dbc = get_DBC(jnienv, jthis);
-	DBC *dbc_ret = NULL;
-
-	if (!verify_non_null(jnienv, dbc))
-		return (0);
-	err = dbc->c_dup(dbc, &dbc_ret, flags);
-	if (!verify_return(jnienv, err, 0))
-		return (0);
-
-	return (get_Dbc(jnienv, dbc_ret));
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
-  (JNIEnv *jnienv, jobject jthis,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
-{
-	int err, retry, op_flags;
-	DBC *dbc;
-	DB_ENV *dbenv;
-	LOCKED_DBT lkey, ldata;
-	OpKind keyop, dataop;
-
-	/*
-	 * Depending on flags, the user may be supplying the key,
-	 * or else we may have to retrieve it.
-	 */
-	err = 0;
-	keyop = outOp;
-	dataop = outOp;
-
-	op_flags = flags & DB_OPFLAGS_MASK;
-	if (op_flags == DB_SET) {
-		keyop = inOp;
-	}
-	else if (op_flags == DB_SET_RANGE ||
-		 op_flags == DB_SET_RECNO) {
-		keyop = inOutOp;
-	}
-	else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
-		keyop = inOutOp;
-		dataop = inOutOp;
-	}
-
-	dbc = get_DBC(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbc))
-		return (0);
-	dbenv = dbc->dbp->dbenv;
-
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
-		goto out1;
-
-	if (!verify_non_null(jnienv, dbc))
-		goto out1;
-
-	for (retry = 0; retry < 3; retry++) {
-		err = dbc->c_get(dbc,
-		    &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-
-		/*
-		 * If we failed due to lack of memory in our DBT arrays,
-		 * retry.
-		 */
-		if (err != ENOMEM)
-			break;
-		if (!locked_dbt_realloc(&lkey, jnienv,
-		    dbenv) && !locked_dbt_realloc(&ldata, jnienv, dbenv))
-			break;
-	}
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lkey, jnienv, dbenv);
-	if (!DB_RETOK_DBCGET(err)) {
-		if (verify_dbt(jnienv, err, &lkey) &&
-		    verify_dbt(jnienv, err, &ldata))
-			verify_return(jnienv, err, 0);
-	}
-	return (err);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
-  (JNIEnv *jnienv, jobject jthis,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject pkey, /*Dbt*/ jobject data, jint flags)
-{
-	int err, retry, op_flags;
-	DBC *dbc;
-	DB_ENV *dbenv;
-	LOCKED_DBT lkey, lpkey, ldata;
-	OpKind keyop, pkeyop, dataop;
-
-	/*
-	 * Depending on flags, the user may be supplying the key,
-	 * or else we may have to retrieve it.
-	 */
-	err = 0;
-	keyop = outOp;
-	pkeyop = outOp;
-	dataop = outOp;
-
-	op_flags = flags & DB_OPFLAGS_MASK;
-	if (op_flags == DB_SET) {
-		keyop = inOp;
-	}
-	else if (op_flags == DB_SET_RANGE ||
-		 op_flags == DB_SET_RECNO) {
-		keyop = inOutOp;
-	}
-	else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
-		pkeyop = inOutOp;
-		keyop = inOutOp;
-		dataop = inOutOp;
-	}
-
-	dbc = get_DBC(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbc))
-		return (0);
-	dbenv = dbc->dbp->dbenv;
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out3;
-	if (locked_dbt_get(&lpkey, jnienv, dbenv, pkey, pkeyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
-		goto out1;
-
-	if (!verify_non_null(jnienv, dbc))
-		goto out1;
-
-	for (retry = 0; retry < 3; retry++) {
-		err = dbc->c_pget(dbc, &lkey.javainfo->dbt,
-		&lpkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-
-		/*
-		 * If we failed due to lack of memory in our DBT arrays,
-		 * retry.
-		 */
-		if (err != ENOMEM)
-			break;
-		if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
-		    !locked_dbt_realloc(&lpkey, jnienv, dbenv) &&
-		    !locked_dbt_realloc(&ldata, jnienv, dbenv))
-			break;
-	}
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lpkey, jnienv, dbenv);
- out3:
-	locked_dbt_put(&lkey, jnienv, dbenv);
-	if (!DB_RETOK_DBCGET(err)) {
-		if (verify_dbt(jnienv, err, &lkey) &&
-		    verify_dbt(jnienv, err, &lpkey) &&
-		    verify_dbt(jnienv, err, &ldata))
-			verify_return(jnienv, err, 0);
-	}
-	return (err);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
-  (JNIEnv *jnienv, jobject jthis,
-   /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
-{
-	int err;
-	DBC *dbc;
-	DB_ENV *dbenv;
-	LOCKED_DBT lkey, ldata;
-	OpKind keyop;
-
-	err = 0;
-	dbc = get_DBC(jnienv, jthis);
-	if (!verify_non_null(jnienv, dbc))
-		return (0);
-	dbenv = dbc->dbp->dbenv;
-	keyop = (dbc->dbp->type == DB_RECNO &&
-	    (flags == DB_BEFORE || flags == DB_AFTER)) ? outOp : inOp;
-	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
-		goto out2;
-	if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
-		goto out1;
-
-	if (!verify_non_null(jnienv, dbc))
-		goto out1;
-	err = dbc->c_put(dbc, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
-	if (!DB_RETOK_DBCPUT(err))
-		verify_return(jnienv, err, 0);
- out1:
-	locked_dbt_put(&ldata, jnienv, dbenv);
- out2:
-	locked_dbt_put(&lkey, jnienv, dbenv);
-	return (err);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
-  (JNIEnv *jnienv, jobject jthis)
-{
-	/*
-	 * Free any data related to DBC here.
-	 * If we ever have java-only data embedded in the DBC
-	 * and need to do this, we'll have to track Dbc's
-	 * according to which Db owns them, just as
-	 * we track Db's according to which DbEnv owns them.
-	 * That's necessary to avoid double freeing that
-	 * comes about when closes interact with GC.
-	 */
-
-#ifdef DIAGNOSTIC
-	DBC *dbc;
-
-	dbc = get_DBC(jnienv, jthis);
-	if (dbc != NULL)
-		fprintf(stderr, "Java API: Dbc has not been closed\n");
-#else
-
-	COMPQUIET(jnienv, NULL);
-	COMPQUIET(jthis, NULL);
-
-#endif
-}
diff --git a/storage/bdb/libdb_java/java_Dbt.c b/storage/bdb/libdb_java/java_Dbt.c
deleted file mode 100644
index d21109f3408..00000000000
--- a/storage/bdb/libdb_java/java_Dbt.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_Dbt.c,v 11.18 2002/06/20 11:11:55 mjc Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-#include "com_sleepycat_db_Dbt.h"
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
-  (JNIEnv *jnienv, jobject jthis)
-{
-	DBT_JAVAINFO *dbtji;
-
-	dbtji = dbjit_construct();
-	set_private_dbobj(jnienv, name_DBT, jthis, dbtji);
-}
-
-JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
-  (JNIEnv *jnienv, jobject jthis)
-{
-	DBT_JAVAINFO *db_this;
-	jbyteArray arr = NULL;
-	int len;
-
-	db_this = get_DBT_JAVAINFO(jnienv, jthis);
-	if (verify_non_null(jnienv, db_this)) {
-		len = db_this->dbt.size;
-		if ((arr = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
-			goto out;
-		(*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
-					      db_this->dbt.data);
-	}
-out:	return (arr);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
-  (JNIEnv *jnienv, jobject jthis)
-{
-	DBT_JAVAINFO *dbtji;
-
-	dbtji = get_DBT_JAVAINFO(jnienv, jthis);
-	if (dbtji) {
-		/* Free any data related to DBT here */
-		dbjit_destroy(dbtji);
-	}
-}
diff --git a/storage/bdb/libdb_java/java_info.c b/storage/bdb/libdb_java/java_info.c
deleted file mode 100644
index 22fcbd23d46..00000000000
--- a/storage/bdb/libdb_java/java_info.c
+++ /dev/null
@@ -1,1125 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_info.c,v 11.46 2002/08/29 14:22:23 margo Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-
-/****************************************************************
- *
- * Callback functions
- */
-
-static int Db_assoc_callback(DB *db,
-			     const DBT *key,
-			     const DBT *data,
-			     DBT *retval)
-{
-	DB_JAVAINFO *dbinfo;
-
-	DB_ASSERT(db != NULL);
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_assoc(dbinfo, db, dbinfo->jdbref,
-	    key, data, retval));
-}
-
-static void Db_feedback_callback(DB *db, int opcode, int percent)
-{
-	DB_JAVAINFO *dbinfo;
-
-	DB_ASSERT(db != NULL);
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	dbji_call_feedback(dbinfo, db, dbinfo->jdbref, opcode, percent);
-}
-
-static int Db_append_recno_callback(DB *db, DBT *dbt, db_recno_t recno)
-{
-	DB_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref, dbt, recno));
-}
-
-static int Db_bt_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
-{
-	DB_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
-}
-
-static size_t Db_bt_prefix_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
-{
-	DB_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
-}
-
-static int Db_dup_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
-{
-	DB_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
-}
-
-static u_int32_t Db_h_hash_callback(DB *db, const void *data, u_int32_t len)
-{
-	DB_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_JAVAINFO *)db->api_internal;
-	return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref, data, len));
-}
-
-static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
-{
-	DB_ENV_JAVAINFO *dbinfo;
-
-	DB_ASSERT(dbenv != NULL);
-	dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
-	dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref, opcode, percent);
-}
-
-static int DbEnv_rep_transport_callback(DB_ENV *dbenv,
-					const DBT *control, const DBT *rec,
-					int envid, u_int32_t flags)
-{
-	DB_ENV_JAVAINFO *dbinfo;
-
-	dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
-	return (dbjie_call_rep_transport(dbinfo, dbenv,
-	    dbinfo->jenvref, control, rec, envid, (int)flags));
-}
-
-static int DbEnv_app_dispatch_callback(DB_ENV *dbenv, DBT *dbt,
-				     DB_LSN *lsn, db_recops recops)
-{
-	DB_ENV_JAVAINFO *dbinfo;
-
-	DB_ASSERT(dbenv != NULL);
-	dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
-	return (dbjie_call_app_dispatch(dbinfo, dbenv, dbinfo->jenvref, dbt,
-	    lsn, recops));
-}
-
-/****************************************************************
- *
- * Implementation of class DBT_javainfo
- */
-DBT_JAVAINFO *
-dbjit_construct()
-{
-	DBT_JAVAINFO *dbjit;
-	int err;
-
-	/*XXX should return err*/
-	if ((err = __os_malloc(NULL, sizeof(DBT_JAVAINFO), &dbjit)) != 0)
-		return (NULL);
-
-	memset(dbjit, 0, sizeof(DBT_JAVAINFO));
-	return (dbjit);
-}
-
-void dbjit_destroy(DBT_JAVAINFO *dbjit)
-{
-	DB_ASSERT(!F_ISSET(dbjit, DBT_JAVAINFO_LOCKED));
-	/* Extra paranoia */
-	memset(dbjit, 0, sizeof(DBT_JAVAINFO));
-	(void)__os_free(NULL, dbjit);
-}
-
-/****************************************************************
- *
- * Implementation of class DB_ENV_JAVAINFO
- */
-
-/* create/initialize an object */
-DB_ENV_JAVAINFO *
-dbjie_construct(JNIEnv *jnienv,
-		jobject jenv,
-		jobject default_errcall,
-		int is_dbopen)
-{
-	DB_ENV_JAVAINFO *dbjie;
-	int err;
-
-	/*XXX should return err*/
-	if ((err = __os_malloc(NULL, sizeof(DB_ENV_JAVAINFO), &dbjie)) != 0)
-		return (NULL);
-	memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
-	dbjie->is_dbopen = is_dbopen;
-
-	if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm) != 0) {
-		__os_free(NULL, dbjie);
-		report_exception(jnienv, "cannot get Java VM", 0, 0);
-		return (NULL);
-	}
-
-	/*
-	 * The default error call just prints to the 'System.err'
-	 * stream.  If the user does set_errcall to null, we'll
-	 * want to have a reference to set it back to.
-	 *
-	 * Why do we have always set db_errcall to our own callback?
-	 * Because it makes the interaction between setting the
-	 * error prefix, error stream, and user's error callback
-	 * that much easier.
-	 */
-	dbjie->default_errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
-	dbjie->errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
-	dbjie->jenvref = NEW_GLOBAL_REF(jnienv, jenv);
-	return (dbjie);
-}
-
-/* release all objects held by this this one */
-void dbjie_dealloc(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
-{
-	if (dbjie->feedback != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
-		dbjie->feedback = NULL;
-	}
-	if (dbjie->app_dispatch != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
-		dbjie->app_dispatch = NULL;
-	}
-	if (dbjie->errcall != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
-		dbjie->errcall = NULL;
-	}
-	if (dbjie->default_errcall != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall);
-		dbjie->default_errcall = NULL;
-	}
-	if (dbjie->jenvref != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->jenvref);
-		dbjie->jenvref = NULL;
-	}
-
-	if (dbjie->conflict != NULL) {
-		__os_free(NULL, dbjie->conflict);
-		dbjie->conflict = NULL;
-		dbjie->conflict_size = 0;
-	}
-	if (dbjie->errpfx != NULL) {
-		__os_free(NULL, dbjie->errpfx);
-		dbjie->errpfx = NULL;
-	}
-}
-
-/* free this object, releasing anything allocated on its behalf */
-void dbjie_destroy(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
-{
-	dbjie_dealloc(dbjie, jnienv);
-
-	/* Extra paranoia */
-	memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
-	(void)__os_free(NULL, dbjie);
-}
-
-/*
- * Attach to the current thread that is running and
- * return that.  We use the java virtual machine
- * that we saved in the constructor.
- */
-JNIEnv *
-dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
-{
-	/*
-	 * Note:
-	 * Different versions of the JNI disagree on the signature
-	 * for AttachCurrentThread.  The most recent documentation
-	 * seems to say that (JNIEnv **) is correct, but newer
-	 * JNIs seem to use (void **), oddly enough.
-	 */
-#ifdef JNI_VERSION_1_2
-	void *attachret = 0;
-#else
-	JNIEnv *attachret = 0;
-#endif
-
-	/*
-	 * This should always succeed, as we are called via
-	 * some Java activity.  I think therefore I am (a thread).
-	 */
-	if ((*dbjie->javavm)->AttachCurrentThread(dbjie->javavm, &attachret, 0)
-	    != 0)
-		return (0);
-
-	return ((JNIEnv *)attachret);
-}
-
-jstring
-dbjie_get_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
-{
-	return (get_java_string(jnienv, dbjie->errpfx));
-}
-
-void
-dbjie_set_errcall(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jobject new_errcall)
-{
-	/*
-	 * If the new_errcall is null, we'll set the error call
-	 * to the default one.
-	 */
-	if (new_errcall == NULL)
-		new_errcall = dbjie->default_errcall;
-
-	DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
-	dbjie->errcall = NEW_GLOBAL_REF(jnienv, new_errcall);
-}
-
-void
-dbjie_set_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jstring errpfx)
-{
-	if (dbjie->errpfx != NULL)
-		__os_free(NULL, dbjie->errpfx);
-
-	if (errpfx)
-		dbjie->errpfx = get_c_string(jnienv, errpfx);
-	else
-		dbjie->errpfx = NULL;
-}
-
-void
-dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, u_char *newarr, size_t size)
-{
-	if (dbjie->conflict != NULL)
-		(void)__os_free(NULL, dbjie->conflict);
-	dbjie->conflict = newarr;
-	dbjie->conflict_size = size;
-}
-
-void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
-			       DB_ENV *dbenv, jobject jfeedback)
-{
-	int err;
-
-	if (dbjie->feedback != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
-	}
-	if (jfeedback == NULL) {
-		if ((err = dbenv->set_feedback(dbenv, NULL)) != 0)
-			report_exception(jnienv, "set_feedback failed",
-					 err, 0);
-	}
-	else {
-		if ((err = dbenv->set_feedback(dbenv,
-					       DbEnv_feedback_callback)) != 0)
-			report_exception(jnienv, "set_feedback failed",
-					 err, 0);
-	}
-
-	dbjie->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
-}
-
-void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
-			 int opcode, int percent)
-{
-	JNIEnv *jnienv;
-	jclass feedback_class;
-	jmethodID id;
-
-	COMPQUIET(dbenv, NULL);
-	jnienv = dbjie_get_jnienv(dbjie);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return;
-	}
-
-	if ((feedback_class =
-	    get_class(jnienv, name_DbEnvFeedback)) == NULL) {
-		fprintf(stderr, "Cannot find callback class %s\n",
-		    name_DbEnvFeedback);
-		return;	/* An exception has been posted. */
-	}
-	id = (*jnienv)->GetMethodID(jnienv, feedback_class,
-				    "feedback",
-				    "(Lcom/sleepycat/db/DbEnv;II)V");
-	if (!id) {
-		fprintf(stderr, "Cannot find callback method feedback\n");
-		return;
-	}
-
-	(*jnienv)->CallVoidMethod(jnienv, dbjie->feedback, id,
-				  jenv, (jint)opcode, (jint)percent);
-}
-
-void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
-				    DB_ENV *dbenv, int id, jobject jtransport)
-{
-	int err;
-
-	if (dbjie->rep_transport != NULL)
-		DELETE_GLOBAL_REF(jnienv, dbjie->rep_transport);
-
-	err = dbenv->set_rep_transport(dbenv, id,
-	    DbEnv_rep_transport_callback);
-	verify_return(jnienv, err, 0);
-
-	dbjie->rep_transport = NEW_GLOBAL_REF(jnienv, jtransport);
-}
-
-int dbjie_call_rep_transport(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
-			     jobject jenv, const DBT *control,
-			     const DBT *rec, int flags, int envid)
-{
-	JNIEnv *jnienv;
-	jclass rep_transport_class;
-	jmethodID jid;
-	jobject jcdbt, jrdbt;
-
-	COMPQUIET(dbenv, NULL);
-	jnienv = dbjie_get_jnienv(dbjie);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	if ((rep_transport_class =
-	    get_class(jnienv, name_DbRepTransport)) == NULL) {
-		fprintf(stderr, "Cannot find callback class %s\n",
-		    name_DbRepTransport);
-		return (0);	/* An exception has been posted. */
-	}
-	jid = (*jnienv)->GetMethodID(jnienv, rep_transport_class,
-				     "send",
-				     "(Lcom/sleepycat/db/DbEnv;"
-				     "Lcom/sleepycat/db/Dbt;"
-				     "Lcom/sleepycat/db/Dbt;II)I");
-
-	if (!jid) {
-		fprintf(stderr, "Cannot find callback method send\n");
-		return (0);
-	}
-
-	jcdbt = get_const_Dbt(jnienv, control, NULL);
-	jrdbt = get_const_Dbt(jnienv, rec, NULL);
-
-	return (*jnienv)->CallIntMethod(jnienv, dbjie->rep_transport, jid, jenv,
-					jcdbt, jrdbt, flags, envid);
-}
-
-void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
-				 DB_ENV *dbenv, jobject japp_dispatch)
-{
-	int err;
-
-	if (dbjie->app_dispatch != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
-	}
-	if (japp_dispatch == NULL) {
-		if ((err = dbenv->set_app_dispatch(dbenv, NULL)) != 0)
-			report_exception(jnienv, "set_app_dispatch failed",
-					 err, 0);
-	}
-	else {
-		if ((err = dbenv->set_app_dispatch(dbenv,
-		    DbEnv_app_dispatch_callback)) != 0)
-			report_exception(jnienv, "set_app_dispatch failed",
-					 err, 0);
-	}
-
-	dbjie->app_dispatch = NEW_GLOBAL_REF(jnienv, japp_dispatch);
-}
-
-int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
-			  DBT *dbt, DB_LSN *lsn, int recops)
-{
-	JNIEnv *jnienv;
-	jclass app_dispatch_class;
-	jmethodID id;
-	jobject jdbt;
-	jobject jlsn;
-
-	COMPQUIET(dbenv, NULL);
-	jnienv = dbjie_get_jnienv(dbjie);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	if ((app_dispatch_class =
-	    get_class(jnienv, name_DbTxnRecover)) == NULL) {
-		fprintf(stderr, "Cannot find callback class %s\n",
-		    name_DbTxnRecover);
-		return (0);	/* An exception has been posted. */
-	}
-	id = (*jnienv)->GetMethodID(jnienv, app_dispatch_class,
-				    "app_dispatch",
-				    "(Lcom/sleepycat/db/DbEnv;"
-				    "Lcom/sleepycat/db/Dbt;"
-				    "Lcom/sleepycat/db/DbLsn;"
-				    "I)I");
-	if (!id) {
-		fprintf(stderr, "Cannot find callback method app_dispatch\n");
-		return (0);
-	}
-
-	jdbt = get_Dbt(jnienv, dbt, NULL);
-
-	if (lsn == NULL)
-		jlsn = NULL;
-	else
-		jlsn = get_DbLsn(jnienv, *lsn);
-
-	return (*jnienv)->CallIntMethod(jnienv, dbjie->app_dispatch, id, jenv,
-					jdbt, jlsn, recops);
-}
-
-jobject dbjie_get_errcall(DB_ENV_JAVAINFO *dbjie)
-{
-	return (dbjie->errcall);
-}
-
-jint dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
-{
-	return (dbjie->is_dbopen);
-}
-
-/****************************************************************
- *
- * Implementation of class DB_JAVAINFO
- */
-
-DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags)
-{
-	DB_JAVAINFO *dbji;
-	int err;
-
-	/*XXX should return err*/
-	if ((err = __os_malloc(NULL, sizeof(DB_JAVAINFO), &dbji)) != 0)
-		return (NULL);
-
-	memset(dbji, 0, sizeof(DB_JAVAINFO));
-
-	if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm) != 0) {
-		report_exception(jnienv, "cannot get Java VM", 0, 0);
-		(void)__os_free(NULL, dbji);
-		return (NULL);
-	}
-	dbji->jdbref = NEW_GLOBAL_REF(jnienv, jdb);
-	dbji->construct_flags = flags;
-	return (dbji);
-}
-
-void
-dbji_dealloc(DB_JAVAINFO *dbji, JNIEnv *jnienv)
-{
-	if (dbji->append_recno != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
-		dbji->append_recno = NULL;
-	}
-	if (dbji->assoc != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->assoc);
-		dbji->assoc = NULL;
-	}
-	if (dbji->bt_compare != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
-		dbji->bt_compare = NULL;
-	}
-	if (dbji->bt_prefix != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
-		dbji->bt_prefix = NULL;
-	}
-	if (dbji->dup_compare != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
-		dbji->dup_compare = NULL;
-	}
-	if (dbji->feedback != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->feedback);
-		dbji->feedback = NULL;
-	}
-	if (dbji->h_hash != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
-		dbji->h_hash = NULL;
-	}
-	if (dbji->jdbref != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->jdbref);
-		dbji->jdbref = NULL;
-	}
-}
-
-void
-dbji_destroy(DB_JAVAINFO *dbji, JNIEnv *jnienv)
-{
-	dbji_dealloc(dbji, jnienv);
-	__os_free(NULL, dbji);
-}
-
-JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
-{
-	/*
-	 * Note:
-	 * Different versions of the JNI disagree on the signature
-	 * for AttachCurrentThread.  The most recent documentation
-	 * seems to say that (JNIEnv **) is correct, but newer
-	 * JNIs seem to use (void **), oddly enough.
-	 */
-#ifdef JNI_VERSION_1_2
-	void *attachret = 0;
-#else
-	JNIEnv *attachret = 0;
-#endif
-
-	/*
-	 * This should always succeed, as we are called via
-	 * some Java activity.  I think therefore I am (a thread).
-	 */
-	if ((*dbji->javavm)->AttachCurrentThread(dbji->javavm, &attachret, 0)
-	    != 0)
-		return (0);
-
-	return ((JNIEnv *)attachret);
-}
-
-jint dbji_get_flags(DB_JAVAINFO *dbji)
-{
-	return (dbji->construct_flags);
-}
-
-void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-			      DB *db, jobject jfeedback)
-{
-	jclass feedback_class;
-
-	if (dbji->feedback_method_id == NULL) {
-		if ((feedback_class =
-		    get_class(jnienv, name_DbFeedback)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->feedback_method_id =
-			(*jnienv)->GetMethodID(jnienv, feedback_class,
-					       "feedback",
-					       "(Lcom/sleepycat/db/Db;II)V");
-		if (dbji->feedback_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->feedback != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->feedback);
-	}
-	if (jfeedback == NULL) {
-		db->set_feedback(db, NULL);
-	}
-	else {
-		db->set_feedback(db, Db_feedback_callback);
-	}
-
-	dbji->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
-
-}
-
-void dbji_call_feedback(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-			int opcode, int percent)
-{
-	JNIEnv *jnienv;
-
-	COMPQUIET(db, NULL);
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return;
-	}
-
-	DB_ASSERT(dbji->feedback_method_id != NULL);
-	(*jnienv)->CallVoidMethod(jnienv, dbji->feedback,
-				  dbji->feedback_method_id,
-				  jdb, (jint)opcode, (jint)percent);
-}
-
-void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-				  DB *db, jobject jcallback)
-{
-	jclass append_recno_class;
-
-	if (dbji->append_recno_method_id == NULL) {
-		if ((append_recno_class =
-		    get_class(jnienv, name_DbAppendRecno)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->append_recno_method_id =
-			(*jnienv)->GetMethodID(jnienv, append_recno_class,
-					       "db_append_recno",
-					       "(Lcom/sleepycat/db/Db;"
-					       "Lcom/sleepycat/db/Dbt;I)V");
-		if (dbji->append_recno_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->append_recno != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
-	}
-	if (jcallback == NULL) {
-		db->set_append_recno(db, NULL);
-	}
-	else {
-		db->set_append_recno(db, Db_append_recno_callback);
-	}
-
-	dbji->append_recno = NEW_GLOBAL_REF(jnienv, jcallback);
-}
-
-extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-				  DBT *dbt, jint recno)
-{
-	JNIEnv *jnienv;
-	jobject jresult;
-	DBT_JAVAINFO *dbtji;
-	LOCKED_DBT lresult;
-	DB_ENV *dbenv;
-	u_char *bytearray;
-	int err;
-
-	jnienv = dbji_get_jnienv(dbji);
-	dbenv = db->dbenv;
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	jresult = get_Dbt(jnienv, dbt, &dbtji);
-
-	DB_ASSERT(dbji->append_recno_method_id != NULL);
-	(*jnienv)->CallVoidMethod(jnienv, dbji->append_recno,
-				  dbji->append_recno_method_id,
-				  jdb, jresult, recno);
-
-	/*
-	 * The underlying C API requires that an errno be returned
-	 * on error.  Java users know nothing of errnos, so we
-	 * allow them to throw exceptions instead.  We leave the
-	 * exception in place and return DB_JAVA_CALLBACK to the C API
-	 * that called us.  Eventually the DB->get will fail and
-	 * when java prepares to throw an exception in
-	 * report_exception(), this will be spotted as a special case,
-	 * and the original exception will be preserved.
-	 *
-	 * Note: we have sometimes noticed strange behavior with
-	 * exceptions under Linux 1.1.7 JVM.  (i.e. multiple calls
-	 * to ExceptionOccurred() may report different results).
-	 * Currently we don't know of any problems related to this
-	 * in our code, but if it pops up in the future, users are
-	 * encouranged to get a more recent JVM.
-	 */
-	if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
-		return (DB_JAVA_CALLBACK);
-
-	/*
-	 * Now get the DBT back from java, because the user probably
-	 * changed it.  We'll have to copy back the array too and let
-	 * our caller free it.
-	 *
-	 * We expect that the user *has* changed the DBT (why else would
-	 * they set up an append_recno callback?) so we don't
-	 * worry about optimizing the unchanged case.
-	 */
-	if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
-		return (err);
-
-	memcpy(dbt, &lresult.javainfo->dbt, sizeof(DBT));
-	if ((err = __os_malloc(dbenv, dbt->size, &bytearray)) != 0)
-		goto out;
-
-	memcpy(bytearray, dbt->data, dbt->size);
-	dbt->data = bytearray;
-	dbt->flags |= DB_DBT_APPMALLOC;
-
- out:
-	locked_dbt_put(&lresult, jnienv, dbenv);
-	return (err);
-}
-
-void dbji_set_assoc_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-			       DB *db, DB_TXN *txn, DB *second,
-			       jobject jcallback, int flags)
-{
-	jclass assoc_class;
-	int err;
-
-	if (dbji->assoc_method_id == NULL) {
-		if ((assoc_class =
-		    get_class(jnienv, name_DbSecondaryKeyCreate)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->assoc_method_id =
-			(*jnienv)->GetMethodID(jnienv, assoc_class,
-					       "secondary_key_create",
-					       "(Lcom/sleepycat/db/Db;"
-					       "Lcom/sleepycat/db/Dbt;"
-					       "Lcom/sleepycat/db/Dbt;"
-					       "Lcom/sleepycat/db/Dbt;)I");
-		if (dbji->assoc_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->assoc != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->assoc);
-		dbji->assoc = NULL;
-	}
-
-	if (jcallback == NULL)
-		err = db->associate(db, txn, second, NULL, flags);
-	else
-		err = db->associate(db, txn, second, Db_assoc_callback, flags);
-
-	if (verify_return(jnienv, err, 0))
-		dbji->assoc = NEW_GLOBAL_REF(jnienv, jcallback);
-}
-
-extern int dbji_call_assoc(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-			   const DBT *key, const DBT *value, DBT *result)
-{
-	JNIEnv *jnienv;
-	jobject jresult;
-	LOCKED_DBT lresult;
-	DB_ENV *dbenv;
-	int err;
-	int sz;
-	u_char *bytearray;
-	jint retval;
-
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	DB_ASSERT(dbji->assoc_method_id != NULL);
-
-	dbenv = db->dbenv;
-	jresult = create_default_object(jnienv, name_DBT);
-
-	retval = (*jnienv)->CallIntMethod(jnienv, dbji->assoc,
-					  dbji->assoc_method_id, jdb,
-					  get_const_Dbt(jnienv, key, NULL),
-					  get_const_Dbt(jnienv, value, NULL),
-					  jresult);
-	if (retval != 0)
-		return (retval);
-
-	if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
-		return (DB_JAVA_CALLBACK);
-
-	if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
-		return (err);
-
-	sz = lresult.javainfo->dbt.size;
-	if (sz > 0) {
-		bytearray = (u_char *)lresult.javainfo->dbt.data;
-
-		/*
-		 * If the byte array is in the range of one of the
-		 * arrays passed to us we can use it directly.
-		 * If not, we must create our own array and
-		 * fill it in with the java array.  Since
-		 * the java array may disappear and we don't
-		 * want to keep its memory locked indefinitely,
-		 * we cannot just pin the array.
-		 *
-		 * XXX consider pinning the array, and having
-		 * some way for the C layer to notify the java
-		 * layer when it can be unpinned.
-		 */
-		if ((bytearray < (u_char *)key->data ||
-		     bytearray + sz > (u_char *)key->data + key->size) &&
-		    (bytearray < (u_char *)value->data ||
-		     bytearray + sz > (u_char *)value->data + value->size)) {
-
-			result->flags |= DB_DBT_APPMALLOC;
-			if ((err = __os_malloc(dbenv, sz, &bytearray)) != 0)
-				goto out;
-			memcpy(bytearray, lresult.javainfo->dbt.data, sz);
-		}
-		result->data = bytearray;
-		result->size = sz;
-	}
- out:
-	locked_dbt_put(&lresult, jnienv, dbenv);
-	return (err);
-}
-
-void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-				DB *db, jobject jcompare)
-{
-	jclass bt_compare_class;
-
-	if (dbji->bt_compare_method_id == NULL) {
-		if ((bt_compare_class =
-		    get_class(jnienv, name_DbBtreeCompare)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->bt_compare_method_id =
-			(*jnienv)->GetMethodID(jnienv, bt_compare_class,
-					       "bt_compare",
-					       "(Lcom/sleepycat/db/Db;"
-					       "Lcom/sleepycat/db/Dbt;"
-					       "Lcom/sleepycat/db/Dbt;)I");
-		if (dbji->bt_compare_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->bt_compare != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
-	}
-	if (jcompare == NULL) {
-		db->set_bt_compare(db, NULL);
-	}
-	else {
-		db->set_bt_compare(db, Db_bt_compare_callback);
-	}
-
-	dbji->bt_compare = NEW_GLOBAL_REF(jnienv, jcompare);
-}
-
-int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-			 const DBT *dbt1, const DBT *dbt2)
-{
-	JNIEnv *jnienv;
-	jobject jdbt1, jdbt2;
-
-	COMPQUIET(db, NULL);
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
-	jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
-
-	DB_ASSERT(dbji->bt_compare_method_id != NULL);
-	return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare,
-					dbji->bt_compare_method_id,
-					jdb, jdbt1, jdbt2);
-}
-
-void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-				DB *db, jobject jprefix)
-{
-	jclass bt_prefix_class;
-
-	if (dbji->bt_prefix_method_id == NULL) {
-		if ((bt_prefix_class =
-		    get_class(jnienv, name_DbBtreePrefix)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->bt_prefix_method_id =
-			(*jnienv)->GetMethodID(jnienv, bt_prefix_class,
-					       "bt_prefix",
-					       "(Lcom/sleepycat/db/Db;"
-					       "Lcom/sleepycat/db/Dbt;"
-					       "Lcom/sleepycat/db/Dbt;)I");
-		if (dbji->bt_prefix_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->bt_prefix != NULL) {
-		DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
-	}
-	if (jprefix == NULL) {
-		db->set_bt_prefix(db, NULL);
-	}
-	else {
-		db->set_bt_prefix(db, Db_bt_prefix_callback);
-	}
-
-	dbji->bt_prefix = NEW_GLOBAL_REF(jnienv, jprefix);
-}
-
-size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-			   const DBT *dbt1, const DBT *dbt2)
-{
-	JNIEnv *jnienv;
-	jobject jdbt1, jdbt2;
-
-	COMPQUIET(db, NULL);
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
-	jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
-
-	DB_ASSERT(dbji->bt_prefix_method_id != NULL);
-	return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix,
-						dbji->bt_prefix_method_id,
-						jdb, jdbt1, jdbt2);
-}
-
-void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-				DB *db, jobject jcompare)
-{
-	jclass dup_compare_class;
-
-	if (dbji->dup_compare_method_id == NULL) {
-		if ((dup_compare_class =
-		    get_class(jnienv, name_DbDupCompare)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->dup_compare_method_id =
-			(*jnienv)->GetMethodID(jnienv, dup_compare_class,
-					       "dup_compare",
-					       "(Lcom/sleepycat/db/Db;"
-					       "Lcom/sleepycat/db/Dbt;"
-					       "Lcom/sleepycat/db/Dbt;)I");
-		if (dbji->dup_compare_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->dup_compare != NULL)
-		DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
-
-	if (jcompare == NULL)
-		db->set_dup_compare(db, NULL);
-	else
-		db->set_dup_compare(db, Db_dup_compare_callback);
-
-	dbji->dup_compare = NEW_GLOBAL_REF(jnienv, jcompare);
-}
-
-int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-			 const DBT *dbt1, const DBT *dbt2)
-{
-	JNIEnv *jnienv;
-	jobject jdbt1, jdbt2;
-
-	COMPQUIET(db, NULL);
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
-	jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
-
-	DB_ASSERT(dbji->dup_compare_method_id != NULL);
-	return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare,
-					dbji->dup_compare_method_id,
-					jdb, jdbt1, jdbt2);
-}
-
-void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
-				DB *db, jobject jhash)
-{
-	jclass h_hash_class;
-
-	if (dbji->h_hash_method_id == NULL) {
-		if ((h_hash_class =
-		    get_class(jnienv, name_DbHash)) == NULL)
-			return;	/* An exception has been posted. */
-		dbji->h_hash_method_id =
-			(*jnienv)->GetMethodID(jnienv, h_hash_class,
-					       "hash",
-					       "(Lcom/sleepycat/db/Db;"
-					       "[BI)I");
-		if (dbji->h_hash_method_id == NULL) {
-			/*
-			 * XXX
-			 * We should really have a better way
-			 * to translate this to a Java exception class.
-			 * In theory, it shouldn't happen.
-			 */
-			report_exception(jnienv, "Cannot find callback method",
-					 EFAULT, 0);
-			return;
-		}
-	}
-
-	if (dbji->h_hash != NULL)
-		DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
-
-	if (jhash == NULL)
-		db->set_h_hash(db, NULL);
-	else
-		db->set_h_hash(db, Db_h_hash_callback);
-
-	dbji->h_hash = NEW_GLOBAL_REF(jnienv, jhash);
-}
-
-int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
-		     const void *data, int len)
-{
-	JNIEnv *jnienv;
-	jbyteArray jdata;
-
-	COMPQUIET(db, NULL);
-	jnienv = dbji_get_jnienv(dbji);
-	if (jnienv == NULL) {
-		fprintf(stderr, "Cannot attach to current thread!\n");
-		return (0);
-	}
-
-	DB_ASSERT(dbji->h_hash_method_id != NULL);
-
-	if ((jdata = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
-		return (0);	/* An exception has been posted by the JVM */
-	(*jnienv)->SetByteArrayRegion(jnienv, jdata, 0, len, (void *)data);
-	return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash,
-					dbji->h_hash_method_id,
-					jdb, jdata, len);
-}
diff --git a/storage/bdb/libdb_java/java_info.h b/storage/bdb/libdb_java/java_info.h
deleted file mode 100644
index bda83db420e..00000000000
--- a/storage/bdb/libdb_java/java_info.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: java_info.h,v 11.35 2002/08/29 14:22:23 margo Exp $
- */
-
-#ifndef _JAVA_INFO_H_
-#define	_JAVA_INFO_H_
-
-/*
- * "Info" classes for Java implementation of Berkeley DB API.
- * These classes hold extra information for which there is
- * no room or counterpart in the base classes used in the C API.
- * In the case of a DBT, the DBT_javainfo class is stored in the
- * 'private' variable of the java Dbt, and the DBT_javainfo is subclassed
- * from a DBT.  In the case of DB and DB_ENV, the appropriate
- * info objects are pointed to by the DB and DB_ENV objects.
- * This is convenient to implement callbacks.
- */
-
-/****************************************************************
- *
- * Declaration of class DBT_javainfo
- *
- * A DBT_javainfo is created whenever a Dbt (java) object is created,
- * and a pointer to it is stored in its private info storage.
- * It is subclassed from DBT, because we must retain some extra
- * information in it while it is in use.  In particular, when
- * a java array is associated with it, we need to keep a Globally
- * Locked reference to it so it is not GC'd.  This reference is
- * destroyed when the Dbt is GC'd.
- */
-typedef struct _dbt_javainfo
-{
-	DBT dbt;
-	DB *db;			/* associated DB */
-	jobject dbtref;		/* the java Dbt object */
-	jbyteArray array;	/* the java array object -
-				   this is only valid during the API call */
-	int offset;		/* offset into the Java array */
-
-#define	DBT_JAVAINFO_LOCKED	0x01	/* a LOCKED_DBT has been created */
-	u_int32_t flags;
-}
-DBT_JAVAINFO;	/* used with all 'dbtji' functions */
-
-/* create/initialize a DBT_JAVAINFO object */
-extern DBT_JAVAINFO *dbjit_construct();
-
-/* free this DBT_JAVAINFO, releasing anything allocated on its behalf */
-extern void dbjit_destroy(DBT_JAVAINFO *dbjit);
-
-/****************************************************************
- *
- * Declaration of class DB_ENV_JAVAINFO
- *
- * A DB_ENV_JAVAINFO is allocated and stuffed into the cj_internal
- * and the db_errpfx for every DB_ENV created.  It holds a
- * little extra info that is needed to support callbacks.
- *
- * There's a bit of trickery here, because we have built this
- * above a layer that has a C function callback that gets
- * invoked when an error occurs.  One of the C callback's arguments
- * is the prefix from the DB_ENV, but since we stuffed a pointer
- * to our own DB_ENV_JAVAINFO into the prefix, we get that object as an
- * argument to the C callback.  Thus, the C callback can have
- * access to much more than just the prefix, and it needs that
- * to call back into the Java enviroment.
- *
- * The DB_ENV_JAVAINFO object holds a copy of the Java Virtual Machine,
- * which is needed to attach to the current running thread
- * whenever we need to make a callback.  (This is more reliable
- * than our previous approach, which was to save the thread
- * that created the DbEnv).  It also has the Java callback object,
- * as well as a 'default' callback object that is used when the
- * caller sets the callback to null.  It also has the original
- * error prefix, since we overwrote the one in the DB_ENV.
- * There are also fields that are unrelated to the handling
- * of callbacks, but are convenient to attach to a DB_ENV.
- *
- * Note: We assume that the Java layer is the only one
- * fiddling with the contents of db_errpfx, db_errcall, cj_internal
- * for a DB_ENV that was created via Java.  Since the Java layer should
- * have the only pointer to such a DB_ENV, this should be true.
- */
-typedef struct _db_env_javainfo
-{
-	JavaVM *javavm;
-	int is_dbopen;
-	char *errpfx;
-	jobject jenvref;	/* global reference */
-	jobject default_errcall; /* global reference */
-	jobject errcall;	/* global reference */
-	jobject feedback;	/* global reference */
-	jobject rep_transport;	/* global reference */
-	jobject app_dispatch;	/* global reference */
-	jobject recovery_init;	/* global reference */
-	u_char *conflict;
-	size_t conflict_size;
-	jint construct_flags;
-}
-DB_ENV_JAVAINFO;	/* used with all 'dbjie' functions */
-
-/* create/initialize an object */
-extern DB_ENV_JAVAINFO *dbjie_construct(JNIEnv *jnienv,
-		       jobject jenv,
-		       jobject default_errcall,
-		       int is_dbopen);
-
-/* release all objects held by this this one */
-extern void dbjie_dealloc(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
-
-/* free this object, releasing anything allocated on its behalf */
-extern void dbjie_destroy(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
-
-/* This gets the environment for the current thread */
-extern JNIEnv *dbjie_get_jnienv(DB_ENV_JAVAINFO *);
-
-extern void dbjie_set_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-			     jstring errpfx);
-extern jstring dbjie_get_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
-extern void dbjie_set_errcall(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-			      jobject new_errcall);
-extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, u_char *v, size_t sz);
-extern void dbjie_set_feedback_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-				      DB_ENV *dbenv, jobject value);
-extern void dbjie_call_feedback(DB_ENV_JAVAINFO *, DB_ENV *dbenv, jobject jenv,
-				int opcode, int percent);
-extern void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-					   DB_ENV *dbenv, jobject value);
-extern int dbjie_call_recovery_init(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
-				    jobject jenv);
-extern void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-					   DB_ENV *dbenv, int id, jobject obj);
-extern int dbjie_call_rep_transport(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
-				    jobject jenv, const DBT *control,
-				    const DBT *rec, int envid, int flags);
-extern void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
-					DB_ENV *dbenv, jobject value);
-extern int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *,
-				 DB_ENV *dbenv, jobject jenv,
-				 DBT *dbt, DB_LSN *lsn, int recops);
-extern jobject dbjie_get_errcall(DB_ENV_JAVAINFO *) ;
-extern jint dbjie_is_dbopen(DB_ENV_JAVAINFO *);
-
-/****************************************************************
- *
- * Declaration of class DB_JAVAINFO
- *
- * A DB_JAVAINFO is allocated and stuffed into the cj_internal field
- * for every DB created.  It holds a little extra info that is needed
- * to support callbacks.
- *
- * Note: We assume that the Java layer is the only one
- * fiddling with the contents of cj_internal
- * for a DB that was created via Java.  Since the Java layer should
- * have the only pointer to such a DB, this should be true.
- */
-typedef struct _db_javainfo
-{
-	JavaVM *javavm;
-	jobject jdbref;		/* global reference */
-	jobject append_recno;	/* global reference */
-	jobject assoc;		/* global reference */
-	jobject bt_compare;	/* global reference */
-	jobject bt_prefix;	/* global reference */
-	jobject dup_compare;	/* global reference */
-	jobject feedback;	/* global reference */
-	jobject h_hash;		/* global reference */
-	jmethodID append_recno_method_id;
-	jmethodID assoc_method_id;
-	jmethodID bt_compare_method_id;
-	jmethodID bt_prefix_method_id;
-	jmethodID dup_compare_method_id;
-	jmethodID feedback_method_id;
-	jmethodID h_hash_method_id;
-	jint construct_flags;
-} DB_JAVAINFO;
-
-/* create/initialize an object */
-extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags);
-
-/* release all objects held by this this one */
-extern void dbji_dealloc(DB_JAVAINFO *, JNIEnv *jnienv);
-
-/* free this object, releasing anything allocated on its behalf */
-extern void dbji_destroy(DB_JAVAINFO *, JNIEnv *jnienv);
-
-/* This gets the environment for the current thread */
-extern JNIEnv *dbji_get_jnienv();
-extern jint dbji_get_flags();
-
-extern void dbji_set_feedback_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern void dbji_call_feedback(DB_JAVAINFO *, DB *db, jobject jdb,
-			       int opcode, int percent);
-
-extern void dbji_set_append_recno_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern int dbji_call_append_recno(DB_JAVAINFO *, DB *db, jobject jdb,
-				  DBT *dbt, jint recno);
-extern void dbji_set_assoc_object(DB_JAVAINFO *, JNIEnv *jnienv,
-				  DB *db, DB_TXN *txn, DB *second,
-				  jobject value, int flags);
-extern int dbji_call_assoc(DB_JAVAINFO *, DB *db, jobject jdb,
-			   const DBT *key, const DBT* data, DBT *result);
-extern void dbji_set_bt_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern int dbji_call_bt_compare(DB_JAVAINFO *, DB *db, jobject jdb,
-				const DBT *dbt1, const DBT *dbt2);
-extern void dbji_set_bt_prefix_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern size_t dbji_call_bt_prefix(DB_JAVAINFO *, DB *db, jobject jdb,
-				  const DBT *dbt1, const DBT *dbt2);
-extern void dbji_set_dup_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern int dbji_call_dup_compare(DB_JAVAINFO *, DB *db, jobject jdb,
-				 const DBT *dbt1, const DBT *dbt2);
-extern void dbji_set_h_hash_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
-extern int dbji_call_h_hash(DB_JAVAINFO *, DB *db, jobject jdb,
-			    const void *data, int len);
-
-#endif /* !_JAVA_INFO_H_ */
diff --git a/storage/bdb/libdb_java/java_locked.c b/storage/bdb/libdb_java/java_locked.c
deleted file mode 100644
index 9534a387b40..00000000000
--- a/storage/bdb/libdb_java/java_locked.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_locked.c,v 11.32 2002/08/06 05:19:07 bostic Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-
-/****************************************************************
- *
- * Implementation of functions to manipulate LOCKED_DBT.
- */
-int
-locked_dbt_get(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv,
-	       jobject jdbt, OpKind kind)
-{
-	DBT *dbt;
-
-	COMPQUIET(dbenv, NULL);
-	ldbt->jdbt = jdbt;
-	ldbt->java_array_len = 0;
-	ldbt->flags = 0;
-	ldbt->kind = kind;
-	ldbt->java_data = 0;
-	ldbt->before_data = 0;
-	ldbt->javainfo =
-		(DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, jdbt);
-
-	if (!verify_non_null(jnienv, ldbt->javainfo)) {
-		report_exception(jnienv, "Dbt is gc'ed?", 0, 0);
-		F_SET(ldbt, LOCKED_ERROR);
-		return (EINVAL);
-	}
-	if (F_ISSET(ldbt->javainfo, DBT_JAVAINFO_LOCKED)) {
-		report_exception(jnienv, "Dbt is already in use", 0, 0);
-		F_SET(ldbt, LOCKED_ERROR);
-		return (EINVAL);
-	}
-	dbt = &ldbt->javainfo->dbt;
-
-	if ((*jnienv)->GetBooleanField(jnienv,
-	    jdbt, fid_Dbt_must_create_data) != 0)
-		F_SET(ldbt, LOCKED_CREATE_DATA);
-	else
-		ldbt->javainfo->array =
-			(*jnienv)->GetObjectField(jnienv, jdbt, fid_Dbt_data);
-
-	dbt->size = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_size);
-	dbt->ulen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_ulen);
-	dbt->dlen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_dlen);
-	dbt->doff = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_doff);
-	dbt->flags = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_flags);
-	ldbt->javainfo->offset = (*jnienv)->GetIntField(jnienv, jdbt,
-						    fid_Dbt_offset);
-
-	/*
-	 * If no flags are set, use default behavior of DB_DBT_MALLOC.
-	 * We can safely set dbt->flags because flags will never be copied
-	 * back to the Java Dbt.
-	 */
-	if (kind != inOp &&
-	    !F_ISSET(dbt, DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC))
-		F_SET(dbt, DB_DBT_MALLOC);
-
-	/*
-	 * If this is requested to be realloc with an existing array,
-	 * we cannot use the underlying realloc, because the array we
-	 * will pass in is allocated by the Java VM, not us, so it
-	 * cannot be realloced.  We simulate the reallocation by using
-	 * USERMEM and reallocating the java array when a ENOMEM error
-	 * occurs.  We change the flags during the operation, and they
-	 * are reset when the operation completes (in locked_dbt_put).
-	 */
-	if (F_ISSET(dbt, DB_DBT_REALLOC) && ldbt->javainfo->array != NULL) {
-		F_CLR(dbt, DB_DBT_REALLOC);
-		F_SET(dbt, DB_DBT_USERMEM);
-		F_SET(ldbt, LOCKED_REALLOC_NONNULL);
-	}
-
-	if ((F_ISSET(dbt, DB_DBT_USERMEM) || kind != outOp) &&
-	    !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
-
-		/*
-		 * If writing with DB_DBT_USERMEM
-		 * or it's a set (or get/set) operation,
-		 * then the data should point to a java array.
-		 * Note that outOp means data is coming out of the database
-		 * (it's a get).  inOp means data is going into the database
-		 * (either a put, or a key input).
-		 */
-		if (!ldbt->javainfo->array) {
-			report_exception(jnienv, "Dbt.data is null", 0, 0);
-			F_SET(ldbt, LOCKED_ERROR);
-			return (EINVAL);
-		}
-
-		/* Verify other parameters */
-		ldbt->java_array_len = (*jnienv)->GetArrayLength(jnienv,
-							ldbt->javainfo->array);
-		if (ldbt->javainfo->offset < 0 ) {
-			report_exception(jnienv, "Dbt.offset illegal", 0, 0);
-			F_SET(ldbt, LOCKED_ERROR);
-			return (EINVAL);
-		}
-		if (dbt->size + ldbt->javainfo->offset > ldbt->java_array_len) {
-			report_exception(jnienv,
-			 "Dbt.size + Dbt.offset greater than array length",
-					 0, 0);
-			F_SET(ldbt, LOCKED_ERROR);
-			return (EINVAL);
-		}
-
-		ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
-						ldbt->javainfo->array,
-						(jboolean *)0);
-
-		dbt->data = ldbt->before_data = ldbt->java_data +
-			ldbt->javainfo->offset;
-	}
-	else if (!F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
-
-		/*
-		 * If writing with DB_DBT_MALLOC or DB_DBT_REALLOC with
-		 * a null array, then the data is allocated by DB.
-		 */
-		dbt->data = ldbt->before_data = 0;
-	}
-
-	/*
-	 * RPC makes the assumption that if dbt->size is non-zero, there
-	 * is data to copy from dbt->data.  We may have set dbt->size
-	 * to a non-zero integer above but decided not to point
-	 * dbt->data at anything.  (One example is if we're doing an outOp
-	 * with an already-used Dbt whose values we expect to just
-	 * overwrite.)
-	 *
-	 * Clean up the dbt fields so we don't run into trouble.
-	 * (Note that doff, dlen, and flags all may contain meaningful
-	 * values.)
-	 */
-	if (dbt->data == NULL)
-		dbt->size = dbt->ulen = 0;
-
-	F_SET(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
-	return (0);
-}
-
-/*
- * locked_dbt_put must be called for any LOCKED_DBT struct before a
- * java handler returns to the user.  It can be thought of as the
- * LOCKED_DBT destructor.  It copies any information from temporary
- * structures back to user accessible arrays, and of course must free
- * memory and remove references.  The LOCKED_DBT itself is not freed,
- * as it is expected to be a stack variable.
- *
- * Note that after this call, the LOCKED_DBT can still be used in
- * limited ways, e.g. to look at values in the C DBT.
- */
-void
-locked_dbt_put(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
-{
-	DBT *dbt;
-
-	dbt = &ldbt->javainfo->dbt;
-
-	/*
-	 * If the error flag was set, we never succeeded
-	 * in allocating storage.
-	 */
-	if (F_ISSET(ldbt, LOCKED_ERROR))
-		return;
-
-	if (((F_ISSET(dbt, DB_DBT_USERMEM) ||
-	      F_ISSET(ldbt, LOCKED_REALLOC_NONNULL)) ||
-	     ldbt->kind == inOp) && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
-
-		/*
-		 * If writing with DB_DBT_USERMEM or it's a set
-		 * (or get/set) operation, then the data may be already in
-		 * the java array, in which case, we just need to release it.
-		 * If DB didn't put it in the array (indicated by the
-		 * dbt->data changing), we need to do that
-		 */
-		if (ldbt->before_data != ldbt->java_data) {
-			(*jnienv)->SetByteArrayRegion(jnienv,
-						      ldbt->javainfo->array,
-						      ldbt->javainfo->offset,
-						      dbt->ulen,
-						      ldbt->before_data);
-		}
-		(*jnienv)->ReleaseByteArrayElements(jnienv,
-						    ldbt->javainfo->array,
-						    ldbt->java_data, 0);
-		dbt->data = 0;
-	}
-	else if (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC) &&
-	    ldbt->kind != inOp && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
-
-		/*
-		 * If writing with DB_DBT_MALLOC, or DB_DBT_REALLOC
-		 * with a zero buffer, then the data was allocated by
-		 * DB.  If dbt->data is zero, it means an error
-		 * occurred (and should have been already reported).
-		 */
-		if (dbt->data) {
-
-			/*
-			 * In the case of SET_RANGE, the key is inOutOp
-			 * and when not found, its data will be left as
-			 * its original value.  Only copy and free it
-			 * here if it has been allocated by DB
-			 * (dbt->data has changed).
-			 */
-			if (dbt->data != ldbt->before_data) {
-				jbyteArray newarr;
-
-				if ((newarr = (*jnienv)->NewByteArray(jnienv,
-				    dbt->size)) == NULL) {
-					/* The JVM has posted an exception. */
-					F_SET(ldbt, LOCKED_ERROR);
-					return;
-				}
-				(*jnienv)->SetObjectField(jnienv, ldbt->jdbt,
-							  fid_Dbt_data,
-							  newarr);
-				ldbt->javainfo->offset = 0;
-				(*jnienv)->SetByteArrayRegion(jnienv,
-					      newarr, 0, dbt->size,
-					      (jbyte *)dbt->data);
-				(void)__os_ufree(dbenv, dbt->data);
-				dbt->data = 0;
-			}
-		}
-	}
-
-	/*
-	 * The size field may have changed after a DB API call,
-	 * so we set that back too.
-	 */
-	(*jnienv)->SetIntField(jnienv, ldbt->jdbt, fid_Dbt_size, dbt->size);
-	ldbt->javainfo->array = NULL;
-	F_CLR(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
-}
-
-/*
- * Realloc the java array to receive data if the DBT used
- * DB_DBT_REALLOC flag with a non-null data array, and the last
- * operation set the size field to an amount greater than ulen.
- * Return 1 if these conditions are met, otherwise 0.  This is used
- * internally to simulate the operations needed for DB_DBT_REALLOC.
- */
-int locked_dbt_realloc(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
-{
-	DBT *dbt;
-
-	COMPQUIET(dbenv, NULL);
-	dbt = &ldbt->javainfo->dbt;
-
-	if (!F_ISSET(ldbt, LOCKED_REALLOC_NONNULL) ||
-	    F_ISSET(ldbt, LOCKED_ERROR) || dbt->size <= dbt->ulen)
-		return (0);
-
-	(*jnienv)->ReleaseByteArrayElements(jnienv, ldbt->javainfo->array,
-					    ldbt->java_data, 0);
-
-	/*
-	 * We allocate a new array of the needed size.
-	 * We'll set the offset to 0, as the old offset
-	 * really doesn't make any sense.
-	 */
-	if ((ldbt->javainfo->array = (*jnienv)->NewByteArray(jnienv,
-	    dbt->size)) == NULL) {
-		F_SET(ldbt, LOCKED_ERROR);
-		return (0);
-	}
-
-	ldbt->java_array_len = dbt->ulen = dbt->size;
-	ldbt->javainfo->offset = 0;
-	(*jnienv)->SetObjectField(jnienv, ldbt->jdbt, fid_Dbt_data,
-	    ldbt->javainfo->array);
-	ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
-	    ldbt->javainfo->array, (jboolean *)0);
-	memcpy(ldbt->java_data, ldbt->before_data, dbt->ulen);
-	dbt->data = ldbt->before_data = ldbt->java_data;
-	return (1);
-}
-
-/****************************************************************
- *
- * Implementation of functions to manipulate LOCKED_STRING.
- */
-int
-locked_string_get(LOCKED_STRING *ls, JNIEnv *jnienv, jstring jstr)
-{
-	ls->jstr = jstr;
-
-	if (jstr == 0)
-		ls->string = 0;
-	else
-		ls->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
-							  (jboolean *)0);
-	return (0);
-}
-
-void locked_string_put(LOCKED_STRING *ls, JNIEnv *jnienv)
-{
-	if (ls->jstr)
-		(*jnienv)->ReleaseStringUTFChars(jnienv, ls->jstr, ls->string);
-}
diff --git a/storage/bdb/libdb_java/java_locked.h b/storage/bdb/libdb_java/java_locked.h
deleted file mode 100644
index a79d929abee..00000000000
--- a/storage/bdb/libdb_java/java_locked.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: java_locked.h,v 11.18 2002/05/07 16:12:42 dda Exp $
- */
-
-#ifndef _JAVA_LOCKED_H_
-#define	_JAVA_LOCKED_H_
-
-/*
- * Used as argument to locked_dbt_get().
- */
-typedef enum _OpKind {
-	inOp,		/* setting data in database (passing data in) */
-	outOp,		/* getting data from database to user memory */
-	inOutOp		/* both getting/setting data */
-} OpKind;
-
-/*
- * LOCKED_DBT
- *
- * A stack variable LOCKED_DBT should be declared for each Dbt used in a
- * native call to the DB API.  Before the DBT can be used, locked_dbt_get()
- * must be called to temporarily convert any java array found in the
- * Dbt (which has a pointer to a DBT_JAVAINFO struct) to actual bytes
- * in memory that remain locked in place.  These bytes are used during
- * the call to the DB C API, and are released and/or copied back when
- * locked_dbt_put is called.
- */
-typedef struct _locked_dbt
-{
-	/* these are accessed externally to locked_dbt_ functions */
-	DBT_JAVAINFO *javainfo;
-	unsigned int java_array_len;
-	jobject jdbt;
-
-	/* these are for used internally by locked_dbt_ functions */
-	jbyte *java_data;
-	jbyte *before_data;
-	OpKind kind;
-
-#define	LOCKED_ERROR		0x01	/* error occurred */
-#define	LOCKED_CREATE_DATA	0x02	/* must create data on the fly */
-#define	LOCKED_REALLOC_NONNULL	0x04	/* DB_DBT_REALLOC flag, nonnull data */
-	u_int32_t flags;
-} LOCKED_DBT;
-
-/* Fill the LOCKED_DBT struct and lock the Java byte array */
-extern int locked_dbt_get(LOCKED_DBT *, JNIEnv *, DB_ENV *, jobject, OpKind);
-
-/* unlock the Java byte array */
-extern void locked_dbt_put(LOCKED_DBT *, JNIEnv *, DB_ENV *);
-
-/* realloc the Java byte array */
-extern int locked_dbt_realloc(LOCKED_DBT *, JNIEnv *, DB_ENV *);
-
-/*
- * LOCKED_STRING
- *
- * A LOCKED_STRING exists temporarily to convert a java jstring object
- * to a char *.  Because the memory for the char * string is
- * managed by the JVM, it must be released when we are done
- * looking at it.  Typically, locked_string_get() is called at the
- * beginning of a function for each jstring object, and locked_string_put
- * is called at the end of each function for each LOCKED_STRING.
- */
-typedef struct _locked_string
-{
-	/* this accessed externally to locked_string_ functions */
-	const char *string;
-
-	/* this is used internally by locked_string_ functions */
-	jstring jstr;
-} LOCKED_STRING;
-
-extern int locked_string_get(LOCKED_STRING *, JNIEnv *jnienv, jstring jstr);
-extern void locked_string_put(LOCKED_STRING *, JNIEnv *jnienv);  /* this unlocks and frees mem */
-
-#endif /* !_JAVA_LOCKED_H_ */
diff --git a/storage/bdb/libdb_java/java_util.c b/storage/bdb/libdb_java/java_util.c
deleted file mode 100644
index 5a538ee0785..00000000000
--- a/storage/bdb/libdb_java/java_util.c
+++ /dev/null
@@ -1,890 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: java_util.c,v 11.49 2002/09/13 03:09:30 mjc Exp $";
-#endif /* not lint */
-
-#include 
-#include 
-
-#include "db_int.h"
-#include "java_util.h"
-
-#ifdef DB_WIN32
-#define	sys_errlist _sys_errlist
-#define	sys_nerr _sys_nerr
-#endif
-
-const char * const name_DB                 = "Db";
-const char * const name_DB_BTREE_STAT      = "DbBtreeStat";
-const char * const name_DBC                = "Dbc";
-const char * const name_DB_DEADLOCK_EX     = "DbDeadlockException";
-const char * const name_DB_ENV             = "DbEnv";
-const char * const name_DB_EXCEPTION       = "DbException";
-const char * const name_DB_HASH_STAT       = "DbHashStat";
-const char * const name_DB_LOCK            = "DbLock";
-const char * const name_DB_LOCK_STAT       = "DbLockStat";
-const char * const name_DB_LOCKNOTGRANTED_EX = "DbLockNotGrantedException";
-const char * const name_DB_LOGC            = "DbLogc";
-const char * const name_DB_LOG_STAT        = "DbLogStat";
-const char * const name_DB_LSN             = "DbLsn";
-const char * const name_DB_MEMORY_EX       = "DbMemoryException";
-const char * const name_DB_MPOOL_FSTAT     = "DbMpoolFStat";
-const char * const name_DB_MPOOL_STAT      = "DbMpoolStat";
-const char * const name_DB_PREPLIST        = "DbPreplist";
-const char * const name_DB_QUEUE_STAT      = "DbQueueStat";
-const char * const name_DB_REP_STAT        = "DbRepStat";
-const char * const name_DB_RUNRECOVERY_EX  = "DbRunRecoveryException";
-const char * const name_DBT                = "Dbt";
-const char * const name_DB_TXN             = "DbTxn";
-const char * const name_DB_TXN_STAT        = "DbTxnStat";
-const char * const name_DB_TXN_STAT_ACTIVE = "DbTxnStat$Active";
-const char * const name_DB_UTIL            = "DbUtil";
-const char * const name_DbAppendRecno      = "DbAppendRecno";
-const char * const name_DbBtreeCompare     = "DbBtreeCompare";
-const char * const name_DbBtreePrefix      = "DbBtreePrefix";
-const char * const name_DbDupCompare       = "DbDupCompare";
-const char * const name_DbEnvFeedback      = "DbEnvFeedback";
-const char * const name_DbErrcall          = "DbErrcall";
-const char * const name_DbHash             = "DbHash";
-const char * const name_DbLockRequest      = "DbLockRequest";
-const char * const name_DbFeedback         = "DbFeedback";
-const char * const name_DbRecoveryInit     = "DbRecoveryInit";
-const char * const name_DbRepTransport	   = "DbRepTransport";
-const char * const name_DbSecondaryKeyCreate = "DbSecondaryKeyCreate";
-const char * const name_DbTxnRecover       = "DbTxnRecover";
-const char * const name_RepElectResult = "DbEnv$RepElectResult";
-const char * const name_RepProcessMessage = "DbEnv$RepProcessMessage";
-
-const char * const string_signature    = "Ljava/lang/String;";
-
-jfieldID fid_Dbt_data;
-jfieldID fid_Dbt_offset;
-jfieldID fid_Dbt_size;
-jfieldID fid_Dbt_ulen;
-jfieldID fid_Dbt_dlen;
-jfieldID fid_Dbt_doff;
-jfieldID fid_Dbt_flags;
-jfieldID fid_Dbt_private_dbobj_;
-jfieldID fid_Dbt_must_create_data;
-jfieldID fid_DbLockRequest_op;
-jfieldID fid_DbLockRequest_mode;
-jfieldID fid_DbLockRequest_timeout;
-jfieldID fid_DbLockRequest_obj;
-jfieldID fid_DbLockRequest_lock;
-jfieldID fid_RepProcessMessage_envid;
-
-/****************************************************************
- *
- * Utility functions used by "glue" functions.
- */
-
-/*
- * Do any one time initialization, especially initializing any
- * unchanging methodIds, fieldIds, etc.
- */
-void one_time_init(JNIEnv *jnienv)
-{
-    jclass cl;
-
-    if ((cl = get_class(jnienv, name_DBT)) == NULL)
-	return;	/* An exception has been posted. */
-    fid_Dbt_data = (*jnienv)->GetFieldID(jnienv, cl, "data", "[B");
-    fid_Dbt_offset = (*jnienv)->GetFieldID(jnienv, cl, "offset", "I");
-    fid_Dbt_size = (*jnienv)->GetFieldID(jnienv, cl, "size", "I");
-    fid_Dbt_ulen = (*jnienv)->GetFieldID(jnienv, cl, "ulen", "I");
-    fid_Dbt_dlen = (*jnienv)->GetFieldID(jnienv, cl, "dlen", "I");
-    fid_Dbt_doff = (*jnienv)->GetFieldID(jnienv, cl, "doff", "I");
-    fid_Dbt_flags = (*jnienv)->GetFieldID(jnienv, cl, "flags", "I");
-    fid_Dbt_must_create_data = (*jnienv)->GetFieldID(jnienv, cl,
-						     "must_create_data", "Z");
-    fid_Dbt_private_dbobj_ =
-	(*jnienv)->GetFieldID(jnienv, cl, "private_dbobj_", "J");
-
-    if ((cl = get_class(jnienv, name_DbLockRequest)) == NULL)
-	return;	/* An exception has been posted. */
-    fid_DbLockRequest_op = (*jnienv)->GetFieldID(jnienv, cl, "op", "I");
-    fid_DbLockRequest_mode = (*jnienv)->GetFieldID(jnienv, cl, "mode", "I");
-    fid_DbLockRequest_timeout =
-	(*jnienv)->GetFieldID(jnienv, cl, "timeout", "I");
-    fid_DbLockRequest_obj = (*jnienv)->GetFieldID(jnienv, cl, "obj",
-						  "Lcom/sleepycat/db/Dbt;");
-    fid_DbLockRequest_lock = (*jnienv)->GetFieldID(jnienv, cl, "lock",
-						   "Lcom/sleepycat/db/DbLock;");
-
-    if ((cl = get_class(jnienv, name_RepProcessMessage)) == NULL)
-	return;	/* An exception has been posted. */
-    fid_RepProcessMessage_envid =
-	(*jnienv)->GetFieldID(jnienv, cl, "envid", "I");
-}
-
-/*
- * Get the private data from a Db* object that points back to a C DB_* object.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
-			jobject obj)
-{
-	jclass dbClass;
-	jfieldID id;
-	long_to_ptr lp;
-
-	if (!obj)
-		return (0);
-
-	if ((dbClass = get_class(jnienv, classname)) == NULL)
-		return (NULL);	/* An exception has been posted. */
-	id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
-	lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
-	return (lp.ptr);
-}
-
-/*
- * Set the private data in a Db* object that points back to a C DB_* object.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void set_private_dbobj(JNIEnv *jnienv, const char *classname,
-		       jobject obj, void *value)
-{
-	long_to_ptr lp;
-	jclass dbClass;
-	jfieldID id;
-
-	lp.java_long = 0;	/* no junk in case sizes mismatch */
-	lp.ptr = value;
-	if ((dbClass = get_class(jnienv, classname)) == NULL)
-		return;	/* An exception has been posted. */
-	id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
-	(*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
-}
-
-/*
- * Get the private data in a Db/DbEnv object that holds additional 'side data'.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void *get_private_info(JNIEnv *jnienv, const char *classname,
-		       jobject obj)
-{
-	jclass dbClass;
-	jfieldID id;
-	long_to_ptr lp;
-
-	if (!obj)
-		return (NULL);
-
-	if ((dbClass = get_class(jnienv, classname)) == NULL)
-		return (NULL);	/* An exception has been posted. */
-	id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
-	lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
-	return (lp.ptr);
-}
-
-/*
- * Set the private data in a Db/DbEnv object that holds additional 'side data'.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void set_private_info(JNIEnv *jnienv, const char *classname,
-		      jobject obj, void *value)
-{
-	long_to_ptr lp;
-	jclass dbClass;
-	jfieldID id;
-
-	lp.java_long = 0;	/* no junk in case sizes mismatch */
-	lp.ptr = value;
-	if ((dbClass = get_class(jnienv, classname)) == NULL)
-		return;	/* An exception has been posted. */
-	id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
-	(*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
-}
-
-/*
- * Given a non-qualified name (e.g. "foo"), get the class handle
- * for the fully qualified name (e.g. "com.sleepycat.db.foo")
- */
-jclass get_class(JNIEnv *jnienv, const char *classname)
-{
-	/*
-	 * Note: PERFORMANCE: It should be possible to cache jclass's.
-	 * If we do a NewGlobalRef on each one, we can keep them
-	 * around in a table.  A jclass is a jobject, and
-	 * since NewGlobalRef returns a jobject, it isn't
-	 * technically right, but it would likely work with
-	 * most implementations.  Possibly make it configurable.
-	 */
-	char fullname[128];
-
-	(void)snprintf(fullname, sizeof(fullname),
-	    "%s%s", DB_PACKAGE_NAME, classname);
-	return ((*jnienv)->FindClass(jnienv, fullname));
-}
-
-/*
- * Given a fully qualified name (e.g. "java.util.Hashtable")
- * return the jclass object.  If it can't be found, an
- * exception is raised and NULL is return.
- * This is appropriate to be used for classes that may
- * not be present.
- */
-jclass get_fully_qualified_class(JNIEnv *jnienv, const char *classname)
-{
-	jclass result;
-
-	result = ((*jnienv)->FindClass(jnienv, classname));
-	if (result == NULL) {
-		jclass cnfe;
-		char message[1024];
-
-		cnfe = (*jnienv)->FindClass(jnienv,
-				    "java/lang/ClassNotFoundException");
-		strncpy(message, classname, sizeof(message));
-		strncat(message, ": class not found", sizeof(message));
-		(*jnienv)->ThrowNew(jnienv, cnfe, message);
-	}
-	return (result);
-}
-
-/*
- * Set an individual field in a Db* object.
- * The field must be a DB object type.
- */
-void set_object_field(JNIEnv *jnienv, jclass class_of_this,
-		      jobject jthis, const char *object_classname,
-		      const char *name_of_field, jobject obj)
-{
-	char signature[512];
-	jfieldID id;
-
-	(void)snprintf(signature, sizeof(signature),
-	    "L%s%s;", DB_PACKAGE_NAME, object_classname);
-	id  = (*jnienv)->GetFieldID(
-	    jnienv, class_of_this, name_of_field, signature);
-	(*jnienv)->SetObjectField(jnienv, jthis, id, obj);
-}
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an integer type.
- */
-void set_int_field(JNIEnv *jnienv, jclass class_of_this,
-		   jobject jthis, const char *name_of_field, jint value)
-{
-	jfieldID id  =
-	    (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "I");
-	(*jnienv)->SetIntField(jnienv, jthis, id, value);
-}
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an integer type.
- */
-void set_long_field(JNIEnv *jnienv, jclass class_of_this,
-		    jobject jthis, const char *name_of_field, jlong value)
-{
-	jfieldID id  = (*jnienv)->GetFieldID(jnienv, class_of_this,
-					     name_of_field, "J");
-	(*jnienv)->SetLongField(jnienv, jthis, id, value);
-}
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an integer type.
- */
-void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
-		   jobject jthis, const char *name_of_field, DB_LSN value)
-{
-	set_object_field(jnienv, class_of_this, jthis, name_DB_LSN,
-			 name_of_field, get_DbLsn(jnienv, value));
-}
-
-/*
- * Report an exception back to the java side.
- */
-void report_exception(JNIEnv *jnienv, const char *text,
-		      int err, unsigned long expect_mask)
-{
-	jstring textString;
-	jclass dbexcept;
-	jclass javaexcept;
-	jthrowable obj;
-
-	textString = NULL;
-	dbexcept = NULL;
-	javaexcept = NULL;
-
-	switch (err) {
-	/*
-	 * DB_JAVA_CALLBACK is returned by
-	 * dbji_call_append_recno() (the append_recno callback)
-	 * when the Java version of the callback has thrown
-	 * an exception, and we want to pass the exception on.
-	 * The exception has already been thrown, we
-	 * don't want to throw a new one.
-	 */
-		case DB_JAVA_CALLBACK:
-			break;
-		case ENOENT:
-			/*
-			 * In this case there is a corresponding
-			 * standard java exception type that we'll use.
-			 * First we make sure that the calling function
-			 * expected this kind of error, if not we give
-			 * an 'internal error' DbException, since
-			 * we must not throw an exception type that isn't
-			 * declared in the signature.
-			 *
-			 * We'll make this a little more general if/when
-			 * we add more java standard exceptions.
-			 */
-			if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) != 0) {
-				javaexcept = (*jnienv)->FindClass(jnienv,
-				    "java/io/FileNotFoundException");
-			}
-			else {
-				char errstr[1024];
-
-				snprintf(errstr, sizeof(errstr),
-				  "internal error: unexpected errno: %s",
-					 text);
-				textString = get_java_string(jnienv,
-							     errstr);
-				dbexcept = get_class(jnienv,
-						     name_DB_EXCEPTION);
-			}
-			break;
-		case DB_RUNRECOVERY:
-			dbexcept = get_class(jnienv,
-					     name_DB_RUNRECOVERY_EX);
-			break;
-		case DB_LOCK_DEADLOCK:
-			dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
-			break;
-		default:
-			dbexcept = get_class(jnienv, name_DB_EXCEPTION);
-			break;
-	}
-	if (dbexcept != NULL) {
-		if (textString == NULL)
-			textString = get_java_string(jnienv, text);
-		if ((obj = create_exception(jnienv, textString, err, dbexcept))
-		    != NULL)
-			(*jnienv)->Throw(jnienv, obj);
-		/* Otherwise, an exception has been posted. */
-	}
-	else if (javaexcept != NULL)
-		(*jnienv)->ThrowNew(jnienv, javaexcept, text);
-	else
-		fprintf(stderr,
-		    "report_exception: failed to create an exception\n");
-}
-
-/*
- * Report an exception back to the java side, for the specific
- * case of DB_LOCK_NOTGRANTED, as more things are added to the
- * constructor of this type of exception.
- */
-void report_notgranted_exception(JNIEnv *jnienv, const char *text,
-				 db_lockop_t op, db_lockmode_t mode,
-				 jobject jdbt, jobject jlock, int index)
-{
-	jstring textString;
-	jclass dbexcept;
-	jthrowable obj;
-	jmethodID mid;
-
-	if ((dbexcept = get_class(jnienv, name_DB_LOCKNOTGRANTED_EX)) == NULL)
-		return;	/* An exception has been posted. */
-	textString = get_java_string(jnienv, text);
-
-	mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "",
-				     "(Ljava/lang/String;II"
-				     "Lcom/sleepycat/db/Dbt;"
-				     "Lcom/sleepycat/db/DbLock;I)V");
-	if ((obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
-	    mid, textString, op, mode, jdbt, jlock, index)) != NULL)
-		(*jnienv)->Throw(jnienv, obj);
-	else
-		fprintf(stderr,
-	    "report_notgranted_exception: failed to create an exception\n");
-}
-
-/*
- * Create an exception object and return it.
- * The given class must have a constructor that has a
- * constructor with args (java.lang.String text, int errno);
- * DbException and its subclasses fit this bill.
- */
-jobject create_exception(JNIEnv *jnienv, jstring text,
-				  int err, jclass dbexcept)
-{
-	jthrowable obj;
-	jmethodID mid;
-
-	mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "",
-				     "(Ljava/lang/String;I)V");
-	if (mid != NULL)
-		obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept, mid,
-					       text, err);
-	else {
-		fprintf(stderr, "Cannot get exception init method ID!\n");
-		obj = NULL;
-	}
-
-	return (obj);
-}
-
-/*
- * Report an error via the errcall mechanism.
- */
-void report_errcall(JNIEnv *jnienv, jobject errcall,
-		    jstring prefix, const char *message)
-{
-	jmethodID id;
-	jclass errcall_class;
-	jstring msg;
-
-	if ((errcall_class = get_class(jnienv, name_DbErrcall)) == NULL)
-		return;	/* An exception has been posted. */
-	msg = get_java_string(jnienv, message);
-
-	id = (*jnienv)->GetMethodID(jnienv, errcall_class,
-				 "errcall",
-				 "(Ljava/lang/String;Ljava/lang/String;)V");
-	if (id == NULL) {
-		fprintf(stderr, "Cannot get errcall methodID!\n");
-		fprintf(stderr, "error: %s\n", message);
-		return;
-	}
-
-	(*jnienv)->CallVoidMethod(jnienv, errcall, id, prefix, msg);
-}
-
-/*
- * If the object is null, report an exception and return false (0),
- * otherwise return true (1).
- */
-int verify_non_null(JNIEnv *jnienv, void *obj)
-{
-	if (obj == NULL) {
-		report_exception(jnienv, "null object", EINVAL, 0);
-		return (0);
-	}
-	return (1);
-}
-
-/*
- * If the error code is non-zero, report an exception and return false (0),
- * otherwise return true (1).
- */
-int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask)
-{
-	if (err == 0)
-		return (1);
-
-	report_exception(jnienv, db_strerror(err), err, expect_mask);
-	return (0);
-}
-
-/*
- * Verify that there was no memory error due to undersized Dbt.
- * If there is report a DbMemoryException, with the Dbt attached
- * and return false (0), otherwise return true (1).
- */
-int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *ldbt)
-{
-	DBT *dbt;
-	jobject exception;
-	jstring text;
-	jclass dbexcept;
-	jmethodID mid;
-
-	if (err != ENOMEM)
-		return (1);
-
-	dbt = &ldbt->javainfo->dbt;
-	if (!F_ISSET(dbt, DB_DBT_USERMEM) || dbt->size <= dbt->ulen)
-		return (1);
-
-	/* Create/throw an exception of type DbMemoryException */
-	if ((dbexcept = get_class(jnienv, name_DB_MEMORY_EX)) == NULL)
-		return (1);	/* An exception has been posted. */
-	text = get_java_string(jnienv,
-			       "Dbt not large enough for available data");
-	exception = create_exception(jnienv, text, ENOMEM, dbexcept);
-
-	/* Attach the dbt to the exception */
-	mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "set_dbt",
-				     "(L" DB_PACKAGE_NAME "Dbt;)V");
-	(*jnienv)->CallVoidMethod(jnienv, exception, mid, ldbt->jdbt);
-	(*jnienv)->Throw(jnienv, exception);
-	return (0);
-}
-
-/*
- * Create an object of the given class, calling its default constructor.
- */
-jobject create_default_object(JNIEnv *jnienv, const char *class_name)
-{
-	jmethodID id;
-	jclass dbclass;
-
-	if ((dbclass = get_class(jnienv, class_name)) == NULL)
-		return (NULL);	/* An exception has been posted. */
-	id = (*jnienv)->GetMethodID(jnienv, dbclass, "", "()V");
-	return ((*jnienv)->NewObject(jnienv, dbclass, id));
-}
-
-/*
- * Convert an DB object to a Java encapsulation of that object.
- * Note: This implementation creates a new Java object on each call,
- * so it is generally useful when a new DB object has just been created.
- */
-jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj)
-{
-	jobject jo;
-
-	if (!dbobj)
-		return (0);
-
-	jo = create_default_object(jnienv, class_name);
-	set_private_dbobj(jnienv, class_name, jo, dbobj);
-	return (jo);
-}
-
-/*
- * Create a copy of the string
- */
-char *dup_string(const char *str)
-{
-	int len;
-	char *retval;
-	int err;
-
-	len = strlen(str) + 1;
-	if ((err = __os_malloc(NULL, sizeof(char)*len, &retval)) != 0)
-		return (NULL);
-	strncpy(retval, str, len);
-	return (retval);
-}
-
-/*
- * Create a java string from the given string
- */
-jstring get_java_string(JNIEnv *jnienv, const char* string)
-{
-	if (string == 0)
-		return (0);
-	return ((*jnienv)->NewStringUTF(jnienv, string));
-}
-
-/*
- * Create a copy of the java string using __os_malloc.
- * Caller must free it.
- */
-char *get_c_string(JNIEnv *jnienv, jstring jstr)
-{
-	const char *utf;
-	char *retval;
-
-	utf = (*jnienv)->GetStringUTFChars(jnienv, jstr, NULL);
-	retval = dup_string(utf);
-	(*jnienv)->ReleaseStringUTFChars(jnienv, jstr, utf);
-	return (retval);
-}
-
-/*
- * Convert a java object to the various C pointers they represent.
- */
-DB *get_DB(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB *)get_private_dbobj(jnienv, name_DB, obj));
-}
-
-DB_BTREE_STAT *get_DB_BTREE_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_BTREE_STAT *)
-	    get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
-}
-
-DBC *get_DBC(JNIEnv *jnienv, jobject obj)
-{
-	return ((DBC *)get_private_dbobj(jnienv, name_DBC, obj));
-}
-
-DB_ENV *get_DB_ENV(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_ENV *)get_private_dbobj(jnienv, name_DB_ENV, obj));
-}
-
-DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_ENV_JAVAINFO *)get_private_info(jnienv, name_DB_ENV, obj));
-}
-
-DB_HASH_STAT *get_DB_HASH_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_HASH_STAT *)
-	    get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
-}
-
-DB_JAVAINFO *get_DB_JAVAINFO(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_JAVAINFO *)get_private_info(jnienv, name_DB, obj));
-}
-
-DB_LOCK *get_DB_LOCK(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_LOCK *)get_private_dbobj(jnienv, name_DB_LOCK, obj));
-}
-
-DB_LOGC *get_DB_LOGC(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_LOGC *)get_private_dbobj(jnienv, name_DB_LOGC, obj));
-}
-
-DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_LOG_STAT *)
-	    get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
-}
-
-DB_LSN *get_DB_LSN(JNIEnv *jnienv, /* DbLsn */ jobject obj) {
-	/*
-	 * DbLsns that are created from within java (new DbLsn()) rather
-	 * than from within C (get_DbLsn()) may not have a "private" DB_LSN
-	 * structure allocated for them yet.  We can't do this in the
-	 * actual constructor (init_lsn()), because there's no way to pass
-	 * in an initializing value in, and because the get_DbLsn()/
-	 * convert_object() code path needs a copy of the pointer before
-	 * the constructor gets called.  Thus, get_DbLsn() allocates and
-	 * fills a DB_LSN for the object it's about to create.
-	 *
-	 * Since "new DbLsn()" may reasonably be passed as an argument to
-	 * functions such as DbEnv.log_put(), though, we need to make sure
-	 * that DB_LSN's get allocated when the object was created from
-	 * Java, too.  Here, we lazily allocate a new private DB_LSN if
-	 * and only if it turns out that we don't already have one.
-	 *
-	 * The only exception is if the DbLsn object is a Java null
-	 * (in which case the jobject will also be NULL). Then a NULL
-	 * DB_LSN is legitimate.
-	 */
-	DB_LSN *lsnp;
-	int err;
-
-	if (obj == NULL)
-		return (NULL);
-
-	lsnp = (DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj);
-	if (lsnp == NULL) {
-		if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
-			return (NULL);
-		memset(lsnp, 0, sizeof(DB_LSN));
-		set_private_dbobj(jnienv, name_DB_LSN, obj, lsnp);
-	}
-
-	return (lsnp);
-}
-
-DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_MPOOL_FSTAT *)
-	    get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
-}
-
-DB_MPOOL_STAT *get_DB_MPOOL_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_MPOOL_STAT *)
-	    get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
-}
-
-DB_QUEUE_STAT *get_DB_QUEUE_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_QUEUE_STAT *)
-	    get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
-}
-
-DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_TXN *)get_private_dbobj(jnienv, name_DB_TXN, obj));
-}
-
-DB_TXN_STAT *get_DB_TXN_STAT(JNIEnv *jnienv, jobject obj)
-{
-	return ((DB_TXN_STAT *)
-	    get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
-}
-
-DBT *get_DBT(JNIEnv *jnienv, jobject obj)
-{
-	DBT_JAVAINFO *ji;
-
-	ji = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
-	if (ji == NULL)
-		return (NULL);
-	else
-		return (&ji->dbt);
-}
-
-DBT_JAVAINFO *get_DBT_JAVAINFO(JNIEnv *jnienv, jobject obj)
-{
-	return ((DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj));
-}
-
-/*
- * Convert a C pointer to the various Java objects they represent.
- */
-jobject get_DbBtreeStat(JNIEnv *jnienv, DB_BTREE_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_BTREE_STAT, dbobj));
-}
-
-jobject get_Dbc(JNIEnv *jnienv, DBC *dbobj)
-{
-	return (convert_object(jnienv, name_DBC, dbobj));
-}
-
-jobject get_DbHashStat(JNIEnv *jnienv, DB_HASH_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_HASH_STAT, dbobj));
-}
-
-jobject get_DbLogc(JNIEnv *jnienv, DB_LOGC *dbobj)
-{
-	return (convert_object(jnienv, name_DB_LOGC, dbobj));
-}
-
-jobject get_DbLogStat(JNIEnv *jnienv, DB_LOG_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_LOG_STAT, dbobj));
-}
-
-/*
- * LSNs are different since they are really normally
- * treated as by-value objects.  We actually create
- * a pointer to the LSN and store that, deleting it
- * when the LSN is GC'd.
- */
-jobject get_DbLsn(JNIEnv *jnienv, DB_LSN dbobj)
-{
-	DB_LSN *lsnp;
-	int err;
-
-	if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
-		return (NULL);
-
-	memset(lsnp, 0, sizeof(DB_LSN));
-	*lsnp = dbobj;
-	return (convert_object(jnienv, name_DB_LSN, lsnp));
-}
-
-/*
- * Shared code for get_Dbt and get_const_Dbt.
- *
- * XXX
- * Currently we make no distinction in implementation of these
- * two kinds of Dbts, although in the future we may want to.
- * (It's probably easier to make the optimizations listed below
- * with readonly Dbts).
- *
- * Dbt's created via this function are only used for a short lifetime,
- * during callback functions.  In the future, we should consider taking
- * advantage of this by having a pool of Dbt objects instead of creating
- * new ones each time.   Because of multithreading, we may need an
- * arbitrary number.  We might also have sharing of the byte arrays
- * used by the Dbts.
- */
-static jobject get_Dbt_shared(JNIEnv *jnienv, const DBT *dbt, int readonly,
-			      DBT_JAVAINFO **ret_info)
-{
-	jobject jdbt;
-	DBT_JAVAINFO *dbtji;
-
-	COMPQUIET(readonly, 0);
-
-	/* A NULL DBT should become a null Dbt. */
-	if (dbt == NULL)
-		return (NULL);
-
-	/*
-	 * Note that a side effect of creating a Dbt object
-	 * is the creation of the attached DBT_JAVAINFO object
-	 * (see the native implementation of Dbt.init())
-	 * A DBT_JAVAINFO object contains its own DBT.
-	 */
-	jdbt = create_default_object(jnienv, name_DBT);
-	dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
-	memcpy(&dbtji->dbt, dbt, sizeof(DBT));
-
-	/*
-	 * Set the boolean indicator so that the Java side knows to
-	 * call back when it wants to look at the array.  This avoids
-	 * needlessly creating/copying arrays that may never be looked at.
-	 */
-	(*jnienv)->SetBooleanField(jnienv, jdbt, fid_Dbt_must_create_data, 1);
-	(*jnienv)->SetIntField(jnienv, jdbt, fid_Dbt_size, dbt->size);
-
-	if (ret_info != NULL)
-	    *ret_info = dbtji;
-	return (jdbt);
-}
-
-/*
- * Get a writeable Dbt.
- *
- * Currently we're sharing code with get_const_Dbt.
- * It really shouldn't be this way, we have a DBT that we can
- * change, and have some mechanism for copying back
- * any changes to the original DBT.
- */
-jobject get_Dbt(JNIEnv *jnienv, DBT *dbt,
-		DBT_JAVAINFO **ret_info)
-{
-	return (get_Dbt_shared(jnienv, dbt, 0, ret_info));
-}
-
-/*
- * Get a Dbt that we promise not to change, or at least
- * if there are changes, they don't matter and won't get
- * seen by anyone.
- */
-jobject get_const_Dbt(JNIEnv *jnienv, const DBT *dbt,
-		      DBT_JAVAINFO **ret_info)
-{
-	return (get_Dbt_shared(jnienv, dbt, 1, ret_info));
-}
-
-jobject get_DbMpoolFStat(JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_MPOOL_FSTAT, dbobj));
-}
-
-jobject get_DbMpoolStat(JNIEnv *jnienv, DB_MPOOL_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_MPOOL_STAT, dbobj));
-}
-
-jobject get_DbQueueStat(JNIEnv *jnienv, DB_QUEUE_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_QUEUE_STAT, dbobj));
-}
-
-jobject get_DbTxn(JNIEnv *jnienv, DB_TXN *dbobj)
-{
-	return (convert_object(jnienv, name_DB_TXN, dbobj));
-}
-
-jobject get_DbTxnStat(JNIEnv *jnienv, DB_TXN_STAT *dbobj)
-{
-	return (convert_object(jnienv, name_DB_TXN_STAT, dbobj));
-}
diff --git a/storage/bdb/libdb_java/java_util.h b/storage/bdb/libdb_java/java_util.h
deleted file mode 100644
index 08187f6b51f..00000000000
--- a/storage/bdb/libdb_java/java_util.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2002
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: java_util.h,v 11.44 2002/08/29 14:22:24 margo Exp $
- */
-
-#ifndef _JAVA_UTIL_H_
-#define	_JAVA_UTIL_H_
-
-#ifdef _MSC_VER
-
-/*
- * These are level 4 warnings that are explicitly disabled.
- * With Visual C++, by default you do not see above level 3 unless
- * you use /W4.  But we like to compile with the highest level
- * warnings to catch other errors.
- *
- * 4201: nameless struct/union
- *       triggered by standard include file 
- *
- * 4244: '=' : convert from '__int64' to 'unsigned int', possible loss of data
- *       results from making size_t data members correspond to jlongs
- *
- * 4514: unreferenced inline function has been removed
- *       jni.h defines methods that are not called
- *
- * 4127: conditional expression is constant
- *       occurs because of arg in JAVADB_RW_ACCESS_STRING macro
- */
-#pragma warning(disable: 4244 4201 4514 4127)
-
-#endif
-
-#include "db_config.h"
-#include "db.h"
-#include "db_int.h"
-#include 
-#include "java_info.h"
-#include "java_locked.h"
-#include              /* needed for memset */
-
-#define	DB_PACKAGE_NAME "com/sleepycat/db/"
-
-/* Union to convert longs to pointers (see {get,set}_private_dbobj). */
-typedef union {
-    jlong java_long;
-    void *ptr;
-} long_to_ptr;
-
-/****************************************************************
- *
- * Utility functions and definitions used by "glue" functions.
- */
-
-#define	NOT_IMPLEMENTED(str) \
-	report_exception(jnienv, str /*concatenate*/ ": not implemented", 0)
-
-/*
- * Get, delete a global reference.
- * Making this operation a function call allows for
- * easier tracking for debugging.  Global references
- * are mostly grabbed at 'open' and 'close' points,
- * so there shouldn't be a big performance hit.
- *
- * Macro-izing this makes it easier to add debugging code
- * to track unreleased references.
- */
-#ifdef DBJAVA_DEBUG
-#include 
-static void wrdebug(const char *str)
-{
-	write(2, str, strlen(str));
-	write(2, "\n", 1);
-}
-
-static jobject debug_new_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
-{
-	wrdebug(s);
-	return ((*jnienv)->NewGlobalRef(jnienv, obj));
-}
-
-static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
-{
-	wrdebug(s);
-	(*jnienv)->DeleteGlobalRef(jnienv, obj);
-}
-
-#define	NEW_GLOBAL_REF(jnienv, obj)  \
-	debug_new_global_ref(jnienv, obj, "+Ref: " #obj)
-#define	DELETE_GLOBAL_REF(jnienv, obj) \
-	debug_delete_global_ref(jnienv, obj, "-Ref: " #obj)
-#else
-#define	NEW_GLOBAL_REF(jnienv, obj)     (*jnienv)->NewGlobalRef(jnienv, obj)
-#define	DELETE_GLOBAL_REF(jnienv, obj)  (*jnienv)->DeleteGlobalRef(jnienv, obj)
-#define	wrdebug(x)
-#endif
-
-/*
- * Do any one time initialization, especially initializing any
- * unchanging methodIds, fieldIds, etc.
- */
-void one_time_init(JNIEnv *jnienv);
-
-/*
- * Get the current JNIEnv from the java VM.
- * If the jvm argument is null, uses the default
- * jvm stored during the first invocation.
- */
-JNIEnv *get_jnienv(JavaVM *jvm);
-
-/*
- * Get the private data from a Db* object that points back to a C DB_* object.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
-		       jobject obj);
-
-/*
- * Set the private data in a Db* object that points back to a C DB_* object.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void set_private_dbobj(JNIEnv *jnienv, const char *classname,
-		      jobject obj, void *value);
-
-/*
- * Get the private data in a Db/DbEnv object that holds additional 'side data'.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void *get_private_info(JNIEnv *jnienv, const char *classname,
-		       jobject obj);
-
-/*
- * Set the private data in a Db/DbEnv object that holds additional 'side data'.
- * The private data is stored in the object as a Java long (64 bits),
- * which is long enough to store a pointer on current architectures.
- */
-void set_private_info(JNIEnv *jnienv, const char *classname,
-		      jobject obj, void *value);
-
-/*
- * Given a non-qualified name (e.g. "foo"), get the class handle
- * for the fully qualified name (e.g. "com.sleepycat.db.foo")
- */
-jclass get_class(JNIEnv *jnienv, const char *classname);
-
-/*
- * Set an individual field in a Db* object.
- * The field must be a DB object type.
- */
-void set_object_field(JNIEnv *jnienv, jclass class_of_this,
-		      jobject jthis, const char *object_classname,
-		      const char *name_of_field, jobject obj);
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an integer type.
- */
-void set_int_field(JNIEnv *jnienv, jclass class_of_this,
-		   jobject jthis, const char *name_of_field, jint value);
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an integer type.
- */
-void set_long_field(JNIEnv *jnienv, jclass class_of_this,
-			jobject jthis, const char *name_of_field, jlong value);
-
-/*
- * Set an individual field in a Db* object.
- * The field must be an DbLsn type.
- */
-void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
-		   jobject jthis, const char *name_of_field, DB_LSN value);
-
-/*
- * Values of flags for verify_return() and report_exception().
- * These indicate what sort of exceptions the method may throw
- * (in addition to DbException).
- */
-static const u_int32_t EXCEPTION_FILE_NOT_FOUND = 0x0001; /*FileNotFound*/
-
-/*
- * Report an exception back to the java side.
- */
-void report_exception(JNIEnv *jnienv, const char *text,
-		      int err, unsigned long expect_mask);
-
-/*
- * Report an exception back to the java side, for the specific
- * case of DB_LOCK_NOTGRANTED, as more things are added to the
- * constructor of this type of exception.
- */
-void report_notgranted_exception(JNIEnv *jnienv, const char *text,
-				 db_lockop_t op, db_lockmode_t mode,
-				 jobject jdbt, jobject jlock, int index);
-
-/*
- * Create an exception object and return it.
- * The given class must have a constructor that has a
- * constructor with args (java.lang.String text, int errno);
- * DbException and its subclasses fit this bill.
- */
-jobject create_exception(JNIEnv *jnienv, jstring text,
-			 int err, jclass dbexcept);
-
-/*
- * Report an error via the errcall mechanism.
- */
-void report_errcall(JNIEnv *jnienv, jobject errcall,
-		    jstring prefix, const char *message);
-
-/*
- * If the object is null, report an exception and return false (0),
- * otherwise return true (1).
- */
-int verify_non_null(JNIEnv *jnienv, void *obj);
-
-/*
- * If the error code is non-zero, report an exception and return false (0),
- * otherwise return true (1).
- */
-int verify_return(JNIEnv *jnienv, int err, unsigned long flags);
-
-/*
- * Verify that there was no memory error due to undersized Dbt.
- * If there is report a DbMemoryException, with the Dbt attached
- * and return false (0), otherwise return true (1).
- */
-int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *locked_dbt);
-
-/*
- * Create an object of the given class, calling its default constructor.
- */
-jobject create_default_object(JNIEnv *jnienv, const char *class_name);
-
-/*
- * Create a Dbt object, , calling its default constructor.
- */
-jobject create_dbt(JNIEnv *jnienv, const char *class_name);
-
-/*
- * Convert an DB object to a Java encapsulation of that object.
- * Note: This implementation creates a new Java object on each call,
- * so it is generally useful when a new DB object has just been created.
- */
-jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj);
-
-/*
- * Create a copy of the java string using __os_malloc.
- * Caller must free it.
- */
-char *get_c_string(JNIEnv *jnienv, jstring jstr);
-
-/*
- * Create a java string from the given string
- */
-jstring get_java_string(JNIEnv *jnienv, const char* string);
-
-/*
- * Convert a java object to the various C pointers they represent.
- */
-DB             *get_DB            (JNIEnv *jnienv, jobject obj);
-DB_BTREE_STAT  *get_DB_BTREE_STAT (JNIEnv *jnienv, jobject obj);
-DBC            *get_DBC           (JNIEnv *jnienv, jobject obj);
-DB_ENV         *get_DB_ENV        (JNIEnv *jnienv, jobject obj);
-DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO (JNIEnv *jnienv, jobject obj);
-DB_HASH_STAT   *get_DB_HASH_STAT  (JNIEnv *jnienv, jobject obj);
-DB_JAVAINFO    *get_DB_JAVAINFO   (JNIEnv *jnienv, jobject obj);
-DB_LOCK        *get_DB_LOCK       (JNIEnv *jnienv, jobject obj);
-DB_LOGC        *get_DB_LOGC       (JNIEnv *jnienv, jobject obj);
-DB_LOG_STAT    *get_DB_LOG_STAT   (JNIEnv *jnienv, jobject obj);
-DB_LSN         *get_DB_LSN        (JNIEnv *jnienv, jobject obj);
-DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj);
-DB_MPOOL_STAT  *get_DB_MPOOL_STAT (JNIEnv *jnienv, jobject obj);
-DB_QUEUE_STAT  *get_DB_QUEUE_STAT (JNIEnv *jnienv, jobject obj);
-DB_TXN         *get_DB_TXN        (JNIEnv *jnienv, jobject obj);
-DB_TXN_STAT    *get_DB_TXN_STAT   (JNIEnv *jnienv, jobject obj);
-DBT            *get_DBT           (JNIEnv *jnienv, jobject obj);
-DBT_JAVAINFO   *get_DBT_JAVAINFO  (JNIEnv *jnienv, jobject obj);
-
-/*
- * From a C object, create a Java object.
- */
-jobject get_DbBtreeStat  (JNIEnv *jnienv, DB_BTREE_STAT *dbobj);
-jobject get_Dbc          (JNIEnv *jnienv, DBC *dbobj);
-jobject get_DbHashStat   (JNIEnv *jnienv, DB_HASH_STAT *dbobj);
-jobject get_DbLogc       (JNIEnv *jnienv, DB_LOGC *dbobj);
-jobject get_DbLogStat    (JNIEnv *jnienv, DB_LOG_STAT *dbobj);
-jobject get_DbLsn        (JNIEnv *jnienv, DB_LSN dbobj);
-jobject get_DbMpoolStat  (JNIEnv *jnienv, DB_MPOOL_STAT *dbobj);
-jobject get_DbMpoolFStat (JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj);
-jobject get_DbQueueStat  (JNIEnv *jnienv, DB_QUEUE_STAT *dbobj);
-jobject get_const_Dbt    (JNIEnv *jnienv, const DBT *dbt, DBT_JAVAINFO **retp);
-jobject get_Dbt          (JNIEnv *jnienv, DBT *dbt, DBT_JAVAINFO **retp);
-jobject get_DbTxn        (JNIEnv *jnienv, DB_TXN *dbobj);
-jobject get_DbTxnStat    (JNIEnv *jnienv, DB_TXN_STAT *dbobj);
-
-/* The java names of DB classes */
-extern const char * const name_DB;
-extern const char * const name_DB_BTREE_STAT;
-extern const char * const name_DBC;
-extern const char * const name_DB_DEADLOCK_EX;
-extern const char * const name_DB_ENV;
-extern const char * const name_DB_EXCEPTION;
-extern const char * const name_DB_HASH_STAT;
-extern const char * const name_DB_LOCK;
-extern const char * const name_DB_LOCK_STAT;
-extern const char * const name_DB_LOGC;
-extern const char * const name_DB_LOG_STAT;
-extern const char * const name_DB_LSN;
-extern const char * const name_DB_MEMORY_EX;
-extern const char * const name_DB_MPOOL_FSTAT;
-extern const char * const name_DB_MPOOL_STAT;
-extern const char * const name_DB_LOCKNOTGRANTED_EX;
-extern const char * const name_DB_PREPLIST;
-extern const char * const name_DB_QUEUE_STAT;
-extern const char * const name_DB_REP_STAT;
-extern const char * const name_DB_RUNRECOVERY_EX;
-extern const char * const name_DBT;
-extern const char * const name_DB_TXN;
-extern const char * const name_DB_TXN_STAT;
-extern const char * const name_DB_TXN_STAT_ACTIVE;
-extern const char * const name_DB_UTIL;
-extern const char * const name_DbAppendRecno;
-extern const char * const name_DbBtreeCompare;
-extern const char * const name_DbBtreePrefix;
-extern const char * const name_DbDupCompare;
-extern const char * const name_DbEnvFeedback;
-extern const char * const name_DbErrcall;
-extern const char * const name_DbFeedback;
-extern const char * const name_DbHash;
-extern const char * const name_DbRecoveryInit;
-extern const char * const name_DbRepTransport;
-extern const char * const name_DbSecondaryKeyCreate;
-extern const char * const name_DbTxnRecover;
-extern const char * const name_RepElectResult;
-extern const char * const name_RepProcessMessage;
-
-extern const char * const string_signature;
-
-extern jfieldID fid_Dbt_data;
-extern jfieldID fid_Dbt_offset;
-extern jfieldID fid_Dbt_size;
-extern jfieldID fid_Dbt_ulen;
-extern jfieldID fid_Dbt_dlen;
-extern jfieldID fid_Dbt_doff;
-extern jfieldID fid_Dbt_flags;
-extern jfieldID fid_Dbt_must_create_data;
-extern jfieldID fid_DbLockRequest_op;
-extern jfieldID fid_DbLockRequest_mode;
-extern jfieldID fid_DbLockRequest_timeout;
-extern jfieldID fid_DbLockRequest_obj;
-extern jfieldID fid_DbLockRequest_lock;
-extern jfieldID fid_RepProcessMessage_envid;
-
-#define JAVADB_ARGS JNIEnv *jnienv, jobject jthis
-
-#define	JAVADB_GET_FLD(j_class, j_fieldtype, j_field, c_type, c_field)	      \
-JNIEXPORT j_fieldtype JNICALL						      \
-  Java_com_sleepycat_db_##j_class##_get_1##j_field			      \
-  (JAVADB_ARGS)								      \
-{									      \
-	c_type *db= get_##c_type(jnienv, jthis);			      \
-									      \
-	if (verify_non_null(jnienv, db))				      \
-		return (db->c_field);					      \
-	return (0);							      \
-}
-
-#define	JAVADB_SET_FLD(j_class, j_fieldtype, j_field, c_type, c_field)	      \
-JNIEXPORT void JNICALL							      \
-  Java_com_sleepycat_db_##j_class##_set_1##j_field			      \
-  (JAVADB_ARGS, j_fieldtype value)					      \
-{									      \
-	c_type *db= get_##c_type(jnienv, jthis);			      \
-									      \
-	if (verify_non_null(jnienv, db))				      \
-		db->c_field = value;					      \
-}
-
-#define	JAVADB_METHOD(_meth, _argspec, c_type, c_meth, _args)		      \
-JNIEXPORT void JNICALL Java_com_sleepycat_db_##_meth _argspec		      \
-{									      \
-	c_type *c_this = get_##c_type(jnienv, jthis);			      \
-	int ret;							      \
-									      \
-	if (!verify_non_null(jnienv, c_this))				      \
-		return;							      \
-	ret = c_this->c_meth _args;					      \
-	if (!DB_RETOK_STD(ret))						      \
-		report_exception(jnienv, db_strerror(ret), ret, 0);	      \
-}
-
-#define	JAVADB_METHOD_INT(_meth, _argspec, c_type, c_meth, _args, _retok)     \
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_##_meth _argspec		      \
-{									      \
-	c_type *c_this = get_##c_type(jnienv, jthis);			      \
-	int ret;							      \
-									      \
-	if (!verify_non_null(jnienv, c_this))				      \
-		return (0);						      \
-	ret = c_this->c_meth _args;					      \
-	if (!_retok(ret))						      \
-		report_exception(jnienv, db_strerror(ret), ret, 0);	      \
-	return ((jint)ret);						      \
-}
-
-#define	JAVADB_SET_METH(j_class, j_type, j_fld, c_type, c_field)	      \
-    JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, j_type val), c_type,  \
-    set_##c_field, (c_this, val))
-
-#define	JAVADB_SET_METH_STR(j_class, j_fld, c_type, c_field)		      \
-    JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, jstring val), c_type, \
-    set_##c_field, (c_this, (*jnienv)->GetStringUTFChars(jnienv, val, NULL)))
-
-
-/*
- * These macros are used by code generated by the s_java script.
- */
-#define JAVADB_STAT_INT(env, cl, jobj, statp, name) \
-	set_int_field(jnienv, cl, jobj, #name, statp->name)
-
-#define JAVADB_STAT_LSN(env, cl, jobj, statp, name) \
-	set_lsn_field(jnienv, cl, jobj, #name, statp->name)
-
-#define JAVADB_STAT_LONG(env, cl, jobj, statp, name) \
-	set_long_field(jnienv, cl, jobj, #name, statp->name)
-
-/*
- * We build the active list separately.
- */
-#define JAVADB_STAT_ACTIVE(env, cl, jobj, statp, name) \
-	do {} while(0)
-
-#endif /* !_JAVA_UTIL_H_ */
diff --git a/storage/bdb/lock/Design b/storage/bdb/lock/Design
index f0bb5c6e99c..0fcdca2a211 100644
--- a/storage/bdb/lock/Design
+++ b/storage/bdb/lock/Design
@@ -1,4 +1,4 @@
-# $Id: Design,v 11.5 2002/02/01 19:07:18 bostic Exp $
+# $Id: Design,v 12.0 2004/11/17 03:44:06 bostic Exp $
 
 Synchronization in the Locking Subsystem
 
diff --git a/storage/bdb/lock/lock.c b/storage/bdb/lock/lock.c
index 2b4f63e0da9..c3e0173f05c 100644
--- a/storage/bdb/lock/lock.c
+++ b/storage/bdb/lock/lock.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock.c,v 11.167 2004/10/15 16:59:41 bostic Exp $
+ * $Id: lock.c,v 12.19 2005/10/15 15:16:57 bostic Exp $
  */
 
 #include "db_config.h"
@@ -29,7 +29,7 @@ static int  __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
 static int  __lock_put_internal __P((DB_LOCKTAB *,
 		struct __db_lock *, u_int32_t,  u_int32_t));
 static int  __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
-static void __lock_remove_waiter __P((DB_LOCKTAB *,
+static int __lock_remove_waiter __P((DB_LOCKTAB *,
 		DB_LOCKOBJ *, struct __db_lock *, db_status_t));
 static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));
 
@@ -50,7 +50,8 @@ __lock_vec_pp(dbenv, locker, flags, list, nlist, elistp)
 	int nlist;
 	DB_LOCKREQ *list, **elistp;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -61,12 +62,10 @@ __lock_vec_pp(dbenv, locker, flags, list, nlist, elistp)
 	     "DB_ENV->lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_vec(dbenv, locker, flags, list, nlist, elistp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv,
+	    (__lock_vec(dbenv, locker, flags, list, nlist, elistp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -108,7 +107,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
 	region = lt->reginfo.primary;
 
 	run_dd = 0;
-	LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+	LOCK_SYSTEM_LOCK(dbenv);
 	for (i = 0, ret = 0; i < nlist && ret == 0; i++)
 		switch (list[i].op) {
 		case DB_LOCK_GET_TIMEOUT:
@@ -119,7 +118,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
 				LOCK_INIT(list[i].lock);
 				break;
 			}
-			ret = __lock_get_internal(dbenv->lk_handle,
+			ret = __lock_get_internal(lt,
 			    locker, flags, list[i].obj,
 			    list[i].mode, list[i].timeout, &list[i].lock);
 			break;
@@ -190,7 +189,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
 				    locker_links, __db_lock);
 				if (writes == 1 ||
 				    lp->mode == DB_LOCK_READ ||
-				    lp->mode == DB_LOCK_DIRTY) {
+				    lp->mode == DB_LOCK_READ_UNCOMMITTED) {
 					SH_LIST_REMOVE(lp,
 					    locker_links, __db_lock);
 					sh_obj = (DB_LOCKOBJ *)
@@ -342,7 +341,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
 	if (ret == 0 && region->detect != DB_LOCK_NORUN &&
 	     (region->need_dd || LOCK_TIME_ISVALID(&region->next_timeout)))
 		run_dd = 1;
-	UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 
 	if (run_dd)
 		(void)__lock_detect(dbenv, region->detect, &did_abort);
@@ -368,7 +367,8 @@ __lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock)
 	db_lockmode_t lock_mode;
 	DB_LOCK *lock;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -379,12 +379,10 @@ __lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock)
 	    DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_get(dbenv, locker, flags, obj, lock_mode, lock);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv,
+	    (__lock_get(dbenv, locker, flags, obj, lock_mode, lock)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -403,17 +401,19 @@ __lock_get(dbenv, locker, flags, obj, lock_mode, lock)
 	db_lockmode_t lock_mode;
 	DB_LOCK *lock;
 {
+	DB_LOCKTAB *lt;
 	int ret;
 
+	lt = dbenv->lk_handle;
+
 	if (IS_RECOVERING(dbenv)) {
 		LOCK_INIT(*lock);
 		return (0);
 	}
 
-	LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
-	ret = __lock_get_internal(dbenv->lk_handle,
-	    locker, flags, obj, lock_mode, 0, lock);
-	UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+	LOCK_SYSTEM_LOCK(dbenv);
+	ret = __lock_get_internal(lt, locker, flags, obj, lock_mode, 0, lock);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
@@ -434,11 +434,12 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 	db_timeout_t timeout;
 	DB_LOCK *lock;
 {
-	struct __db_lock *newl, *lp, *wwrite;
+	struct __db_lock *newl, *lp;
 	DB_ENV *dbenv;
 	DB_LOCKER *sh_locker;
 	DB_LOCKOBJ *sh_obj;
 	DB_LOCKREGION *region;
+	DB_THREAD_INFO *ip;
 	u_int32_t holder, locker_ndx, obj_ndx;
 	int did_abort, ihold, grant_dirty, no_dd, ret, t_ret;
 
@@ -464,20 +465,16 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 	no_dd = ret = 0;
 	newl = NULL;
 
-	/*
-	 * If we are not going to reuse this lock, invalidate it
-	 * so that if we fail it will not look like a valid lock.
-	 */
-	if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
-		LOCK_INIT(*lock);
-
 	/* Check that the lock mode is valid.  */
 	if (lock_mode >= (db_lockmode_t)region->stat.st_nmodes) {
 		__db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
 		    (u_long)lock_mode);
 		return (EINVAL);
 	}
-	region->stat.st_nrequests++;
+	if (LF_ISSET(DB_LOCK_UPGRADE))
+		region->stat.st_nupgrade++;
+	else if (!LF_ISSET(DB_LOCK_SWITCH))
+		region->stat.st_nrequests++;
 
 	if (obj == NULL) {
 		DB_ASSERT(LOCK_ISSET(*lock));
@@ -490,6 +487,13 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 			goto err;
 	}
 
+	/*
+	 * If we are not going to reuse this lock, invalidate it
+	 * so that if we fail it will not look like a valid lock.
+	 */
+	if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
+		LOCK_INIT(*lock);
+
 	/* Get the locker, we may need it to find our parent. */
 	LOCKER_LOCK(lt, region, locker, locker_ndx);
 	if ((ret = __lock_getlocker(lt, locker,
@@ -534,7 +538,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 	ihold = 0;
 	grant_dirty = 0;
 	holder = 0;
-	wwrite = NULL;
 
 	/*
 	 * SWITCH is a special case, used by the queue access method
@@ -548,6 +551,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 	else
 		lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
 	for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+		DB_ASSERT(lp->status != DB_LSTAT_FREE);
 		if (locker == lp->holder) {
 			if (lp->mode == lock_mode &&
 			    lp->status == DB_LSTAT_HELD) {
@@ -568,9 +572,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 				goto done;
 			} else {
 				ihold = 1;
-				if (lock_mode == DB_LOCK_WRITE &&
-				    lp->mode == DB_LOCK_WWRITE)
-					wwrite = lp;
 			}
 		} else if (__lock_is_parent(lt, lp->holder, sh_locker))
 			ihold = 1;
@@ -583,17 +584,14 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 		}
 	}
 
-	/* If we want a write lock and we have a was write, upgrade. */
-	if (wwrite != NULL)
-		LF_SET(DB_LOCK_UPGRADE);
-
 	/*
 	 * If there are conflicting holders we will have to wait.  An upgrade
 	 * or dirty reader goes to the head of the queue, everyone else to the
 	 * back.
 	 */
 	if (lp != NULL) {
-		if (LF_ISSET(DB_LOCK_UPGRADE) || lock_mode == DB_LOCK_DIRTY)
+		if (LF_ISSET(DB_LOCK_UPGRADE) ||
+		    lock_mode == DB_LOCK_READ_UNCOMMITTED)
 			action = HEAD;
 		else
 			action = TAIL;
@@ -646,7 +644,8 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 			 */
 			if (lp == NULL)
 				action = GRANT;
-			else if (lock_mode == DB_LOCK_DIRTY && grant_dirty) {
+			else if (grant_dirty &&
+			    lock_mode == DB_LOCK_READ_UNCOMMITTED) {
 				/*
 				 * An upgrade will be at the head of the
 				 * queue.
@@ -658,7 +657,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 					action = SECOND;
 				else
 					action = GRANT;
-			} else if (lock_mode == DB_LOCK_DIRTY)
+			} else if (lock_mode == DB_LOCK_READ_UNCOMMITTED)
 				action = SECOND;
 			else
 				action = TAIL;
@@ -680,6 +679,23 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 		if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
 			region->stat.st_maxnlocks = region->stat.st_nlocks;
 
+		/*
+		 * Allocate a mutex if we do not have a mutex backing the lock.
+		 *
+		 * Use the lock mutex to block the thread; lock the mutex
+		 * when it is allocated so that we will block when we try
+		 * to lock it again.  We will wake up when another thread
+		 * grants the lock and releases the mutex.  We leave it
+		 * locked for the next use of this lock object.
+		 */
+		if (newl->mtx_lock == MUTEX_INVALID) {
+			if ((ret = __mutex_alloc(dbenv, MTX_LOGICAL_LOCK,
+			    DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_SELF_BLOCK,
+			    &newl->mtx_lock)) != 0)
+				goto err;
+			MUTEX_LOCK(dbenv, newl->mtx_lock);
+		}
+
 		newl->holder = locker;
 		newl->refcount = 1;
 		newl->mode = lock_mode;
@@ -700,15 +716,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
 		break;
 
 	case UPGRADE:
-upgrade:	if (wwrite != NULL) {
-			lp = wwrite;
-			lp->refcount++;
-			lock->off = R_OFFSET(&lt->reginfo, lp);
-			lock->gen = lp->gen;
-			lock->mode = lock_mode;
-		}
-		else
-			lp = R_ADDR(&lt->reginfo, lock->off);
+upgrade:	lp = R_ADDR(&lt->reginfo, lock->off);
 		if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
 			sh_locker->nwrites++;
 		lp->mode = lock_mode;
@@ -728,7 +736,7 @@ upgrade:	if (wwrite != NULL) {
 	case SECOND:
 		if (LF_ISSET(DB_LOCK_NOWAIT)) {
 			ret = DB_LOCK_NOTGRANTED;
-			region->stat.st_nnowaits++;
+			region->stat.st_lock_nowait++;
 			goto err;
 		}
 		if ((lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) == NULL)
@@ -754,18 +762,11 @@ upgrade:	if (wwrite != NULL) {
 		if (LF_ISSET(DB_LOCK_SWITCH) &&
 		    (ret = __lock_put_nolock(dbenv,
 		    lock, &ihold, DB_LOCK_NOWAITERS)) != 0) {
-			__lock_remove_waiter(lt, sh_obj, newl, DB_LSTAT_FREE);
+			(void)__lock_remove_waiter(
+			    lt, sh_obj, newl, DB_LSTAT_FREE);
 			goto err;
 		}
 
-		/*
-		 * This is really a blocker for the thread.  It should be
-		 * initialized locked, so that when we try to acquire it, we
-		 * block.
-		 */
-		newl->status = DB_LSTAT_WAITING;
-		region->stat.st_nconflicts++;
-		region->need_dd = 1;
 		/*
 		 * First check to see if this txn has expired.
 		 * If not then see if the lock timeout is past
@@ -808,17 +809,29 @@ upgrade:	if (wwrite != NULL) {
 		    LOCK_TIME_GREATER(
 		    &region->next_timeout, &sh_locker->lk_expire)))
 			region->next_timeout = sh_locker->lk_expire;
-		UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+		newl->status = DB_LSTAT_WAITING;
+		region->stat.st_lock_wait++;
+		/* We are about to block, deadlock detector must run. */
+		region->need_dd = 1;
+
+		LOCK_SYSTEM_UNLOCK(dbenv);
 
 		/*
-		 * We are about to wait; before waiting, see if the deadlock
-		 * detector should be run.
+		 * Before waiting, see if the deadlock detector should run.
 		 */
 		if (region->detect != DB_LOCK_NORUN && !no_dd)
 			(void)__lock_detect(dbenv, region->detect, &did_abort);
 
-		MUTEX_LOCK(dbenv, &newl->mutex);
-		LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+		ip = NULL;
+		if (dbenv->thr_hashtab != NULL &&
+		     (ret = __env_set_state(dbenv, &ip, THREAD_BLOCKED)) != 0)
+			goto err;
+		MUTEX_LOCK(dbenv, newl->mtx_lock);
+		if (ip != NULL)
+			ip->dbth_state = THREAD_ACTIVE;
+
+		LOCK_SYSTEM_LOCK(dbenv);
 
 		/* Turn off lock timeout. */
 		if (newl->status != DB_LSTAT_EXPIRED)
@@ -828,9 +841,6 @@ upgrade:	if (wwrite != NULL) {
 		case DB_LSTAT_ABORTED:
 			ret = DB_LOCK_DEADLOCK;
 			goto err;
-		case DB_LSTAT_NOTEXIST:
-			ret = DB_LOCK_NOTEXIST;
-			goto err;
 		case DB_LSTAT_EXPIRED:
 expired:		SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
 			if ((ret = __lock_put_internal(lt, newl,
@@ -876,8 +886,11 @@ expired:		SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
 	lock->gen = newl->gen;
 	lock->mode = newl->mode;
 	sh_locker->nlocks++;
-	if (IS_WRITELOCK(newl->mode))
+	if (IS_WRITELOCK(newl->mode)) {
 		sh_locker->nwrites++;
+		if (newl->mode == DB_LOCK_WWRITE)
+			F_SET(sh_locker, DB_LOCKER_DIRTY);
+	}
 
 	return (0);
 
@@ -902,32 +915,29 @@ __lock_put_pp(dbenv, lock)
 	DB_ENV *dbenv;
 	DB_LOCK *lock;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_put(dbenv, lock, 0);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_put(dbenv, lock)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
 /*
  * __lock_put --
  *
- * PUBLIC: int  __lock_put __P((DB_ENV *, DB_LOCK *, u_int32_t));
+ * PUBLIC: int  __lock_put __P((DB_ENV *, DB_LOCK *));
  *  Internal lock_put interface.
  */
 int
-__lock_put(dbenv, lock, flags)
+__lock_put(dbenv, lock)
 	DB_ENV *dbenv;
 	DB_LOCK *lock;
-	u_int32_t flags;
 {
 	DB_LOCKTAB *lt;
 	int ret, run_dd;
@@ -937,9 +947,9 @@ __lock_put(dbenv, lock, flags)
 
 	lt = dbenv->lk_handle;
 
-	LOCKREGION(dbenv, lt);
-	ret = __lock_put_nolock(dbenv, lock, &run_dd, flags);
-	UNLOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
+	ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 
 	/*
 	 * Only run the lock detector if put told us to AND we are running
@@ -979,15 +989,8 @@ __lock_put_nolock(dbenv, lock, runp, flags)
 		return (EINVAL);
 	}
 
-	if (LF_ISSET(DB_LOCK_DOWNGRADE) &&
-	     lock->mode == DB_LOCK_WRITE && lockp->refcount > 1) {
-		ret = __lock_downgrade(dbenv,
-		    lock, DB_LOCK_WWRITE, DB_LOCK_NOREGION);
-		if (ret == 0)
-			lockp->refcount--;
-	} else
-		ret = __lock_put_internal(lt,
-		    lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
+	ret = __lock_put_internal(lt,
+	    lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
 	LOCK_INIT(*lock);
 
 	*runp = 0;
@@ -1036,7 +1039,9 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
 	region = lt->reginfo.primary;
 
 	if (!LF_ISSET(DB_LOCK_NOREGION))
-		LOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_LOCK(dbenv);
+
+	region->stat.st_ndowngrade++;
 
 	lockp = R_ADDR(&lt->reginfo, lock->off);
 	if (lock->gen != lockp->gen) {
@@ -1057,18 +1062,15 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
 	if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
 		sh_locker->nwrites--;
 
-	if (new_mode == DB_LOCK_WWRITE)
-		F_SET(sh_locker, DB_LOCKER_DIRTY);
-
 	lockp->mode = new_mode;
 	lock->mode = new_mode;
 
 	/* Get the object associated with this lock. */
 	obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
-	(void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
+	ret = __lock_promote(lt, obj, NULL, LF_ISSET(DB_LOCK_NOWAITERS));
 
 out:	if (!LF_ISSET(DB_LOCK_NOREGION))
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
@@ -1113,10 +1115,18 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
 	/* Get the object associated with this lock. */
 	sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
 
-	/* Remove this lock from its holders/waitlist. */
-	if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
-		__lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
-	else {
+	/*
+	 * Remove this lock from its holders/waitlist.  Set its status
+	 * to ABORTED.  It may get freed below, but if not then the
+	 * waiter has been aborted (it will panic if the lock is
+	 * free).
+	 */
+	if (lockp->status != DB_LSTAT_HELD &&
+	    lockp->status != DB_LSTAT_PENDING) {
+		if ((ret = __lock_remove_waiter(
+		    lt, sh_obj, lockp, DB_LSTAT_ABORTED)) != 0)
+			return (ret);
+	} else {
 		SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
 		lockp->links.stqe_prev = -1;
 	}
@@ -1124,8 +1134,9 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
 	if (LF_ISSET(DB_LOCK_NOPROMOTE))
 		state_changed = 0;
 	else
-		state_changed = __lock_promote(lt,
-		    sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));
+		if ((ret = __lock_promote(lt, sh_obj, &state_changed,
+		    LF_ISSET(DB_LOCK_NOWAITERS))) != 0)
+			return (ret);
 
 	/* Check if object should be reclaimed. */
 	if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
@@ -1174,16 +1185,13 @@ __lock_freelock(lt, lockp, locker, flags)
 
 	dbenv = lt->dbenv;
 	region = lt->reginfo.primary;
-	ret = 0;
 
 	if (LF_ISSET(DB_LOCK_UNLINK)) {
 		LOCKER_LOCK(lt, region, locker, indx);
 		if ((ret = __lock_getlocker(lt,
 		    locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
-			if (ret == 0)
-				ret = EINVAL;
 			__db_err(dbenv, __db_locker_invalid);
-			return (ret);
+			return (ret == 0 ? EINVAL : ret);
 		}
 
 		SH_LIST_REMOVE(lockp, locker_links, __db_lock);
@@ -1195,13 +1203,23 @@ __lock_freelock(lt, lockp, locker, flags)
 	}
 
 	if (LF_ISSET(DB_LOCK_FREE)) {
+		/*
+		 * If the lock is not held we cannot be sure of its mutex
+		 * state so we just destroy it and let it be re-created
+		 * when needed.
+		 */
+		if (lockp->mtx_lock != MUTEX_INVALID &&
+		     lockp->status != DB_LSTAT_HELD &&
+		     lockp->status != DB_LSTAT_EXPIRED &&
+		     (ret = __mutex_free(dbenv, &lockp->mtx_lock)) != 0)
+			return (ret);
 		lockp->status = DB_LSTAT_FREE;
 		SH_TAILQ_INSERT_HEAD(
 		    &region->free_locks, lockp, links, __db_lock);
 		region->stat.st_nlocks--;
 	}
 
-	return (ret);
+	return (0);
 }
 
 /*
@@ -1382,14 +1400,13 @@ __lock_inherit_locks(lt, locker, flags)
 		if (ret == 0 && sh_locker != NULL)
 			ret = EINVAL;
 		__db_err(dbenv, __db_locker_invalid);
-		goto err;
+		return (ret);
 	}
 
 	/* Make sure we are a child transaction. */
 	if (sh_locker->parent_locker == INVALID_ROFF) {
 		__db_err(dbenv, "Not a child transaction");
-		ret = EINVAL;
-		goto err;
+		return (EINVAL);
 	}
 	sh_parent = R_ADDR(&lt->reginfo, sh_locker->parent_locker);
 	F_SET(sh_locker, DB_LOCKER_DELETED);
@@ -1405,7 +1422,7 @@ __lock_inherit_locks(lt, locker, flags)
 			    "Parent locker is not valid");
 			ret = EINVAL;
 		}
-		goto err;
+		return (ret);
 	}
 
 	/*
@@ -1449,15 +1466,16 @@ __lock_inherit_locks(lt, locker, flags)
 		 * reference count, because there might be a sibling waiting,
 		 * who will now be allowed to make forward progress.
 		 */
-		(void)__lock_promote(lt, obj,
-		    LF_ISSET(DB_LOCK_NOWAITERS));
+		if ((ret = __lock_promote(
+		    lt, obj, NULL, LF_ISSET(DB_LOCK_NOWAITERS))) != 0)
+			return (ret);
 	}
 
 	/* Transfer child counts to parent. */
 	sh_parent->nlocks += sh_locker->nlocks;
 	sh_parent->nwrites += sh_locker->nwrites;
 
-err:	return (ret);
+	return (ret);
 }
 
 /*
@@ -1466,12 +1484,14 @@ err:	return (ret);
  * Look through the waiters and holders lists and decide which (if any)
  * locks can be promoted.   Promote any that are eligible.
  *
- * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+ * PUBLIC: int __lock_promote
+ * PUBLIC:    __P((DB_LOCKTAB *, DB_LOCKOBJ *, int *, u_int32_t));
  */
 int
-__lock_promote(lt, obj, flags)
+__lock_promote(lt, obj, state_changedp, flags)
 	DB_LOCKTAB *lt;
 	DB_LOCKOBJ *obj;
+	int *state_changedp;
 	u_int32_t flags;
 {
 	struct __db_lock *lp_w, *lp_h, *next_waiter;
@@ -1510,10 +1530,6 @@ __lock_promote(lt, obj, flags)
 		if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
 			continue;
 
-		if (LF_ISSET(DB_LOCK_REMOVE)) {
-			__lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
-			continue;
-		}
 		for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
 		    lp_h != NULL;
 		    lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
@@ -1523,8 +1539,10 @@ __lock_promote(lt, obj, flags)
 				    region, lp_w->holder, locker_ndx);
 				if ((__lock_getlocker(lt, lp_w->holder,
 				    locker_ndx, 0, &sh_locker)) != 0) {
-					DB_ASSERT(0);
-					break;
+					__db_err(lt->dbenv,
+					   "Locker %#lx missing",
+					   (u_long)lp_w->holder);
+					return (__db_panic(lt->dbenv, EINVAL));
 				}
 				if (!__lock_is_parent(lt,
 				    lp_h->holder, sh_locker))
@@ -1540,7 +1558,7 @@ __lock_promote(lt, obj, flags)
 		SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
 
 		/* Wake up waiter. */
-		MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
+		MUTEX_UNLOCK(lt->dbenv, lp_w->mtx_lock);
 		state_changed = 1;
 	}
 
@@ -1550,7 +1568,11 @@ __lock_promote(lt, obj, flags)
 	 */
 	if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
 		SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
-	return (state_changed);
+
+	if (state_changedp != NULL)
+		*state_changedp = state_changed;
+
+	return (0);
 }
 
 /*
@@ -1563,7 +1585,7 @@ __lock_promote(lt, obj, flags)
  *
  * This must be called with the Object bucket locked.
  */
-static void
+static int
 __lock_remove_waiter(lt, sh_obj, lockp, status)
 	DB_LOCKTAB *lt;
 	DB_LOCKOBJ *sh_obj;
@@ -1589,7 +1611,9 @@ __lock_remove_waiter(lt, sh_obj, lockp, status)
 	 * Wake whoever is waiting on this lock.
 	 */
 	if (do_wakeup)
-		MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
+		MUTEX_UNLOCK(lt->dbenv, lockp->mtx_lock);
+
+	return (0);
 }
 
 /*
diff --git a/storage/bdb/lock/lock_deadlock.c b/storage/bdb/lock/lock_deadlock.c
index 8caf821007e..17aa6e5af12 100644
--- a/storage/bdb/lock/lock_deadlock.c
+++ b/storage/bdb/lock/lock_deadlock.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_deadlock.c,v 11.86 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_deadlock.c,v 12.10 2005/10/07 20:21:30 ubell Exp $
  */
 
 #include "db_config.h"
@@ -30,7 +30,7 @@
 }
 
 #define	SET_MAP(M, B)	((M)[(B) / 32] |= (1 << ((B) % 32)))
-#define	CLR_MAP(M, B)	((M)[(B) / 32] &= ~(1 << ((B) % 32)))
+#define	CLR_MAP(M, B)	((M)[(B) / 32] &= ~((u_int)1 << ((B) % 32)))
 
 #define	OR_MAP(D, S, N)	{						\
 	u_int32_t __i;							\
@@ -51,7 +51,7 @@ typedef struct {
 	db_pgno_t	pgno;
 } locker_info;
 
-static int __dd_abort __P((DB_ENV *, locker_info *));
+static int __dd_abort __P((DB_ENV *, locker_info *, int *));
 static int __dd_build __P((DB_ENV *,
 	    u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
 static int __dd_find __P((DB_ENV *,
@@ -77,7 +77,8 @@ __lock_detect_pp(dbenv, flags, atype, abortp)
 	u_int32_t flags, atype;
 	int *abortp;
 {
-	int ret, rep_check;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -103,12 +104,9 @@ __lock_detect_pp(dbenv, flags, atype, abortp)
 		return (EINVAL);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_detect(dbenv, atype, abortp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_detect(dbenv, atype, abortp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -126,13 +124,12 @@ __lock_detect(dbenv, atype, abortp)
 {
 	DB_LOCKREGION *region;
 	DB_LOCKTAB *lt;
-	DB_TXNMGR *tmgr;
 	db_timeval_t now;
 	locker_info *idmap;
 	u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
 	u_int32_t i, cid, keeper, killid, limit, nalloc, nlockers;
 	u_int32_t lock_max, txn_max;
-	int ret;
+	int ret, status;
 
 	/*
 	 * If this environment is a replication client, then we must use the
@@ -148,7 +145,7 @@ __lock_detect(dbenv, atype, abortp)
 		*abortp = 0;
 
 	/* Check if a detector run is necessary. */
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	/* Make a pass only if auto-detect would run. */
 	region = lt->reginfo.primary;
@@ -157,7 +154,7 @@ __lock_detect(dbenv, atype, abortp)
 	if (region->need_dd == 0 &&
 	     (!LOCK_TIME_ISVALID(&region->next_timeout) ||
 	     !__lock_expired(dbenv, &now, &region->next_timeout))) {
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 	if (region->need_dd == 0)
@@ -169,33 +166,19 @@ __lock_detect(dbenv, atype, abortp)
 	/* Build the waits-for bitmap. */
 	ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
 	lock_max = region->stat.st_cur_maxid;
-	UNLOCKREGION(dbenv, lt);
-
-	/*
-	 * We need the cur_maxid from the txn region as well.  In order
-	 * to avoid tricky synchronization between the lock and txn
-	 * regions, we simply unlock the lock region and then lock the
-	 * txn region.  This introduces a small window during which the
-	 * transaction system could then wrap.  We're willing to return
-	 * the wrong answer for "oldest" or "youngest" in those rare
-	 * circumstances.
-	 */
-	tmgr = dbenv->tx_handle;
-	if (tmgr != NULL) {
-		R_LOCK(dbenv, &tmgr->reginfo);
-		txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
-		R_UNLOCK(dbenv, &tmgr->reginfo);
-	} else
-		txn_max = TXN_MAXIMUM;
+	LOCK_SYSTEM_UNLOCK(dbenv);
 	if (ret != 0 || atype == DB_LOCK_EXPIRE)
 		return (ret);
 
+	/* If there are no lockers, there are no deadlocks. */
 	if (nlockers == 0)
 		return (0);
+
 #ifdef DIAGNOSTIC
 	if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
 		__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
 #endif
+
 	/* Now duplicate the bitmaps so we can verify deadlock participants. */
 	if ((ret = __os_calloc(dbenv, (size_t)nlockers,
 	    sizeof(u_int32_t) * nalloc, &copymap)) != 0)
@@ -210,6 +193,23 @@ __lock_detect(dbenv, atype, abortp)
 	    __dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
 		return (ret);
 
+	/*
+	 * We need the cur_maxid from the txn region as well.  In order
+	 * to avoid tricky synchronization between the lock and txn
+	 * regions, we simply unlock the lock region and then lock the
+	 * txn region.  This introduces a small window during which the
+	 * transaction system could then wrap.  We're willing to return
+	 * the wrong answer for "oldest" or "youngest" in those rare
+	 * circumstances.
+	 */
+	if (TXN_ON(dbenv)) {
+		TXN_SYSTEM_LOCK(dbenv);
+		txn_max = ((DB_TXNREGION *)((DB_TXNMGR *)
+		    dbenv->tx_handle)->reginfo.primary)->cur_maxid;
+		TXN_SYSTEM_UNLOCK(dbenv);
+	} else
+		txn_max = TXN_MAXIMUM;
+
 	killid = BAD_KILLID;
 	free_me = deadp;
 	for (; *deadp != NULL; deadp++) {
@@ -327,23 +327,23 @@ dokill:		if (killid == BAD_KILLID) {
 				 * break the deadlock, signal to run
 				 * detection again.
 				 */
-				LOCKREGION(dbenv, lt);
+				LOCK_SYSTEM_LOCK(dbenv);
 				region->need_dd = 1;
-				UNLOCKREGION(dbenv, lt);
+				LOCK_SYSTEM_UNLOCK(dbenv);
 				killid = keeper;
 			}
 		}
 
 		/* Kill the locker with lockid idmap[killid]. */
-		if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
-			/*
-			 * It's possible that the lock was already aborted;
-			 * this isn't necessarily a problem, so do not treat
-			 * it as an error.
-			 */
-			if (ret == DB_ALREADY_ABORTED)
-				ret = 0;
-			else
+		if ((ret = __dd_abort(dbenv, &idmap[killid], &status)) != 0)
+			break;
+
+		/*
+		 * It's possible that the lock was already aborted; this isn't
+		 * necessarily a problem, so do not treat it as an error.
+		 */
+		if (status != 0) {
+			if (status != DB_ALREADY_ABORTED)
 				__db_err(dbenv,
 				    "warning: unable to abort locker %lx",
 				    (u_long)idmap[killid].id);
@@ -367,7 +367,7 @@ err:	if (free_me != NULL)
  * Utilities
  */
 
-# define DD_INVALID_ID	((u_int32_t) -1)
+#define	DD_INVALID_ID	((u_int32_t) -1)
 
 static int
 __dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
@@ -384,26 +384,47 @@ __dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
 	db_timeval_t now, min_timeout;
 	u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
 	u_int8_t *pptr;
-	int expire_only, is_first, ret;
+	int is_first, ret;
 
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
 	LOCK_SET_TIME_INVALID(&now);
 	LOCK_SET_TIME_MAX(&min_timeout);
-	expire_only = atype == DB_LOCK_EXPIRE;
 
 	/*
-	 * While we always check for expired timeouts, if we are called
-	 * with DB_LOCK_EXPIRE, then we are only checking for timeouts
-	 * (i.e., not doing deadlock detection at all).  If we aren't
-	 * doing real deadlock detection, then we can skip a significant,
-	 * amount of the processing.  In particular we do not build
-	 * the conflict array and our caller needs to expect this.
+	 * While we always check for expired timeouts, if we are called with
+	 * DB_LOCK_EXPIRE, then we are only checking for timeouts (i.e., not
+	 * doing deadlock detection at all).  If we aren't doing real deadlock
+	 * detection, then we can skip a significant, amount of the processing.
+	 * In particular we do not build the conflict array and our caller
+	 * needs to expect this.
 	 */
-	if (expire_only) {
-		count = 0;
-		nentries = 0;
-		goto obj_loop;
+	if (atype == DB_LOCK_EXPIRE) {
+		for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
+		    op != NULL;
+		    op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj))
+			for (lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+			    lp != NULL;
+			    lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+				LOCKER_LOCK(lt, region, lp->holder, ndx);
+				if ((ret = __lock_getlocker(lt,
+				    lp->holder, ndx, 0, &lockerp)) != 0)
+					continue;
+				if (lp->status == DB_LSTAT_WAITING) {
+					if (__lock_expired(dbenv,
+					    &now, &lockerp->lk_expire)) {
+						lp->status = DB_LSTAT_EXPIRED;
+						MUTEX_UNLOCK(
+						    dbenv, lp->mtx_lock);
+						continue;
+					}
+					if (LOCK_TIME_GREATER(
+					    &min_timeout, &lockerp->lk_expire))
+						min_timeout =
+						    lockerp->lk_expire;
+				}
+			}
+		goto done;
 	}
 
 	/*
@@ -413,7 +434,6 @@ __dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
 	 * mutex the second time.
 	 */
 retry:	count = region->stat.st_nlockers;
-
 	if (count == 0) {
 		*nlockers = 0;
 		return (0);
@@ -477,6 +497,8 @@ retry:	count = region->stat.st_nlockers;
 			case DB_LOCK_MAXWRITE:
 				id_array[lip->dd_id].count = lip->nwrites;
 				break;
+			default:
+				break;
 			}
 			if (F_ISSET(lip, DB_LOCKER_INABORT))
 				id_array[lip->dd_id].in_abort = 1;
@@ -492,11 +514,8 @@ retry:	count = region->stat.st_nlockers;
 	 * list and add an entry in the waitsfor matrix for each waiter/holder
 	 * combination.
 	 */
-obj_loop:
 	for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
 	    op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
-		if (expire_only)
-			goto look_waiters;
 		CLEAR_MAP(tmpmap, nentries);
 
 		/*
@@ -524,6 +543,8 @@ obj_loop:
 				case DB_LOCK_MAXWRITE:
 					id_array[dd].count += lockerp->nwrites;
 					break;
+				default:
+					break;
 				}
 				if (F_ISSET(lockerp, DB_LOCKER_INABORT))
 					id_array[dd].in_abort = 1;
@@ -544,7 +565,6 @@ obj_loop:
 		 * Next, for each waiter, we set its row in the matrix
 		 * equal to the map of holders we set up above.
 		 */
-look_waiters:
 		for (is_first = 1,
 		    lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
 		    lp != NULL;
@@ -558,18 +578,14 @@ look_waiters:
 				if (__lock_expired(dbenv,
 				    &now, &lockerp->lk_expire)) {
 					lp->status = DB_LSTAT_EXPIRED;
-					MUTEX_UNLOCK(dbenv, &lp->mutex);
+					MUTEX_UNLOCK(dbenv, lp->mtx_lock);
 					continue;
 				}
 				if (LOCK_TIME_GREATER(
 				    &min_timeout, &lockerp->lk_expire))
 					min_timeout = lockerp->lk_expire;
-
 			}
 
-			if (expire_only)
-				continue;
-
 			if (lockerp->dd_id == DD_INVALID_ID) {
 				dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
 				    lockerp->master_locker))->dd_id;
@@ -583,6 +599,8 @@ look_waiters:
 				case DB_LOCK_MAXWRITE:
 					id_array[dd].count += lockerp->nwrites;
 					break;
+				default:
+					break;
 				}
 			} else
 				dd = lockerp->dd_id;
@@ -612,15 +630,6 @@ look_waiters:
 		}
 	}
 
-	if (LOCK_TIME_ISVALID(&region->next_timeout)) {
-		if (LOCK_TIME_ISMAX(&min_timeout))
-			LOCK_SET_TIME_INVALID(&region->next_timeout);
-		else
-			region->next_timeout = min_timeout;
-	}
-	if (expire_only)
-		return (0);
-
 	/* Now for each locker; record its last lock. */
 	for (id = 0; id < count; id++) {
 		if (!id_array[id].valid)
@@ -680,6 +689,12 @@ get_lock:		id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
 	*bmp = bitmap;
 	*allocp = nentries;
 	__os_free(dbenv, tmpmap);
+done:	if (LOCK_TIME_ISVALID(&region->next_timeout)) {
+		if (LOCK_TIME_ISMAX(&min_timeout))
+			LOCK_SET_TIME_INVALID(&region->next_timeout);
+		else
+			region->next_timeout = min_timeout;
+	}
 	return (0);
 }
 
@@ -750,9 +765,10 @@ __dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
 }
 
 static int
-__dd_abort(dbenv, info)
+__dd_abort(dbenv, info, statusp)
 	DB_ENV *dbenv;
 	locker_info *info;
+	int *statusp;
 {
 	struct __db_lock *lockp;
 	DB_LOCKER *lockerp;
@@ -762,37 +778,39 @@ __dd_abort(dbenv, info)
 	u_int32_t ndx;
 	int ret;
 
+	*statusp = 0;
+
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
+	ret = 0;
 
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	/*
-	 * Get the locker.  If its gone or was aborted while
-	 * we were detecting return that.
+	 * Get the locker.  If it's gone or was aborted while we were
+	 * detecting, return that.
 	 */
 	LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
 	if ((ret = __lock_getlocker(lt,
-	    info->last_locker_id, ndx, 0, &lockerp)) != 0 ||
-	    lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
-		if (ret == 0)
-			ret = DB_ALREADY_ABORTED;
+	    info->last_locker_id, ndx, 0, &lockerp)) != 0)
+		goto err;
+	if (lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
+		*statusp = DB_ALREADY_ABORTED;
 		goto out;
 	}
 
 	/*
-	 * Find the locker's last lock.
-	 * It is possible for this lock to have been freed,
-	 * either though a timeout or another detector run.
+	 * Find the locker's last lock.  It is possible for this lock to have
+	 * been freed, either though a timeout or another detector run.
 	 */
 	if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
-		ret = DB_ALREADY_ABORTED;
+		*statusp = DB_ALREADY_ABORTED;
 		goto out;
 	}
 	if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
 	    lockp->holder != lockerp->id ||
 	    lockp->obj != info->last_obj || lockp->status != DB_LSTAT_WAITING) {
-		ret = DB_ALREADY_ABORTED;
+		*statusp = DB_ALREADY_ABORTED;
 		goto out;
 	}
 
@@ -812,15 +830,12 @@ __dd_abort(dbenv, info)
 		SH_TAILQ_REMOVE(&region->dd_objs,
 		    sh_obj, dd_links, __db_lockobj);
 	else
-		ret = __lock_promote(lt, sh_obj, 0);
-	MUTEX_UNLOCK(dbenv, &lockp->mutex);
+		ret = __lock_promote(lt, sh_obj, NULL, 0);
+	MUTEX_UNLOCK(dbenv, lockp->mtx_lock);
 
 	region->stat.st_ndeadlocks++;
-	UNLOCKREGION(dbenv, lt);
-
-	return (0);
-
-out:	UNLOCKREGION(dbenv, lt);
+err:
+out:	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
diff --git a/storage/bdb/lock/lock_failchk.c b/storage/bdb/lock/lock_failchk.c
new file mode 100644
index 00000000000..8dcdadcc5b1
--- /dev/null
+++ b/storage/bdb/lock/lock_failchk.c
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: lock_failchk.c,v 12.3 2005/10/14 15:06:44 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+/*
+ * __lock_failchk --
+ *	Check for locks held by dead threads of control.
+ *
+ * PUBLIC: int __lock_failchk __P((DB_ENV *));
+ */
+int
+__lock_failchk(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_LOCKER *lip;
+	DB_LOCKREGION *lrp;
+	DB_LOCKREQ request;
+	DB_LOCKTAB *lt;
+	u_int32_t i;
+	int ret;
+	char buf[DB_THREADID_STRLEN];
+
+	lt = dbenv->lk_handle;
+	lrp = lt->reginfo.primary;
+
+retry:	LOCK_SYSTEM_LOCK(dbenv);
+
+	ret = 0;
+	for (i = 0; i < lrp->locker_t_size; i++)
+		for (lip =
+		    SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
+		    lip != NULL;
+		    lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
+			/*
+			 * If the locker is transactional, we can ignore it;
+			 * __txn_failchk aborts any transactions the locker
+			 * is involved in.
+			 */
+			if (lip->id >= TXN_MINIMUM)
+				continue;
+
+			/* If the locker is still alive, it's not a problem. */
+			if (dbenv->is_alive(dbenv, lip->pid, lip->tid))
+				continue;
+
+			/*
+			 * We can only deal with read locks.  If the locker
+			 * holds write locks we have to assume a Berkeley DB
+			 * operation was interrupted with only 1-of-N pages
+			 * modified.
+			 */
+			if (lip->nwrites != 0) {
+				ret = __db_failed(dbenv,
+				     "locker has write locks",
+				     lip->pid, lip->tid);
+				break;
+			}
+
+			/*
+			 * Discard the locker and its read locks.
+			 */
+			__db_msg(dbenv, "Freeing locks for locker %#lx: %s",
+			    (u_long)lip->id, dbenv->thread_id_string(
+			    dbenv, lip->pid, lip->tid, buf));
+			LOCK_SYSTEM_UNLOCK(dbenv);
+			memset(&request, 0, sizeof(request));
+			request.op = DB_LOCK_PUT_ALL;
+			if ((ret = __lock_vec(
+			    dbenv, lip->id, 0, &request, 1, NULL)) != 0)
+				return (ret);
+
+			/*
+			 * This locker is most likely referenced by a cursor
+			 * which is owned by a dead thread.  Normally the
+			 * cursor would be available for other threads
+			 * but we assume the dead thread will never release
+			 * it.
+			 */
+			if ((ret = __lock_freefamilylocker(lt, lip->id)) != 0)
+				return (ret);
+			goto retry;
+		}
+
+	LOCK_SYSTEM_UNLOCK(dbenv);
+
+	return (ret);
+}
diff --git a/storage/bdb/lock/lock_id.c b/storage/bdb/lock/lock_id.c
index 02f85765de6..3e6789b0663 100644
--- a/storage/bdb/lock/lock_id.c
+++ b/storage/bdb/lock/lock_id.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_id.c,v 11.146 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_id.c,v 12.10 2005/10/14 15:15:16 ubell Exp $
  */
 
 #include "db_config.h"
@@ -32,18 +32,16 @@ __lock_id_pp(dbenv, idp)
 	DB_ENV *dbenv;
 	u_int32_t *idp;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_id(dbenv, idp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_id(dbenv, idp, NULL)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -51,31 +49,34 @@ __lock_id_pp(dbenv, idp)
  * __lock_id --
  *	DB_ENV->lock_id.
  *
- * PUBLIC: int  __lock_id __P((DB_ENV *, u_int32_t *));
+ * PUBLIC: int  __lock_id __P((DB_ENV *, u_int32_t *, DB_LOCKER **));
  */
 int
-__lock_id(dbenv, idp)
+__lock_id(dbenv, idp, lkp)
 	DB_ENV *dbenv;
 	u_int32_t *idp;
+	DB_LOCKER **lkp;
 {
 	DB_LOCKER *lk;
 	DB_LOCKTAB *lt;
 	DB_LOCKREGION *region;
-	u_int32_t *ids, locker_ndx;
+	u_int32_t id, *ids, locker_ndx;
 	int nids, ret;
 
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
 	ret = 0;
 
+	id = DB_LOCK_INVALIDID;
+	lk = NULL;
+
+	LOCK_SYSTEM_LOCK(dbenv);
+
 	/*
-	 * Allocate a new lock id.  If we wrap around then we
-	 * find the minimum currently in use and make sure we
-	 * can stay below that.  This code is similar to code
-	 * in __txn_begin_int for recovering txn ids.
-	 */
-	LOCKREGION(dbenv, lt);
-	/*
+	 * Allocate a new lock id.  If we wrap around then we find the minimum
+	 * currently in use and make sure we can stay below that.  This code is
+	 * similar to code in __txn_begin_int for recovering txn ids.
+	 *
 	 * Our current valid range can span the maximum valid value, so check
 	 * for it and wrap manually.
 	 */
@@ -98,17 +99,36 @@ __lock_id(dbenv, idp)
 			    &region->stat.st_id, &region->stat.st_cur_maxid);
 		__os_free(dbenv, ids);
 	}
-	*idp = ++region->stat.st_id;
+	id = ++region->stat.st_id;
 
 	/* Allocate a locker for this id. */
-	LOCKER_LOCK(lt, region, *idp, locker_ndx);
-	ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);
+	LOCKER_LOCK(lt, region, id, locker_ndx);
+	ret = __lock_getlocker(lt, id, locker_ndx, 1, &lk);
 
-err:	UNLOCKREGION(dbenv, lt);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
 
+	if (idp)
+		*idp = id;
+	if (lkp)
+		*lkp = lk;
 	return (ret);
 }
 
+/*
+ * __lock_set_thread_id --
+ *	Set the thread_id in an existing locker.
+ * PUBLIC: void __lock_set_thread_id __P((DB_LOCKER *, pid_t, db_threadid_t));
+ */
+void
+__lock_set_thread_id(lref, pid, tid)
+	DB_LOCKER *lref;
+	pid_t pid;
+	db_threadid_t tid;
+{
+	lref->pid = pid;
+	lref->tid = tid;
+}
+
 /*
  * __lock_id_free_pp --
  *	DB_ENV->lock_id_free pre/post processing.
@@ -120,18 +140,16 @@ __lock_id_free_pp(dbenv, id)
 	DB_ENV *dbenv;
 	u_int32_t id;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_id_free(dbenv, id);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_id_free(dbenv, id)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -159,7 +177,7 @@ __lock_id_free(dbenv, id)
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
 
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 	LOCKER_LOCK(lt, region, id, locker_ndx);
 	if ((ret = __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
 		goto err;
@@ -178,7 +196,7 @@ __lock_id_free(dbenv, id)
 
 	__lock_freelocker(lt, region, sh_locker, locker_ndx);
 
-err:	UNLOCKREGION(dbenv, lt);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
@@ -251,6 +269,7 @@ __lock_getlocker(lt, locker, indx, create, retp)
 			region->stat.st_maxnlockers = region->stat.st_nlockers;
 
 		sh_locker->id = locker;
+		dbenv->thread_id(dbenv, &sh_locker->pid, &sh_locker->tid);
 		sh_locker->dd_id = 0;
 		sh_locker->master_locker = INVALID_ROFF;
 		sh_locker->parent_locker = INVALID_ROFF;
@@ -291,12 +310,11 @@ __lock_addfamilylocker(dbenv, pid, id)
 
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	/* get/create the  parent locker info */
 	LOCKER_LOCK(lt, region, pid, ndx);
-	if ((ret = __lock_getlocker(dbenv->lk_handle,
-	    pid, ndx, 1, &mlockerp)) != 0)
+	if ((ret = __lock_getlocker(lt, pid, ndx, 1, &mlockerp)) != 0)
 		goto err;
 
 	/*
@@ -307,8 +325,7 @@ __lock_addfamilylocker(dbenv, pid, id)
 	 * family be created at the same time.
 	 */
 	LOCKER_LOCK(lt, region, id, ndx);
-	if ((ret = __lock_getlocker(dbenv->lk_handle,
-	    id, ndx, 1, &lockerp)) != 0)
+	if ((ret = __lock_getlocker(lt, id, ndx, 1, &lockerp)) != 0)
 		goto err;
 
 	/* Point to our parent. */
@@ -330,8 +347,7 @@ __lock_addfamilylocker(dbenv, pid, id)
 	SH_LIST_INSERT_HEAD(
 	    &mlockerp->child_locker, lockerp, child_link, __db_locker);
 
-err:
-	UNLOCKREGION(dbenv, lt);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
@@ -358,7 +374,7 @@ __lock_freefamilylocker(lt, locker)
 	dbenv = lt->dbenv;
 	region = lt->reginfo.primary;
 
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 	LOCKER_LOCK(lt, region, locker, indx);
 
 	if ((ret = __lock_getlocker(lt,
@@ -377,8 +393,7 @@ __lock_freefamilylocker(lt, locker)
 
 	__lock_freelocker(lt, region, sh_locker, indx);
 
-err:
-	UNLOCKREGION(dbenv, lt);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
diff --git a/storage/bdb/lock/lock_list.c b/storage/bdb/lock/lock_list.c
index 5851dc7fadf..c2bbd805ec7 100644
--- a/storage/bdb/lock/lock_list.c
+++ b/storage/bdb/lock/lock_list.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_list.c,v 11.146 2004/09/22 03:48:29 bostic Exp $
+ * $Id: lock_list.c,v 12.5 2005/10/20 18:26:04 bostic Exp $
  */
 
 #include "db_config.h"
@@ -240,16 +240,30 @@ __lock_get_list(dbenv, locker, flags, lock_mode, list)
 	u_int16_t npgno, size;
 	u_int32_t i, nlocks;
 	int ret;
-	void *dp;
+	void *data, *dp;
 
 	if (list->size == 0)
 		return (0);
 	ret = 0;
+	data = NULL;
+
 	lt = dbenv->lk_handle;
 	dp = list->data;
 
+	/*
+	 * There is no assurance log records will be aligned.  If not, then
+	 * copy the data to an aligned region so the rest of the code does
+	 * not have to worry about it.
+	 */
+	if ((uintptr_t)dp != DB_ALIGN((uintptr_t)dp, sizeof(u_int32_t))) {
+		if ((ret = __os_malloc(dbenv, list->size, &data)) != 0)
+			return (ret);
+		memcpy(data, list->data, list->size);
+		dp = data;
+	}
+
 	GET_COUNT(dp, nlocks);
-	LOCKREGION(dbenv, dbenv->lk_handle);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	for (i = 0; i < nlocks; i++) {
 		GET_PCOUNT(dp, npgno);
@@ -271,8 +285,9 @@ __lock_get_list(dbenv, locker, flags, lock_mode, list)
 		lock->pgno = save_pgno;
 	}
 
-err:
-	UNLOCKREGION(dbenv, dbenv->lk_handle);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
+	if (data != NULL)
+		__os_free(dbenv, data);
 	return (ret);
 }
 
diff --git a/storage/bdb/lock/lock_method.c b/storage/bdb/lock/lock_method.c
index d571794931d..0548f50ca15 100644
--- a/storage/bdb/lock/lock_method.c
+++ b/storage/bdb/lock/lock_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_method.c,v 11.44 2004/06/01 21:50:05 bostic Exp $
+ * $Id: lock_method.c,v 12.6 2005/08/08 14:56:49 bostic Exp $
  */
 
 #include "db_config.h"
@@ -12,33 +12,13 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
 
-#ifdef HAVE_RPC
-#include <rpc/rpc.h>
-#endif
-
 #include <string.h>
 #endif
 
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
 #include "db_int.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/lock.h"
 
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
-static int __lock_get_lk_conflicts __P((DB_ENV *, const u_int8_t **, int *));
-static int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
-static int __lock_get_lk_detect __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_lockers __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_locks __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_objects __P((DB_ENV *, u_int32_t *));
-static int __lock_get_env_timeout __P((DB_ENV *, db_timeout_t *, u_int32_t));
-
 /*
  * __lock_dbenv_create --
  *	Lock specific creation of the DB_ENV structure.
@@ -55,59 +35,9 @@ __lock_dbenv_create(dbenv)
 	 * state or turn off mutex locking, and so we can neither check
 	 * the panic state or acquire a mutex in the DB_ENV create path.
 	 */
-
 	dbenv->lk_max = DB_LOCK_DEFAULT_N;
 	dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
 	dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
-
-#ifdef	HAVE_RPC
-	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbenv->get_lk_conflicts = __dbcl_get_lk_conflicts;
-		dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
-		dbenv->get_lk_detect = __dbcl_get_lk_detect;
-		dbenv->set_lk_detect = __dbcl_set_lk_detect;
-		dbenv->set_lk_max = __dbcl_set_lk_max;
-		dbenv->get_lk_max_lockers = __dbcl_get_lk_max_lockers;
-		dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
-		dbenv->get_lk_max_locks = __dbcl_get_lk_max_locks;
-		dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
-		dbenv->get_lk_max_objects = __dbcl_get_lk_max_objects;
-		dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
-
-		dbenv->lock_detect = __dbcl_lock_detect;
-		dbenv->lock_get = __dbcl_lock_get;
-		dbenv->lock_id = __dbcl_lock_id;
-		dbenv->lock_id_free = __dbcl_lock_id_free;
-		dbenv->lock_put = __dbcl_lock_put;
-		dbenv->lock_stat = __dbcl_lock_stat;
-		dbenv->lock_stat_print = NULL;
-		dbenv->lock_vec = __dbcl_lock_vec;
-	} else
-#endif
-	{
-		dbenv->get_lk_conflicts = __lock_get_lk_conflicts;
-		dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
-		dbenv->get_lk_detect = __lock_get_lk_detect;
-		dbenv->set_lk_detect = __lock_set_lk_detect;
-		dbenv->set_lk_max = __lock_set_lk_max;
-		dbenv->get_lk_max_lockers = __lock_get_lk_max_lockers;
-		dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
-		dbenv->get_lk_max_locks = __lock_get_lk_max_locks;
-		dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
-		dbenv->get_lk_max_objects = __lock_get_lk_max_objects;
-		dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
-		dbenv->get_timeout = __lock_get_env_timeout;
-		dbenv->set_timeout = __lock_set_env_timeout;
-
-		dbenv->lock_detect = __lock_detect_pp;
-		dbenv->lock_get = __lock_get_pp;
-		dbenv->lock_id = __lock_id_pp;
-		dbenv->lock_id_free = __lock_id_free_pp;
-		dbenv->lock_put = __lock_put_pp;
-		dbenv->lock_stat = __lock_stat_pp;
-		dbenv->lock_stat_print = __lock_stat_print_pp;
-		dbenv->lock_vec = __lock_vec_pp;
-	}
 }
 
 /*
@@ -131,24 +61,30 @@ __lock_dbenv_close(dbenv)
 /*
  * __lock_get_lk_conflicts
  *	Get the conflicts matrix.
+ *
+ * PUBLIC: int __lock_get_lk_conflicts
+ * PUBLIC:     __P((DB_ENV *, const u_int8_t **, int *));
  */
-static int
+int
 __lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_modesp)
 	DB_ENV *dbenv;
 	const u_int8_t **lk_conflictsp;
 	int *lk_modesp;
 {
+	DB_LOCKTAB *lt;
+
 	ENV_NOT_CONFIGURED(dbenv,
 	    dbenv->lk_handle, "DB_ENV->get_lk_conflicts", DB_INIT_LOCK);
 
+	lt = dbenv->lk_handle;
+
 	if (LOCKING_ON(dbenv)) {
 		/* Cannot be set after open, no lock required to read. */
 		if (lk_conflictsp != NULL)
-			*lk_conflictsp =
-			    ((DB_LOCKTAB *)dbenv->lk_handle)->conflicts;
+			*lk_conflictsp = lt->conflicts;
 		if (lk_modesp != NULL)
-			*lk_modesp = ((DB_LOCKREGION *)((DB_LOCKTAB *)
-			    dbenv->lk_handle)->reginfo.primary)->stat.st_nmodes;
+			*lk_modesp = ((DB_LOCKREGION *)
+			    (lt->reginfo.primary))->stat.st_nmodes;
 	} else {
 		if (lk_conflictsp != NULL)
 			*lk_conflictsp = dbenv->lk_conflicts;
@@ -161,8 +97,10 @@ __lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_modesp)
 /*
  * __lock_set_lk_conflicts
  *	Set the conflicts matrix.
+ *
+ * PUBLIC: int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
  */
-static int
+int
 __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
 	DB_ENV *dbenv;
 	u_int8_t *lk_conflicts;
@@ -186,7 +124,10 @@ __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __lock_get_lk_detect __P((DB_ENV *, u_int32_t *));
+ */
+int
 __lock_get_lk_detect(dbenv, lk_detectp)
 	DB_ENV *dbenv;
 	u_int32_t *lk_detectp;
@@ -198,10 +139,9 @@ __lock_get_lk_detect(dbenv, lk_detectp)
 
 	if (LOCKING_ON(dbenv)) {
 		lt = dbenv->lk_handle;
-		LOCKREGION(dbenv, lt);
-		*lk_detectp = ((DB_LOCKREGION *)
-		    ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary)->detect;
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_LOCK(dbenv);
+		*lk_detectp = ((DB_LOCKREGION *)lt->reginfo.primary)->detect;
+		LOCK_SYSTEM_UNLOCK(dbenv);
 	} else
 		*lk_detectp = dbenv->lk_detect;
 	return (0);
@@ -246,7 +186,7 @@ __lock_set_lk_detect(dbenv, lk_detect)
 	if (LOCKING_ON(dbenv)) {
 		lt = dbenv->lk_handle;
 		region = lt->reginfo.primary;
-		LOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_LOCK(dbenv);
 		/*
 		 * Check for incompatible automatic deadlock detection requests.
 		 * There are scenarios where changing the detector configuration
@@ -265,7 +205,7 @@ __lock_set_lk_detect(dbenv, lk_detect)
 		} else
 			if (region->detect == DB_LOCK_NORUN)
 				region->detect = lk_detect;
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_UNLOCK(dbenv);
 	} else
 		dbenv->lk_detect = lk_detect;
 
@@ -291,7 +231,10 @@ __lock_set_lk_max(dbenv, lk_max)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __lock_get_lk_max_locks __P((DB_ENV *, u_int32_t *));
+ */
+int
 __lock_get_lk_max_locks(dbenv, lk_maxp)
 	DB_ENV *dbenv;
 	u_int32_t *lk_maxp;
@@ -325,7 +268,10 @@ __lock_set_lk_max_locks(dbenv, lk_max)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __lock_get_lk_max_lockers __P((DB_ENV *, u_int32_t *));
+ */
+int
 __lock_get_lk_max_lockers(dbenv, lk_maxp)
 	DB_ENV *dbenv;
 	u_int32_t *lk_maxp;
@@ -359,7 +305,10 @@ __lock_set_lk_max_lockers(dbenv, lk_max)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __lock_get_lk_max_objects __P((DB_ENV *, u_int32_t *));
+ */
+int
 __lock_get_lk_max_objects(dbenv, lk_maxp)
 	DB_ENV *dbenv;
 	u_int32_t *lk_maxp;
@@ -393,7 +342,11 @@ __lock_set_lk_max_objects(dbenv, lk_max)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __lock_get_env_timeout
+ * PUBLIC:     __P((DB_ENV *, db_timeout_t *, u_int32_t));
+ */
+int
 __lock_get_env_timeout(dbenv, timeoutp, flag)
 	DB_ENV *dbenv;
 	db_timeout_t *timeoutp;
@@ -410,7 +363,7 @@ __lock_get_env_timeout(dbenv, timeoutp, flag)
 	if (LOCKING_ON(dbenv)) {
 		lt = dbenv->lk_handle;
 		region = lt->reginfo.primary;
-		LOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_LOCK(dbenv);
 		switch (flag) {
 		case DB_SET_LOCK_TIMEOUT:
 			*timeoutp = region->lk_timeout;
@@ -422,7 +375,7 @@ __lock_get_env_timeout(dbenv, timeoutp, flag)
 			ret = 1;
 			break;
 		}
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_UNLOCK(dbenv);
 	} else
 		switch (flag) {
 		case DB_SET_LOCK_TIMEOUT:
@@ -465,7 +418,7 @@ __lock_set_env_timeout(dbenv, timeout, flags)
 	if (LOCKING_ON(dbenv)) {
 		lt = dbenv->lk_handle;
 		region = lt->reginfo.primary;
-		LOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_LOCK(dbenv);
 		switch (flags) {
 		case DB_SET_LOCK_TIMEOUT:
 			region->lk_timeout = timeout;
@@ -477,7 +430,7 @@ __lock_set_env_timeout(dbenv, timeout, flags)
 			ret = 1;
 			break;
 		}
-		UNLOCKREGION(dbenv, lt);
+		LOCK_SYSTEM_UNLOCK(dbenv);
 	} else
 		switch (flags) {
 		case DB_SET_LOCK_TIMEOUT:
diff --git a/storage/bdb/lock/lock_region.c b/storage/bdb/lock/lock_region.c
index b03dc74f15e..f616f40699a 100644
--- a/storage/bdb/lock/lock_region.c
+++ b/storage/bdb/lock/lock_region.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_region.c,v 11.82 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_region.c,v 12.6 2005/10/07 20:21:31 ubell Exp $
  */
 
 #include "db_config.h"
@@ -23,10 +23,6 @@ static int  __lock_region_init __P((DB_ENV *, DB_LOCKTAB *));
 static size_t
 	    __lock_region_size __P((DB_ENV *));
 
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-static size_t __lock_region_maint __P((DB_ENV *));
-#endif
-
 /*
  * The conflict arrays are set up such that the row is the lock you are
  * holding and the column is the lock that is desired.
@@ -73,7 +69,9 @@ __lock_open(dbenv)
 	DB_LOCKREGION *region;
 	DB_LOCKTAB *lt;
 	size_t size;
-	int ret;
+	int region_locked, ret;
+
+	region_locked = 0;
 
 	/* Create the lock table structure. */
 	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOCKTAB), <)) != 0)
@@ -100,6 +98,16 @@ __lock_open(dbenv)
 	region = lt->reginfo.primary =
 	    R_ADDR(<->reginfo, lt->reginfo.rp->primary);
 
+	/* Set remaining pointers into region. */
+	lt->conflicts = R_ADDR(<->reginfo, region->conf_off);
+	lt->obj_tab = R_ADDR(<->reginfo, region->obj_off);
+	lt->locker_tab = R_ADDR(<->reginfo, region->locker_off);
+
+	dbenv->lk_handle = lt;
+
+	LOCK_SYSTEM_LOCK(dbenv);
+	region_locked = 1;
+
 	if (dbenv->lk_detect != DB_LOCK_NORUN) {
 		/*
 		 * Check for incompatible automatic deadlock detection requests.
@@ -131,22 +139,18 @@ __lock_open(dbenv)
 	if (dbenv->tx_timeout != 0)
 		region->tx_timeout = dbenv->tx_timeout;
 
-	/* Set remaining pointers into region. */
-	lt->conflicts = R_ADDR(<->reginfo, region->conf_off);
-	lt->obj_tab = R_ADDR(<->reginfo, region->obj_off);
-	lt->locker_tab = R_ADDR(<->reginfo, region->locker_off);
+	LOCK_SYSTEM_UNLOCK(dbenv);
+	region_locked = 0;
 
-	R_UNLOCK(dbenv, <->reginfo);
-
-	dbenv->lk_handle = lt;
 	return (0);
 
-err:	if (lt->reginfo.addr != NULL) {
-		if (F_ISSET(<->reginfo, REGION_CREATE))
-			ret = __db_panic(dbenv, ret);
-		R_UNLOCK(dbenv, <->reginfo);
+err:	dbenv->lk_handle = NULL;
+	if (lt->reginfo.addr != NULL) {
+		if (region_locked)
+			LOCK_SYSTEM_UNLOCK(dbenv);
 		(void)__db_r_detach(dbenv, <->reginfo, 0);
 	}
+
 	__os_free(dbenv, lt);
 	return (ret);
 }
@@ -165,9 +169,6 @@ __lock_region_init(dbenv, lt)
 	DB_LOCKER *lidp;
 	DB_LOCKOBJ *op;
 	DB_LOCKREGION *region;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	size_t maint_size;
-#endif
 	u_int32_t i;
 	u_int8_t *addr;
 	int lk_modes, ret;
@@ -179,6 +180,10 @@ __lock_region_init(dbenv, lt)
 	region = lt->reginfo.primary;
 	memset(region, 0, sizeof(*region));
 
+	if ((ret = __mutex_alloc(
+	    dbenv, MTX_LOCK_REGION, 0, ®ion->mtx_region)) != 0)
+		return (ret);
+
 	/* Select a conflict matrix if none specified. */
 	if (dbenv->lk_modes == 0)
 		if (CDB_LOCKING(dbenv)) {
@@ -229,33 +234,15 @@ __lock_region_init(dbenv, lt)
 	__db_hashinit(addr, region->locker_t_size);
 	region->locker_off = R_OFFSET(<->reginfo, addr);
 
-#ifdef	HAVE_MUTEX_SYSTEM_RESOURCES
-	maint_size = __lock_region_maint(dbenv);
-	/* Allocate room for the locker maintenance info and initialize it. */
-	if ((ret = __db_shalloc(<->reginfo,
-	    sizeof(REGMAINT) + maint_size, 0, &addr)) != 0)
-		goto mem_err;
-	__db_maintinit(<->reginfo, addr, maint_size);
-	region->maint_off = R_OFFSET(<->reginfo, addr);
-#endif
-
-	/*
-	 * Initialize locks onto a free list. Initialize and lock the mutex
-	 * so that when we need to block, all we need do is try to acquire
-	 * the mutex.
-	 */
+	/* Initialize locks onto a free list. */
 	SH_TAILQ_INIT(®ion->free_locks);
 	for (i = 0; i < region->stat.st_maxlocks; ++i) {
 		if ((ret = __db_shalloc(<->reginfo,
-		    sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
+		    sizeof(struct __db_lock), 0, &lp)) != 0)
 			goto mem_err;
-		lp->status = DB_LSTAT_FREE;
+		lp->mtx_lock = MUTEX_INVALID;
 		lp->gen = 0;
-		if ((ret = __db_mutex_setup(dbenv, <->reginfo, &lp->mutex,
-		    MUTEX_LOGICAL_LOCK | MUTEX_NO_RLOCK | MUTEX_SELF_BLOCK))
-		    != 0)
-			return (ret);
-		MUTEX_LOCK(dbenv, &lp->mutex);
+		lp->status = DB_LSTAT_FREE;
 		SH_TAILQ_INSERT_HEAD(®ion->free_locks, lp, links, __db_lock);
 	}
 
@@ -316,14 +303,13 @@ __lock_dbenv_refresh(dbenv)
 	 */
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
 		/* Discard the conflict matrix. */
-		__db_shalloc_free(reginfo, R_ADDR(<->reginfo, lr->conf_off));
+		__db_shalloc_free(reginfo, R_ADDR(reginfo, lr->conf_off));
 
 		/* Discard the object hash table. */
-		__db_shalloc_free(reginfo, R_ADDR(<->reginfo, lr->obj_off));
+		__db_shalloc_free(reginfo, R_ADDR(reginfo, lr->obj_off));
 
 		/* Discard the locker hash table. */
-		__db_shalloc_free(
-		    reginfo, R_ADDR(<->reginfo, lr->locker_off));
+		__db_shalloc_free(reginfo, R_ADDR(reginfo, lr->locker_off));
 
 		/* Discard locks. */
 		while ((lp =
@@ -359,6 +345,19 @@ __lock_dbenv_refresh(dbenv)
 	return (ret);
 }
 
+/*
+ * __lock_region_mutex_count --
+ *	Return the number of mutexes the lock region will need.
+ *
+ * PUBLIC: u_int32_t __lock_region_mutex_count __P((DB_ENV *));
+ */
+u_int32_t
+__lock_region_mutex_count(dbenv)
+	DB_ENV *dbenv;
+{
+	return (dbenv->lk_max);
+}
+
 /*
  * __lock_region_size --
  *	Return the region size.
@@ -377,20 +376,16 @@ __lock_region_size(dbenv)
 	retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 0);
 	retval += __db_shalloc_size(
 	    (size_t)(dbenv->lk_modes * dbenv->lk_modes), 0);
-	retval += __db_shalloc_size(__db_tablesize
-	    (dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 0);
-	retval += __db_shalloc_size(__db_tablesize
-	    (dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 0);
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+	retval += __db_shalloc_size(
+	    __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 0);
+	retval += __db_shalloc_size(
+	    __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 0);
 	retval +=
-	    __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 0);
-#endif
-	retval += __db_shalloc_size
-	    (sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+	    __db_shalloc_size(sizeof(struct __db_lock), 0) * dbenv->lk_max;
 	retval +=
-	    __db_shalloc_size(sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
+	    __db_shalloc_size(sizeof(DB_LOCKOBJ), 0) * dbenv->lk_max_objects;
 	retval +=
-	    __db_shalloc_size(sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
+	    __db_shalloc_size(sizeof(DB_LOCKER), 0) * dbenv->lk_max_lockers;
 
 	/*
 	 * Include 16 bytes of string space per lock.  DB doesn't use it
@@ -403,51 +398,3 @@ __lock_region_size(dbenv)
 
 	return (retval);
 }
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-/*
- * __lock_region_maint --
- *	Return the amount of space needed for region maintenance info.
- */
-static size_t
-__lock_region_maint(dbenv)
-	DB_ENV *dbenv;
-{
-	size_t s;
-
-	s = sizeof(DB_MUTEX *) * dbenv->lk_max;
-	return (s);
-}
-#endif
-
-/*
- * __lock_region_destroy
- *	Destroy any region maintenance info.
- *
- * PUBLIC: void __lock_region_destroy __P((DB_ENV *, REGINFO *));
- */
-void
-__lock_region_destroy(dbenv, infop)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-{
-	/*
-	 * This routine is called in two cases: when discarding the mutexes
-	 * from a previous Berkeley DB run, during recovery, and two, when
-	 * discarding the mutexes as we shut down the database environment.
-	 * In the latter case, we also need to discard shared memory segments,
-	 * this is the last time we use them, and the last region-specific
-	 * call we make.
-	 */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	DB_LOCKREGION *lt;
-
-	lt = R_ADDR(infop, infop->rp->primary);
-
-	__db_shlocks_destroy(infop, R_ADDR(infop, lt->maint_off));
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, R_ADDR(infop, lt->maint_off));
-#endif
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, infop->primary);
-}
diff --git a/storage/bdb/lock/lock_stat.c b/storage/bdb/lock/lock_stat.c
index 7cf56bb90ce..0b40c6a528f 100644
--- a/storage/bdb/lock/lock_stat.c
+++ b/storage/bdb/lock/lock_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_stat.c,v 11.64 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_stat.c,v 12.11 2005/10/07 20:21:31 ubell Exp $
  */
 
 #include "db_config.h"
@@ -54,7 +54,8 @@ __lock_stat_pp(dbenv, statp, flags)
 	DB_LOCK_STAT **statp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -64,12 +65,9 @@ __lock_stat_pp(dbenv, statp, flags)
 	    "DB_ENV->lock_stat", flags, DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_stat(dbenv, statp, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_stat(dbenv, statp, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -95,20 +93,20 @@ __lock_stat(dbenv, statp, flags)
 		return (ret);
 
 	/* Copy out the global statistics. */
-	R_LOCK(dbenv, <->reginfo);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	region = lt->reginfo.primary;
 	memcpy(stats, ®ion->stat, sizeof(*stats));
 	stats->st_locktimeout = region->lk_timeout;
 	stats->st_txntimeout = region->tx_timeout;
 
-	stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
-	stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
+	__mutex_set_wait_info(dbenv, region->mtx_region,
+	    &stats->st_region_wait, &stats->st_region_nowait);
 	stats->st_regsize = lt->reginfo.rp->size;
 	if (LF_ISSET(DB_STAT_CLEAR)) {
 		tmp = region->stat;
 		memset(®ion->stat, 0, sizeof(region->stat));
-		MUTEX_CLEAR(<->reginfo.rp->mutex);
+		__mutex_clear(dbenv, region->mtx_region);
 
 		region->stat.st_id = tmp.st_id;
 		region->stat.st_cur_maxid = tmp.st_cur_maxid;
@@ -124,7 +122,7 @@ __lock_stat(dbenv, statp, flags)
 		region->stat.st_nmodes = tmp.st_nmodes;
 	}
 
-	R_UNLOCK(dbenv, <->reginfo);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 
 	*statp = stats;
 	return (0);
@@ -141,7 +139,8 @@ __lock_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -154,12 +153,9 @@ __lock_stat_print_pp(dbenv, flags)
 	    flags, DB_STAT_CLEAR | DB_STAT_LOCK_FLAGS)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __lock_stat_print(dbenv, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__lock_stat_print(dbenv, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -235,11 +231,15 @@ __lock_print_stats(dbenv, flags)
 	__db_dl(dbenv,
 	    "Total number of locks released", (u_long)sp->st_nreleases);
 	__db_dl(dbenv,
-  "Total number of lock requests failing because DB_LOCK_NOWAIT was set",
-	    (u_long)sp->st_nnowaits);
+	    "Total number of locks upgraded", (u_long)sp->st_nupgrade);
 	__db_dl(dbenv,
-  "Total number of locks not immediately available due to conflicts",
-	    (u_long)sp->st_nconflicts);
+	    "Total number of locks downgraded", (u_long)sp->st_ndowngrade);
+	__db_dl(dbenv,
+	  "Lock requests not available due to conflicts, for which we waited",
+	    (u_long)sp->st_lock_wait);
+	__db_dl(dbenv,
+  "Lock requests not available due to conflicts, for which we did not wait",
+	    (u_long)sp->st_lock_nowait);
 	__db_dl(dbenv, "Number of deadlocks", (u_long)sp->st_ndeadlocks);
 	__db_dl(dbenv, "Lock timeout value", (u_long)sp->st_locktimeout);
 	__db_dl(dbenv, "Number of locks that have timed out",
@@ -283,19 +283,19 @@ __lock_print_all(dbenv, flags)
 	lrp = lt->reginfo.primary;
 	DB_MSGBUF_INIT(&mb);
 
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	__db_print_reginfo(dbenv, <->reginfo, "Lock");
 
 	if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_PARAMS)) {
 		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 		__db_msg(dbenv, "Lock region parameters:");
+		__mutex_print_debug_single(dbenv,
+		    "Lock region region mutex", lrp->mtx_region, flags);
 		STAT_ULONG("locker table size", lrp->locker_t_size);
 		STAT_ULONG("object table size", lrp->object_t_size);
 		STAT_ULONG("obj_off", lrp->obj_off);
-		STAT_ULONG("osynch_off", lrp->osynch_off);
 		STAT_ULONG("locker_off", lrp->locker_off);
-		STAT_ULONG("lsynch_off", lrp->lsynch_off);
 		STAT_ULONG("need_dd", lrp->need_dd);
 		if (LOCK_TIME_ISVALID(&lrp->next_timeout) &&
 		    strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S",
@@ -341,7 +341,7 @@ __lock_print_all(dbenv, flags)
 			}
 		}
 	}
-	UNLOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 
 	return (0);
 }
@@ -355,11 +355,12 @@ __lock_dump_locker(dbenv, mbp, lt, lip)
 {
 	struct __db_lock *lp;
 	time_t s;
-	char buf[64];
+	char buf[DB_THREADID_STRLEN];
 
 	__db_msgadd(dbenv,
-	    mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d",
-	    (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites);
+	    mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d pid/thread %s",
+	    (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites,
+	    dbenv->thread_id_string(dbenv, lip->pid, lip->tid, buf));
 	__db_msgadd(
 	    dbenv, mbp, "%s", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : "   ");
 	if (LOCK_TIME_ISVALID(&lip->tx_expire)) {
@@ -447,9 +448,6 @@ __lock_printlock(lt, mbp, lp, ispgno)
 	}
 
 	switch (lp->mode) {
-	case DB_LOCK_DIRTY:
-		mode = "DIRTY_READ";
-		break;
 	case DB_LOCK_IREAD:
 		mode = "IREAD";
 		break;
@@ -465,6 +463,9 @@ __lock_printlock(lt, mbp, lp, ispgno)
 	case DB_LOCK_READ:
 		mode = "READ";
 		break;
+	case DB_LOCK_READ_UNCOMMITTED:
+		mode = "READ_UNCOMMITTED";
+		break;
 	case DB_LOCK_WRITE:
 		mode = "WRITE";
 		break;
@@ -491,9 +492,6 @@ __lock_printlock(lt, mbp, lp, ispgno)
 	case DB_LSTAT_HELD:
 		status = "HELD";
 		break;
-	case DB_LSTAT_NOTEXIST:
-		status = "NOTEXIST";
-		break;
 	case DB_LSTAT_PENDING:
 		status = "PENDING";
 		break;
diff --git a/storage/bdb/lock/lock_timer.c b/storage/bdb/lock/lock_timer.c
index 55efb6c6c02..f05eac860c0 100644
--- a/storage/bdb/lock/lock_timer.c
+++ b/storage/bdb/lock/lock_timer.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_timer.c,v 11.141 2004/03/24 20:51:39 bostic Exp $
+ * $Id: lock_timer.c,v 12.2 2005/07/20 16:51:44 bostic Exp $
  */
 
 #include "db_config.h"
@@ -38,14 +38,11 @@ __lock_set_timeout(dbenv, locker, timeout, op)
 	db_timeout_t timeout;
 	u_int32_t op;
 {
-	DB_LOCKTAB *lt;
 	int ret;
 
-	lt = dbenv->lk_handle;
-
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 	ret = __lock_set_timeout_internal(dbenv, locker, timeout, op);
-	UNLOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
@@ -127,7 +124,7 @@ __lock_inherit_timeout(dbenv, parent, locker)
 	lt = dbenv->lk_handle;
 	region = lt->reginfo.primary;
 	ret = 0;
-	LOCKREGION(dbenv, lt);
+	LOCK_SYSTEM_LOCK(dbenv);
 
 	/* If the parent does not exist, we are done. */
 	LOCKER_LOCK(lt, region, parent, locker_ndx);
@@ -162,8 +159,7 @@ __lock_inherit_timeout(dbenv, parent, locker)
 	}
 
 done:
-err:
-	UNLOCKREGION(dbenv, lt);
+err:	LOCK_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
diff --git a/storage/bdb/lock/lock_util.c b/storage/bdb/lock/lock_util.c
index 0c38d72ac69..a8f7b123cd4 100644
--- a/storage/bdb/lock/lock_util.c
+++ b/storage/bdb/lock/lock_util.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: lock_util.c,v 11.12 2004/09/22 03:48:29 bostic Exp $
+ * $Id: lock_util.c,v 12.2 2005/06/16 20:23:11 bostic Exp $
  */
 
 #include "db_config.h"
@@ -120,21 +120,6 @@ __lock_lhash(lock_obj)
 	return (__ham_func5(NULL, obj_data, lock_obj->lockobj.size));
 }
 
-/*
- * __lock_locker_hash --
- *	Hash function for entering lockers into the locker hash table.
- *	Since these are simply 32-bit unsigned integers, just return
- *	the locker value.
- *
- * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
- */
-u_int32_t
-__lock_locker_hash(locker)
-	u_int32_t locker;
-{
-	return (locker);
-}
-
 /*
  * __lock_nomem --
  *	Report a lack of some resource.
diff --git a/storage/bdb/log/log.c b/storage/bdb/log/log.c
index 14e888abd29..6e82eea3757 100644
--- a/storage/bdb/log/log.c
+++ b/storage/bdb/log/log.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log.c,v 11.161 2004/10/15 16:59:42 bostic Exp $
+ * $Id: log.c,v 12.15 2005/10/14 15:20:24 bostic Exp $
  */
 
 #include "db_config.h"
@@ -40,7 +40,10 @@ __log_open(dbenv)
 {
 	DB_LOG *dblp;
 	LOG *lp;
-	int ret;
+	u_int8_t *bulk;
+	int region_locked, ret;
+
+	region_locked = 0;
 
 	/* Create/initialize the DB_LOG structure. */
 	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOG), &dblp)) != 0)
@@ -71,19 +74,16 @@ __log_open(dbenv)
 	/* Set the local addresses. */
 	lp = dblp->reginfo.primary =
 	    R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary);
+	dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
 
 	/*
-	 * If the region is threaded, then we have to lock both the handles
-	 * and the region, and we need to allocate a mutex for that purpose.
+	 * If the region is threaded, we have to lock the DBREG list, and we
+	 * need to allocate a mutex for that purpose.
 	 */
-	if (F_ISSET(dbenv, DB_ENV_THREAD) &&
-	    (ret = __db_mutex_setup(dbenv, &dblp->reginfo, &dblp->mutexp,
-	    MUTEX_ALLOC | MUTEX_NO_RLOCK)) != 0)
+	if ((ret = __mutex_alloc(dbenv,
+	    MTX_LOG_REGION, DB_MUTEX_THREAD, &dblp->mtx_dbreg)) != 0)
 		goto err;
 
-	/* Initialize the rest of the structure. */
-	dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
-
 	/*
 	 * Set the handle -- we may be about to run recovery, which allocates
 	 * log cursors.  Log cursors require logging be already configured,
@@ -124,8 +124,23 @@ __log_open(dbenv)
 		    (ret = __log_newfile(dblp, NULL, 0)) != 0)
 			goto err;
 
-		/* Initialize replication's next-expected LSN value. */
+		/*
+		 * Initialize replication's next-expected LSN value
+		 * and replication's bulk buffer.
+		 */
 		lp->ready_lsn = lp->lsn;
+		if (IS_ENV_REPLICATED(dbenv)) {
+			if ((ret = __db_shalloc(&dblp->reginfo, MEGABYTE, 0,
+			    &bulk)) != 0)
+				goto err;
+			lp->bulk_buf = R_OFFSET(&dblp->reginfo, bulk);
+			lp->bulk_len = MEGABYTE;
+			lp->bulk_off = 0;
+		} else {
+			lp->bulk_buf = INVALID_ROFF;
+			lp->bulk_len = 0;
+			lp->bulk_off = 0;
+		}
 	} else {
 		/*
 		 * A process joining the region may have reset the log file
@@ -133,6 +148,9 @@ __log_open(dbenv)
 		 * create.  We need to check that the size is reasonable given
 		 * the buffer size in the region.
 		 */
+		LOG_SYSTEM_LOCK(dbenv);
+		region_locked = 1;
+
 		 if (dbenv->lg_size != 0) {
 			if ((ret =
 			    __log_check_sizes(dbenv, dbenv->lg_size, 0)) != 0)
@@ -146,22 +164,21 @@ __log_open(dbenv)
 			F_SET(dbenv, DB_ENV_LOG_AUTOREMOVE);
 		if (lp->db_log_inmemory)
 			F_SET(dbenv, DB_ENV_LOG_INMEMORY);
+
+		LOG_SYSTEM_UNLOCK(dbenv);
+		region_locked = 0;
 	}
 
-	R_UNLOCK(dbenv, &dblp->reginfo);
 	return (0);
 
 err:	dbenv->lg_handle = NULL;
 	if (dblp->reginfo.addr != NULL) {
-		if (F_ISSET(&dblp->reginfo, REGION_CREATE))
-			ret = __db_panic(dbenv, ret);
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		if (region_locked)
+			LOG_SYSTEM_UNLOCK(dbenv);
 		(void)__db_r_detach(dbenv, &dblp->reginfo, 0);
 	}
 
-	if (dblp->mutexp != NULL)
-		__db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
-
+	(void)__mutex_free(dbenv, &dblp->mtx_dbreg);
 	__os_free(dbenv, dblp);
 
 	return (ret);
@@ -176,13 +193,9 @@ __log_init(dbenv, dblp)
 	DB_ENV *dbenv;
 	DB_LOG *dblp;
 {
-	DB_MUTEX *flush_mutexp;
 	LOG *lp;
 	int ret;
 	void *p;
-#ifdef  HAVE_MUTEX_SYSTEM_RESOURCES
-	u_int8_t *addr;
-#endif
 
 	/*
 	 * This is the first point where we can validate the buffer size,
@@ -194,13 +207,17 @@ __log_init(dbenv, dblp)
 		return (ret);
 
 	if ((ret = __db_shalloc(&dblp->reginfo,
-	    sizeof(*lp), MUTEX_ALIGN, &dblp->reginfo.primary)) != 0)
+	    sizeof(*lp), 0, &dblp->reginfo.primary)) != 0)
 		goto mem_err;
 	dblp->reginfo.rp->primary =
 	    R_OFFSET(&dblp->reginfo, dblp->reginfo.primary);
 	lp = dblp->reginfo.primary;
 	memset(lp, 0, sizeof(*lp));
 
+	if ((ret =
+	    __mutex_alloc(dbenv, MTX_LOG_REGION, 0, &lp->mtx_region)) != 0)
+		return (ret);
+
 	lp->fid_max = 0;
 	SH_TAILQ_INIT(&lp->fq);
 	lp->free_fid_stack = INVALID_ROFF;
@@ -224,31 +241,11 @@ __log_init(dbenv, dblp)
 	 */
 	ZERO_LSN(lp->cached_ckp_lsn);
 
-#ifdef  HAVE_MUTEX_SYSTEM_RESOURCES
-	/* Allocate room for the log maintenance info and initialize it. */
-	if ((ret = __db_shalloc(&dblp->reginfo,
-	    sizeof(REGMAINT) + LG_MAINT_SIZE, 0, &addr)) != 0)
-		goto mem_err;
-	__db_maintinit(&dblp->reginfo, addr, LG_MAINT_SIZE);
-	lp->maint_off = R_OFFSET(&dblp->reginfo, addr);
-#endif
-
-	if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &lp->fq_mutex,
-	    MUTEX_NO_RLOCK)) != 0)
+	if ((ret =
+	    __mutex_alloc(dbenv, MTX_LOG_FILENAME, 0, &lp->mtx_filelist)) != 0)
 		return (ret);
-
-	/*
-	 * We must create a place for the flush mutex separately; mutexes have
-	 * to be aligned to MUTEX_ALIGN, and the only way to guarantee that is
-	 * to make sure they're at the beginning of a shalloc'ed chunk.
-	 */
-	if ((ret = __db_shalloc(&dblp->reginfo,
-	    sizeof(DB_MUTEX), MUTEX_ALIGN, &flush_mutexp)) != 0)
-		goto mem_err;
-	if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, flush_mutexp,
-	    MUTEX_NO_RLOCK)) != 0)
+	if ((ret = __mutex_alloc(dbenv, MTX_LOG_FLUSH, 0, &lp->mtx_flush)) != 0)
 		return (ret);
-	lp->flush_mutex_off = R_OFFSET(&dblp->reginfo, flush_mutexp);
 
 	/* Initialize the buffer. */
 	if ((ret = __db_shalloc(&dblp->reginfo, dbenv->lg_bsize, 0, &p)) != 0) {
@@ -258,6 +255,7 @@ mem_err:	__db_err(dbenv, "Unable to allocate memory for the log buffer");
 	lp->regionmax = dbenv->lg_regionmax;
 	lp->buffer_off = R_OFFSET(&dblp->reginfo, p);
 	lp->buffer_size = dbenv->lg_bsize;
+	lp->filemode = dbenv->lg_filemode;
 	lp->log_size = lp->log_nsize = dbenv->lg_size;
 
 	/* Initialize the commit Queue. */
@@ -276,7 +274,7 @@ mem_err:	__db_err(dbenv, "Unable to allocate memory for the log buffer");
 	 */
 	lp->persist.magic = DB_LOGMAGIC;
 	lp->persist.version = DB_LOGVERSION;
-	lp->persist.mode = (u_int32_t)dbenv->db_mode;
+	lp->persist.notused = 0;
 
 	/* Migrate persistent flags from the DB_ENV into the region. */
 	if (F_ISSET(dbenv, DB_ENV_LOG_AUTOREMOVE))
@@ -750,17 +748,10 @@ __log_valid(dblp, number, set_persist, fhpp, flags, statusp)
 	 * set the region's persistent information based on the headers.
 	 *
 	 * Override the current log file size.
-	 *
-	 * XXX
-	 * Always use the persistent header's mode, regardless of what was set
-	 * in the current environment.  We've always done it this way, but it's
-	 * probably a bug -- I can't think of a way not-changing the mode would
-	 * be a problem, though.
 	 */
 	if (set_persist) {
 		lp = dblp->reginfo.primary;
 		lp->log_size = persist->log_size;
-		lp->persist.mode = persist->mode;
 	}
 
 err:	if (fname != NULL)
@@ -791,6 +782,7 @@ __log_dbenv_refresh(dbenv)
 	DB_LOG *dblp;
 	LOG *lp;
 	REGINFO *reginfo;
+	struct __fname *fnp;
 	int ret, t_ret;
 
 	dblp = dbenv->lg_handle;
@@ -798,9 +790,19 @@ __log_dbenv_refresh(dbenv)
 	lp = reginfo->primary;
 
 	/* We may have opened files as part of XA; if so, close them. */
-	F_SET(dblp, DBLOG_RECOVER);
 	ret = __dbreg_close_files(dbenv);
 
+	/*
+	 * After we close the files, check for any unlogged closes left in
+	 * the shared memory queue.  If we find any, we need to panic the
+	 * region.  Note, just set "ret" -- a panic overrides any previously
+	 * set error return.
+	 */
+	for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname); fnp != NULL;
+	    fnp = SH_TAILQ_NEXT(fnp, q, __fname))
+		if (F_ISSET(fnp, DB_FNAME_NOTLOGGED))
+			ret = __db_panic(dbenv, EINVAL);
+
 	/*
 	 * If a private region, return the memory to the heap.  Not needed for
 	 * filesystem-backed or system shared memory regions, that memory isn't
@@ -808,8 +810,9 @@ __log_dbenv_refresh(dbenv)
 	 */
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
 		/* Discard the flush mutex. */
-		__db_shalloc_free(reginfo,
-		    R_ADDR(reginfo, lp->flush_mutex_off));
+		if ((t_ret =
+		    __mutex_free(dbenv, &lp->mtx_flush)) != 0 && ret == 0)
+			ret = t_ret;
 
 		/* Discard the buffer. */
 		__db_shalloc_free(reginfo, R_ADDR(reginfo, lp->buffer_off));
@@ -820,9 +823,9 @@ __log_dbenv_refresh(dbenv)
 			    R_ADDR(reginfo, lp->free_fid_stack));
 	}
 
-	/* Discard the per-thread lock. */
-	if (dblp->mutexp != NULL)
-		__db_mutex_free(dbenv, reginfo, dblp->mutexp);
+	/* Discard the per-thread DBREG mutex. */
+	if ((t_ret = __mutex_free(dbenv, &dblp->mtx_dbreg)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/* Detach from the region. */
 	if ((t_ret = __db_r_detach(dbenv, reginfo, 0)) != 0 && ret == 0)
@@ -848,9 +851,9 @@ __log_dbenv_refresh(dbenv)
  * __log_get_cached_ckp_lsn --
  *	Retrieve any last checkpoint LSN that we may have found on startup.
  *
- * PUBLIC: void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+ * PUBLIC: int __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
  */
-void
+int
 __log_get_cached_ckp_lsn(dbenv, ckp_lsnp)
 	DB_ENV *dbenv;
 	DB_LSN *ckp_lsnp;
@@ -861,9 +864,29 @@ __log_get_cached_ckp_lsn(dbenv, ckp_lsnp)
 	dblp = (DB_LOG *)dbenv->lg_handle;
 	lp = (LOG *)dblp->reginfo.primary;
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	*ckp_lsnp = lp->cached_ckp_lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
+}
+
+/*
+ * __log_region_mutex_count --
+ *	Return the number of mutexes the log region will need.
+ *
+ * PUBLIC: u_int32_t __log_region_mutex_count __P((DB_ENV *));
+ */
+u_int32_t
+__log_region_mutex_count(dbenv)
+	DB_ENV *dbenv;
+{
+	/*
+	 * We need a few assorted mutexes, and one per transaction waiting
+	 * on the group commit list.  We can't know how many that will be,
+	 * but it should be bounded by the maximum active transactions.
+	 */
+	return (dbenv->tx_max + 5);
 }
 
 /*
@@ -881,44 +904,15 @@ __log_region_size(dbenv)
 	size_t s;
 
 	s = dbenv->lg_regionmax + dbenv->lg_bsize;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	if (F_ISSET(dbenv, DB_ENV_THREAD))
-		s += sizeof(REGMAINT) + LG_MAINT_SIZE;
-#endif
-	return (s);
-}
 
-/*
- * __log_region_destroy
- *	Destroy any region maintenance info.
- *
- * PUBLIC: void __log_region_destroy __P((DB_ENV *, REGINFO *));
- */
-void
-__log_region_destroy(dbenv, infop)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-{
 	/*
-	 * This routine is called in two cases: when discarding the mutexes
-	 * from a previous Berkeley DB run, during recovery, and two, when
-	 * discarding the mutexes as we shut down the database environment.
-	 * In the latter case, we also need to discard shared memory segments,
-	 * this is the last time we use them, and the last region-specific
-	 * call we make.
+	 * If running with replication, add in space for bulk buffer.
+	 * Allocate a megabyte and a little bit more space.
 	 */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	LOG *lp;
+	if (IS_ENV_REPLICATED(dbenv))
+		s += MEGABYTE;
 
-	lp = R_ADDR(infop, infop->rp->primary);
-
-	/* Destroy mutexes. */
-	__db_shlocks_destroy(infop, R_ADDR(infop, lp->maint_off));
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, R_ADDR(infop, lp->maint_off));
-#endif
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, infop->primary);
+	return (s);
 }
 
 /*
@@ -940,7 +934,6 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn)
 	DB_LOG *dblp;
 	DB_LOGC *logc;
 	DB_LSN end_lsn;
-	DB_MUTEX *flush_mutexp;
 	LOG *lp;
 	u_int32_t bytes, c_len;
 	int ret, t_ret;
@@ -960,7 +953,7 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn)
 	dblp = (DB_LOG *)dbenv->lg_handle;
 	lp = (LOG *)dblp->reginfo.primary;
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 
 	/*
 	 * Flush the log so we can simply initialize the in-memory buffer
@@ -1000,11 +993,10 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn)
 	 * If the saved lsn is greater than our new end of log, reset it
 	 * to our current end of log.
 	 */
-	flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
-	MUTEX_LOCK(dbenv, flush_mutexp);
+	MUTEX_LOCK(dbenv, lp->mtx_flush);
 	if (log_compare(&lp->s_lsn, lsn) > 0)
 		lp->s_lsn = lp->lsn;
-	MUTEX_UNLOCK(dbenv, flush_mutexp);
+	MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 
 	/* Initialize the in-region buffer to a pristine state. */
 	ZERO_LSN(lp->f_lsn);
@@ -1017,17 +1009,14 @@ __log_vtruncate(dbenv, lsn, ckplsn, trunclsn)
 	if ((ret = __log_zero(dbenv, &lp->lsn, &end_lsn)) != 0)
 		goto err;
 
-err:	R_UNLOCK(dbenv, &dblp->reginfo);
+err:	LOG_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
 /*
  * __log_is_outdated --
- *	Used by the replication system to identify if a client's logs
- * are too old.  The log represented by dbenv is compared to the file
- * number passed in fnum.  If the log file fnum does not exist and is
- * lower-numbered than the current logs, the we return *outdatedp non
- * zero, else we return it 0.
+ *	Used by the replication system to identify if a client's logs are too
+ *	old.
  *
  * PUBLIC: int __log_is_outdated __P((DB_ENV *, u_int32_t, int *));
  */
@@ -1046,12 +1035,17 @@ __log_is_outdated(dbenv, fnum, outdatedp)
 
 	dblp = dbenv->lg_handle;
 
+	/*
+	 * The log represented by dbenv is compared to the file number passed
+	 * in fnum.  If the log file fnum does not exist and is lower-numbered
+	 * than the current logs, return *outdatedp non-zero, else we return 0.
+	 */
 	if (F_ISSET(dbenv, DB_ENV_LOG_INMEMORY)) {
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 		lp = (LOG *)dblp->reginfo.primary;
 		filestart = SH_TAILQ_FIRST(&lp->logfiles, __db_filestart);
-		*outdatedp = (fnum < filestart->file);
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		*outdatedp = filestart == NULL ? 0 : (fnum < filestart->file);
+		LOG_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 
@@ -1068,10 +1062,10 @@ __log_is_outdated(dbenv, fnum, outdatedp)
 	 * too little.  If it's too little, then we need to indicate
 	 * that the LSN is outdated.
 	 */
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	lp = (LOG *)dblp->reginfo.primary;
 	cfile = lp->lsn.file;
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	if (cfile > fnum)
 		*outdatedp = 1;
@@ -1279,10 +1273,13 @@ __log_inmem_chkspace(dblp, len)
 	DB_LOG *dblp;
 	size_t len;
 {
+	DB_ENV *dbenv;
 	LOG *lp;
 	DB_LSN active_lsn, old_active_lsn;
 	struct __db_filestart *filestart;
+	int ret;
 
+	dbenv = dblp->dbenv;
 	lp = dblp->reginfo.primary;
 
 	DB_ASSERT(lp->db_log_inmemory);
@@ -1299,7 +1296,7 @@ __log_inmem_chkspace(dblp, len)
 	 * don't even bother checking: in that case we can always overwrite old
 	 * log records, because we're never going to abort.
 	 */
-	while (TXN_ON(dblp->dbenv) &&
+	while (TXN_ON(dbenv) &&
 	    RINGBUF_LEN(lp, lp->b_off, lp->a_off) <= len) {
 		old_active_lsn = lp->active_lsn;
 		active_lsn = lp->lsn;
@@ -1308,14 +1305,15 @@ __log_inmem_chkspace(dblp, len)
 		 * Drop the log region lock so we don't hold it while
 		 * taking the transaction region lock.
 		 */
-		R_UNLOCK(dblp->dbenv, &dblp->reginfo);
-		__txn_getactive(dblp->dbenv, &active_lsn);
-		R_LOCK(dblp->dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
+		if ((ret = __txn_getactive(dbenv, &active_lsn)) != 0)
+			return (ret);
+		LOG_SYSTEM_LOCK(dbenv);
 		active_lsn.offset = 0;
 
 		/* If we didn't make any progress, give up. */
 		if (log_compare(&active_lsn, &old_active_lsn) == 0) {
-			__db_err(dblp->dbenv,
+			__db_err(dbenv,
       "In-memory log buffer is full (an active transaction spans the buffer)");
 			return (DB_LOG_BUFFER_FULL);
 		}
diff --git a/storage/bdb/log/log_archive.c b/storage/bdb/log/log_archive.c
index 8b9e58163d2..267997eadbf 100644
--- a/storage/bdb/log/log_archive.c
+++ b/storage/bdb/log/log_archive.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_archive.c,v 11.62 2004/07/16 21:38:59 mjc Exp $
+ * $Id: log_archive.c,v 12.9 2005/11/04 17:27:58 ubell Exp $
  */
 
 #include "db_config.h"
@@ -14,7 +14,6 @@
 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -41,7 +40,8 @@ __log_archive_pp(dbenv, listp, flags)
 	char ***listp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -61,12 +61,9 @@ __log_archive_pp(dbenv, listp, flags)
 			return (ret);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_archive(dbenv, listp, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_archive(dbenv, listp, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -85,11 +82,10 @@ __log_archive(dbenv, listp, flags)
 	LOG *lp;
 	DB_LOGC *logc;
 	DB_LSN stable_lsn;
-	__txn_ckp_args *ckp_args;
 	u_int array_size, n;
 	u_int32_t fnum;
 	int ret, t_ret;
-	char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN];
+	char **array, **arrayp, *name, *p, *pref;
 
 	dblp = dbenv->lg_handle;
 	lp = (LOG *)dblp->reginfo.primary;
@@ -117,24 +113,10 @@ __log_archive(dbenv, listp, flags)
 		return (0);
 
 	/*
-	 * Get the absolute pathname of the current directory.  It would
-	 * be nice to get the shortest pathname of the database directory,
-	 * but that's just not possible.
-	 *
-	 * XXX
-	 * Can't trust getcwd(3) to set a valid errno.  If it doesn't, just
-	 * guess that we ran out of memory.
+	 * Prepend the original absolute pathname if the user wants an
+	 * absolute path to the database environment directory.
 	 */
-	if (LF_ISSET(DB_ARCH_ABS)) {
-		__os_set_errno(0);
-		if ((pref = getcwd(buf, sizeof(buf))) == NULL) {
-			if (__os_get_errno() == 0)
-				__os_set_errno(ENOMEM);
-			ret = __os_get_errno();
-			goto err;
-		}
-	} else
-		pref = NULL;
+	pref = LF_ISSET(DB_ARCH_ABS) ? dbenv->db_abshome : NULL;
 
 	LF_CLR(DB_ARCH_ABS);
 	switch (flags) {
@@ -159,44 +141,18 @@ __log_archive(dbenv, listp, flags)
 		__log_autoremove(dbenv);
 		goto err;
 	case 0:
-		memset(&rec, 0, sizeof(rec));
-		if (!TXN_ON(dbenv)) {
-			__log_get_cached_ckp_lsn(dbenv, &stable_lsn);
-			if (IS_ZERO_LSN(stable_lsn) && (ret =
-			     __txn_findlastckp(dbenv, &stable_lsn, NULL)) != 0)
-				goto err;
-			if (IS_ZERO_LSN(stable_lsn))
-				goto err;
-		}
-		else if (__txn_getckp(dbenv, &stable_lsn) != 0) {
-			/*
-			 * A failure return means that there's no checkpoint
-			 * in the log (so we are not going to be deleting
-			 * any log files).
-			 */
-			goto err;
-		}
-		if ((ret = __log_cursor(dbenv, &logc)) != 0)
-			goto err;
-		if ((ret = __log_c_get(logc, &stable_lsn, &rec, DB_SET)) != 0 ||
-		    (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
-			/*
-			 * A return of DB_NOTFOUND may only mean that the
-			 * checkpoint LSN is before the beginning of the
-			 * log files that we still have.  This is not
-			 * an error;  it just means our work is done.
-			 */
+
+		ret = __log_get_stable_lsn(dbenv, &stable_lsn);
+		/*
+		 * A return of DB_NOTFOUND means the checkpoint LSN
+		 * is before the beginning of the log files we have.
+		 * This is not an error; it just means we're done.
+		 */
+		if (ret != 0) {
 			if (ret == DB_NOTFOUND)
 				ret = 0;
-			if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-				ret = t_ret;
 			goto err;
 		}
-		if ((ret = __log_c_close(logc)) != 0)
-			goto err;
-		stable_lsn = ckp_args->ckp_lsn;
-		__os_free(dbenv, ckp_args);
-
 		/* Remove any log files before the last stable LSN. */
 		fnum = stable_lsn.file - 1;
 		break;
@@ -276,6 +232,63 @@ err:		if (array != NULL) {
 	return (ret);
 }
 
+/*
+ * __log_get_stable_lsn --
+ *	Get the stable lsn based on where checkpoints are.
+ *
+ * PUBLIC: int __log_get_stable_lsn __P((DB_ENV *, DB_LSN *));
+ */
+int
+__log_get_stable_lsn(dbenv, stable_lsn)
+	DB_ENV *dbenv;
+	DB_LSN *stable_lsn;
+{
+	DB_LOGC *logc;
+	DBT rec;
+	__txn_ckp_args *ckp_args;
+	int ret, t_ret;
+
+	ret = 0;
+	memset(&rec, 0, sizeof(rec));
+	if (!TXN_ON(dbenv)) {
+		if ((ret = __log_get_cached_ckp_lsn(dbenv, stable_lsn)) != 0)
+			goto err;
+		/*
+		 * No need to check for a return value of DB_NOTFOUND;
+		 * __txn_findlastckp returns 0 if no checkpoint record
+		 * is found.  Instead of checking the return value, we
+		 * check to see if the return LSN has been filled in.
+		 */
+		if (IS_ZERO_LSN(*stable_lsn) && (ret =
+		     __txn_findlastckp(dbenv, stable_lsn, NULL)) != 0)
+			goto err;
+		/*
+		 * If the LSN has not been filled in return DB_NOTFOUND
+		 * so that the caller knows it may be done.
+		 */
+		if (IS_ZERO_LSN(*stable_lsn)) {
+			ret = DB_NOTFOUND;
+			goto err;
+		}
+	} else if ((ret = __txn_getckp(dbenv, stable_lsn)) != 0)
+		goto err;
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		goto err;
+	/*
+	 * If we can read it, set the stable_lsn to the ckp_lsn in the
+	 * checkpoint record.
+	 */
+	if ((ret = __log_c_get(logc, stable_lsn, &rec, DB_SET)) == 0 &&
+	    (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) == 0) {
+		*stable_lsn = ckp_args->ckp_lsn;
+		__os_free(dbenv, ckp_args);
+	}
+	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+err:
+	return (ret);
+}
+
 /*
  * __log_autoremove --
  *	Delete any non-essential log files.
@@ -286,17 +299,31 @@ void
 __log_autoremove(dbenv)
 	DB_ENV *dbenv;
 {
+	int ret;
 	char **begin, **list;
 
-	if (__log_archive(dbenv, &list, DB_ARCH_ABS) != 0)
+	/*
+	 * Complain if there's an error, but don't return the error to our
+	 * caller.  Auto-remove is done when writing a log record, and we
+	 * don't want to fail a write, which could fail the corresponding
+	 * committing transaction, for a permissions error.
+	 */
+	if ((ret = __log_archive(dbenv, &list, DB_ARCH_ABS)) != 0) {
+		if (ret != DB_NOTFOUND)
+			__db_err(dbenv,
+			    "log file auto-remove: %s", db_strerror(ret));
 		return;
+	}
 
+	/*
+	 * Remove the files.  No error message needed for __os_unlink failure,
+	 * the underlying OS layer has its own error handling.
+	 */
 	if (list != NULL) {
 		for (begin = list; *list != NULL; ++list)
 			(void)__os_unlink(dbenv, *list);
 		__os_ufree(dbenv, begin);
 	}
-	return;
 }
 
 /*
diff --git a/storage/bdb/log/log_compare.c b/storage/bdb/log/log_compare.c
index 97d0367ea8e..d4347b152aa 100644
--- a/storage/bdb/log/log_compare.c
+++ b/storage/bdb/log/log_compare.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_compare.c,v 11.8 2004/01/28 03:36:17 bostic Exp $
+ * $Id: log_compare.c,v 12.1 2005/06/16 20:23:12 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/log/log_debug.c b/storage/bdb/log/log_debug.c
new file mode 100644
index 00000000000..0484cfef26d
--- /dev/null
+++ b/storage/bdb/log/log_debug.c
@@ -0,0 +1,160 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: log_debug.c,v 1.5 2005/10/14 01:17:09 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+
+#include 
+#include 
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+
+static int __log_printf_int __P((DB_ENV *, DB_TXN *, const char *, va_list));
+
+/*
+ * __log_printf_capi --
+ *	Write a printf-style format string into the DB log.
+ *
+ * PUBLIC: int __log_printf_capi __P((DB_ENV *, DB_TXN *, const char *, ...))
+ * PUBLIC:    __attribute__ ((__format__ (__printf__, 3, 4)));
+ */
+int
+#ifdef STDC_HEADERS
+__log_printf_capi(DB_ENV *dbenv, DB_TXN *txnid, const char *fmt, ...)
+#else
+__log_printf_capi(dbenv, txnid, fmt, va_alist)
+	DB_ENV *dbenv;
+	DB_TXN *txnid;
+	const char *fmt;
+	va_dcl
+#endif
+{
+	va_list ap;
+	int ret;
+
+#ifdef STDC_HEADERS
+	va_start(ap, fmt);
+#else
+	va_start(ap);
+#endif
+	ret = __log_printf_pp(dbenv, txnid, fmt, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
+/*
+ * __log_printf_pp --
+ *	Handle the arguments and call an internal routine to do the work.
+ *
+ *	The reason this routine isn't just folded into __log_printf_capi
+ *	is because the C++ API has to call a C API routine, and you can
+ *	only pass variadic arguments to a single routine.
+ *
+ * PUBLIC: int __log_printf_pp
+ * PUBLIC:     __P((DB_ENV *, DB_TXN *, const char *, va_list));
+ */
+int
+__log_printf_pp(dbenv, txnid, fmt, ap)
+	DB_ENV *dbenv;
+	DB_TXN *txnid;
+	const char *fmt;
+	va_list ap;
+{
+	DB_THREAD_INFO *ip;
+	int rep_check, ret, t_ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lg_handle, "DB_ENV->log_printf", DB_INIT_LOG);
+
+	ENV_ENTER(dbenv, ip);
+	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
+	if (rep_check && (ret = __env_rep_enter(dbenv, 0)) != 0)
+		return (ret);
+
+	ret = __log_printf_int(dbenv, txnid, fmt, ap);
+
+	if (rep_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && (ret) == 0)
+		ret = t_ret;
+	va_end(ap);
+	ENV_LEAVE(dbenv, ip);
+
+	return (ret);
+}
+
+/*
+ * __log_printf --
+ *	Write a printf-style format string into the DB log.
+ *
+ * PUBLIC: int __log_printf __P((DB_ENV *, DB_TXN *, const char *, ...))
+ * PUBLIC:    __attribute__ ((__format__ (__printf__, 3, 4)));
+ */
+int
+#ifdef STDC_HEADERS
+__log_printf(DB_ENV *dbenv, DB_TXN *txnid, const char *fmt, ...)
+#else
+__log_printf(dbenv, txnid, fmt, va_alist)
+	DB_ENV *dbenv;
+	DB_TXN *txnid;
+	const char *fmt;
+	va_dcl
+#endif
+{
+	va_list ap;
+	int ret;
+
+#ifdef STDC_HEADERS
+	va_start(ap, fmt);
+#else
+	va_start(ap);
+#endif
+	ret = __log_printf_int(dbenv, txnid, fmt, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
+/*
+ * __log_printf_int --
+ *	Write a printf-style format string into the DB log (internal).
+ */
+static int
+__log_printf_int(dbenv, txnid, fmt, ap)
+	DB_ENV *dbenv;
+	DB_TXN *txnid;
+	const char *fmt;
+	va_list ap;
+{
+	DBT opdbt, msgdbt;
+	DB_LSN lsn;
+	char __logbuf[2048];	/* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+	if (!DBENV_LOGGING(dbenv)) {
+		__db_err(dbenv, "Logging not currently permitted");
+		return (EAGAIN);
+	}
+
+	memset(&opdbt, 0, sizeof(opdbt));
+	opdbt.data = "DIAGNOSTIC";
+	opdbt.size = sizeof("DIAGNOSTIC") - 1;
+
+	memset(&msgdbt, 0, sizeof(msgdbt));
+	msgdbt.data = __logbuf;
+	msgdbt.size = (u_int32_t)vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap);
+
+	return (__db_debug_log(
+	    dbenv, txnid, &lsn, 0, &opdbt, -1, &msgdbt, NULL, 0));
+}
diff --git a/storage/bdb/log/log_get.c b/storage/bdb/log/log_get.c
index 2e8f09a6d90..ccd1f3c3860 100644
--- a/storage/bdb/log/log_get.c
+++ b/storage/bdb/log/log_get.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_get.c,v 11.110 2004/09/17 22:00:31 mjc Exp $
+ * $Id: log_get.c,v 12.16 2005/10/21 17:13:42 bostic Exp $
  */
 
 #include "db_config.h"
@@ -30,7 +30,7 @@ static int __log_c_get_int __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
 static int __log_c_hdrchk __P((DB_LOGC *, DB_LSN *, HDR *, int *));
 static int __log_c_incursor __P((DB_LOGC *, DB_LSN *, HDR *, u_int8_t **));
 static int __log_c_inregion __P((DB_LOGC *,
-	       DB_LSN *, RLOCK *, DB_LSN *, HDR *, u_int8_t **));
+	       DB_LSN *, RLOCK *, DB_LSN *, HDR *, u_int8_t **, int *));
 static int __log_c_io __P((DB_LOGC *,
 	       u_int32_t, u_int32_t, void *, size_t *, int *));
 static int __log_c_ondisk __P((DB_LOGC *,
@@ -50,7 +50,8 @@ __log_cursor_pp(dbenv, logcp, flags)
 	DB_LOGC **logcp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -60,12 +61,9 @@ __log_cursor_pp(dbenv, logcp, flags)
 	if ((ret = __db_fchk(dbenv, "DB_ENV->log_cursor", flags, 0)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_cursor(dbenv, logcp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_cursor(dbenv, logcp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -116,8 +114,9 @@ __log_c_close_pp(logc, flags)
 	DB_LOGC *logc;
 	u_int32_t flags;
 {
+	DB_THREAD_INFO *ip;
 	DB_ENV *dbenv;
-	int rep_check, ret;
+	int ret;
 
 	dbenv = logc->dbenv;
 
@@ -125,12 +124,9 @@ __log_c_close_pp(logc, flags)
 	if ((ret = __db_fchk(dbenv, "DB_LOGC->close", flags, 0)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_c_close(logc);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_c_close(logc)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -174,7 +170,8 @@ __log_c_get_pp(logc, alsn, dbt, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbenv = logc->dbenv;
 
@@ -199,12 +196,9 @@ __log_c_get_pp(logc, alsn, dbt, flags)
 		return (__db_ferr(dbenv, "DB_LOGC->get", 1));
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_c_get(logc, alsn, dbt, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_c_get(logc, alsn, dbt, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -300,9 +294,10 @@ __log_c_get_int(logc, alsn, dbt, flags)
 	logfile_validity status;
 	u_int32_t cnt;
 	u_int8_t *rp;
-	int eof, is_hmac, ret;
+	int eof, is_hmac, need_cksum, ret;
 
 	dbenv = logc->dbenv;
+	db_cipher = dbenv->crypto_handle;
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
 	is_hmac = 0;
@@ -391,7 +386,7 @@ __log_c_get_int(logc, alsn, dbt, flags)
 	case DB_LAST:				/* Last log record. */
 		if (rlock == L_NONE) {
 			rlock = L_ACQUIRED;
-			R_LOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_LOCK(dbenv);
 		}
 		nlsn.file = lp->lsn.file;
 		nlsn.offset = lp->lsn.offset - lp->len;
@@ -422,7 +417,11 @@ next_file:	++nlsn.file;
 		hdr.size = HDR_NORMAL_SZ;
 		is_hmac = 0;
 	}
-	/* Check to see if the record is in the cursor's buffer. */
+
+	/*
+	 * Check to see if the record is in the cursor's buffer -- if so,
+	 * we'll need to checksum it.
+	 */
 	if ((ret = __log_c_incursor(logc, &nlsn, &hdr, &rp)) != 0)
 		goto err;
 	if (rp != NULL)
@@ -434,7 +433,7 @@ next_file:	++nlsn.file;
 	 * buffer.  Else, check the region's buffer.
 	 *
 	 * If the record isn't in the region's buffer, then either logs are
-	 * in-memory, and we're done, or  we're going to have to read the
+	 * in-memory, and we're done, or we're going to have to read the
 	 * record from disk.  We want to make a point of not reading past the
 	 * end of the logical log (after recovery, there may be data after the
 	 * end of the logical log, not to mention the log file may have been
@@ -449,10 +448,18 @@ next_file:	++nlsn.file;
 		F_CLR(logc, DB_LOG_DISK);
 
 		if ((ret = __log_c_inregion(logc,
-		    &nlsn, &rlock, &last_lsn, &hdr, &rp)) != 0)
+		    &nlsn, &rlock, &last_lsn, &hdr, &rp, &need_cksum)) != 0)
 			goto err;
-		if (rp != NULL)
-			goto cksum;
+		if (rp != NULL) {
+			/*
+			 * If we read the entire record from the in-memory log
+			 * buffer, we don't need to checksum it, nor do we need
+			 * to worry about vtruncate issues.
+			 */
+			if (need_cksum)
+				goto cksum;
+			goto from_memory;
+		}
 		if (lp->db_log_inmemory)
 			goto nohdr;
 	}
@@ -467,7 +474,7 @@ next_file:	++nlsn.file;
 	 */
 	if (rlock == L_ACQUIRED) {
 		rlock = L_NONE;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	}
 	if ((ret = __log_c_ondisk(
 	    logc, &nlsn, &last_lsn, flags, &hdr, &rp, &eof)) != 0)
@@ -485,13 +492,13 @@ next_file:	++nlsn.file;
 
 cksum:	/*
 	 * Discard the region lock if we're still holding it.  (The path to
-	 * get here is that we acquired the lock because of the caller's
-	 * flag argument, but we found the record in the cursor's buffer.
-	 * Improbable, but it's easy to avoid.
+	 * get here is we acquired the region lock because of the caller's
+	 * flag argument, but we found the record in the in-memory or cursor
+	 * buffers.  Improbable, but it's easy to avoid.)
 	 */
 	if (rlock == L_ACQUIRED) {
 		rlock = L_NONE;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	}
 
 	/*
@@ -500,7 +507,6 @@ cksum:	/*
 	 * OK if we're searching for the end of the log, and very, very bad
 	 * if we're reading random log records.
 	 */
-	db_cipher = dbenv->crypto_handle;
 	if ((ret = __db_check_chksum(dbenv, db_cipher,
 	    hdr.chksum, rp + hdr.size, hdr.len - hdr.size, is_hmac)) != 0) {
 		if (F_ISSET(logc, DB_LOG_SILENT_ERR)) {
@@ -550,6 +556,18 @@ nohdr:		switch (flags) {
 		}
 	}
 
+from_memory:
+	/*
+	 * Discard the region lock if we're still holding it.  (The path to
+	 * get here is we acquired the region lock because of the caller's
+	 * flag argument, but we found the record in the in-memory or cursor
+	 * buffers.  Improbable, but it's easy to avoid.)
+	 */
+	if (rlock == L_ACQUIRED) {
+		rlock = L_NONE;
+		LOG_SYSTEM_UNLOCK(dbenv);
+	}
+
 	/* Copy the record into the user's DBT. */
 	if ((ret = __db_retcopy(dbenv, dbt, rp + hdr.size,
 	    (u_int32_t)(hdr.len - hdr.size),
@@ -579,7 +597,7 @@ nohdr:		switch (flags) {
 	logc->c_prev = hdr.prev;
 
 err:	if (rlock == L_ACQUIRED)
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
@@ -644,12 +662,13 @@ __log_c_incursor(logc, lsn, hdr, pp)
  *	Check to see if the requested record is in the region's buffer.
  */
 static int
-__log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
+__log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp, need_cksump)
 	DB_LOGC *logc;
 	DB_LSN *lsn, *last_lsn;
 	RLOCK *rlockp;
 	HDR *hdr;
 	u_int8_t **pp;
+	int *need_cksump;
 {
 	DB_ENV *dbenv;
 	DB_LOG *dblp;
@@ -666,11 +685,12 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 	ret = 0;
 	b_region = 0;
 	*pp = NULL;
+	*need_cksump = 0;
 
 	/* If we haven't yet acquired the log region lock, do so. */
 	if (*rlockp == L_NONE) {
 		*rlockp = L_ACQUIRED;
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 	}
 
 	/*
@@ -737,7 +757,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 	 *
 	 * There is one case where the header check can fail: on a scan through
 	 * in-memory logs, when we reach the end of a file we can read an empty
-	 * heady.  In that case, it's safe to return zero, here: it will be
+	 * header.  In that case, it's safe to return zero, here: it will be
 	 * caught in our caller.  Otherwise, the LSN is bogus.  Fail hard.
 	 */
 	if (lp->db_log_inmemory || log_compare(lsn, &lp->f_lsn) > 0) {
@@ -754,7 +774,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 		} else if (lsn->offset + hdr->len > lp->w_off + lp->buffer_size)
 			return (DB_NOTFOUND);
 		if (logc->bp_size <= hdr->len) {
-			len = (size_t)DB_ALIGN(hdr->len * 2, 128);
+			len = (size_t)DB_ALIGN((uintmax_t)hdr->len * 2, 128);
 			if ((ret =
 			    __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
 				 return (ret);
@@ -803,7 +823,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 	 * of waiting.
 	 */
 	if (logc->bp_size <= b_region + b_disk) {
-		len = (size_t)DB_ALIGN((b_region + b_disk) * 2, 128);
+		len = (size_t)DB_ALIGN((uintmax_t)(b_region + b_disk) * 2, 128);
 		if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
 			return (ret);
 		logc->bp_size = (u_int32_t)len;
@@ -816,7 +836,7 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 	/* Release the region lock. */
 	if (*rlockp == L_ACQUIRED) {
 		*rlockp = L_NONE;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	}
 
 	/*
@@ -831,6 +851,9 @@ __log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
 			return (ret);
 		if (nr < b_disk)
 			return (__log_c_shortread(logc, lsn, 0));
+
+		/* We read bytes from the disk, we'll need to checksum them. */
+		*need_cksump = 1;
 	}
 
 	/* Copy the header information into the caller's structure. */
@@ -898,7 +921,7 @@ __log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp)
 	 * Make sure we have enough space.
 	 */
 	if (logc->bp_size <= hdr->len) {
-		len = (size_t)DB_ALIGN(hdr->len * 2, 128);
+		len = (size_t)DB_ALIGN((uintmax_t)hdr->len * 2, 128);
 		if ((ret = __os_realloc(dbenv, len, &logc->bp)) != 0)
 			return (ret);
 		logc->bp_size = (u_int32_t)len;
@@ -1041,11 +1064,13 @@ __log_c_io(logc, fnum, offset, p, nrp, eofp)
 {
 	DB_ENV *dbenv;
 	DB_LOG *dblp;
+	LOG *lp;
 	int ret;
 	char *np;
 
 	dbenv = logc->dbenv;
 	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
 
 	/*
 	 * If we've switched files, discard the current file handle and acquire
@@ -1098,6 +1123,7 @@ __log_c_io(logc, fnum, offset, p, nrp, eofp)
 	}
 
 	/* Read the data. */
+	++lp->stat.st_rcount;
 	if ((ret = __os_read(dbenv, logc->c_fhp, p, *nrp, nrp)) != 0) {
 		if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
 			__db_err(dbenv,
@@ -1178,3 +1204,134 @@ __log_c_set_maxrec(logc, np)
 
 	return (0);
 }
+
+#ifdef HAVE_REPLICATION
+/*
+ * __log_rep_split --
+ *	- Split a log buffer into individual records.
+ *
+ * This is used by a replication client to process a bulk log message from the
+ * master and convert it into individual __rep_apply requests.
+ *
+ * PUBLIC: int __log_rep_split __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *));
+ */
+int
+__log_rep_split(dbenv, rp, rec, ret_lsnp)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	DB_LSN *ret_lsnp;
+{
+	DB_LSN save_lsn, tmp_lsn;
+	DB_REP *db_rep;
+	DBT logrec;
+	REP *rep;
+	REP_CONTROL tmprp;
+	u_int32_t len;
+	int is_dup, is_perm, ret, save_ret;
+	u_int8_t *p, *ep;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	memset(&logrec, 0, sizeof(logrec));
+	memset(&save_lsn, 0, sizeof(save_lsn));
+	memset(&tmp_lsn, 0, sizeof(tmp_lsn));
+	/*
+	 * We're going to be modifying the rp LSN contents so make
+	 * our own private copy to play with.
+	 */
+	memcpy(&tmprp, rp, sizeof(tmprp));
+	/*
+	 * We send the bulk buffer on a PERM record, so often we will have
+	 * DB_LOG_PERM set.  However, we only want to mark the last LSN
+	 * we have as a PERM record.  So clear it here, and when we're on
+	 * the last record below, set it.
+	 */
+	is_perm = F_ISSET(rp, DB_LOG_PERM);
+	F_CLR(&tmprp, DB_LOG_PERM);
+	ret = save_ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	for (ep = (u_int8_t *)rec->data + rec->size, p = (u_int8_t *)rec->data;
+	    p < ep; ) {
+		/*
+		 * First thing in the buffer is the length.  Then the LSN
+		 * of this record, then the record itself.
+		 */
+		/*
+		 * XXX
+		 * If/when we add architecture neutral log files we may want
+		 * to send/receive these lengths in network byte order.
+		 */
+		memcpy(&len, p, sizeof(len));
+		p += sizeof(len);
+		memcpy(&tmprp.lsn, p, sizeof(DB_LSN));
+		p += sizeof(DB_LSN);
+		logrec.data = p;
+		logrec.size = len;
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "log_rep_split: Processing LSN [%lu][%lu]",
+		    (u_long)tmprp.lsn.file, (u_long)tmprp.lsn.offset));
+		RPRINT(dbenv, rep, (dbenv, &mb,
+    "log_rep_split: p %#lx ep %#lx logrec data %#lx, size %lu (%#lx)",
+		    P_TO_ULONG(p), P_TO_ULONG(ep), P_TO_ULONG(logrec.data),
+		    (u_long)logrec.size, (u_long)logrec.size));
+		is_dup = 0;
+		p += len;
+		if (p >= ep && is_perm)
+			F_SET(&tmprp, DB_LOG_PERM);
+		ret = __rep_apply(dbenv, &tmprp, &logrec, &tmp_lsn, &is_dup);
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "log_split: rep_apply ret %d, tmp_lsn [%lu][%lu]",
+		    ret, (u_long)tmp_lsn.file, (u_long)tmp_lsn.offset));
+#if 0
+		/*
+		 * This buffer may be old and we've already gotten these
+		 * records.  Short-circuit processing this buffer.
+		 */
+		if (is_dup)
+			goto out;
+#endif
+		switch (ret) {
+		/*
+		 * If we received the pieces we need for running recovery,
+		 * short-circuit because recovery will truncate the log to
+		 * the LSN we want anyway.
+		 */
+		case DB_REP_LOGREADY:
+			goto out;
+		/*
+		 * If we just handled a special record, retain that information.
+		 */
+		case DB_REP_ISPERM:
+		case DB_REP_NOTPERM:
+		case DB_REP_STARTUPDONE:
+			save_ret = ret;
+			save_lsn = tmp_lsn;
+			ret = 0;
+			break;
+		/*
+		 * Normal processing, do nothing, just continue.
+		 */
+		case 0:
+			break;
+		/*
+		 * If we get an error, then stop immediately.
+		 */
+		default:
+			goto out;
+		}
+	}
+out:
+	/*
+	 * If we finish processing successfully, set our return values
+	 * based on what we saw.
+	 */
+	if (ret == 0) {
+		ret = save_ret;
+		*ret_lsnp = save_lsn;
+	}
+	return (ret);
+}
+#endif
diff --git a/storage/bdb/log/log_method.c b/storage/bdb/log/log_method.c
index 1565a53a077..bc37722655d 100644
--- a/storage/bdb/log/log_method.c
+++ b/storage/bdb/log/log_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_method.c,v 11.50 2004/09/22 16:26:15 bostic Exp $
+ * $Id: log_method.c,v 12.4 2005/07/21 18:21:25 bostic Exp $
  */
 
 #include "db_config.h"
@@ -12,30 +12,13 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#ifdef HAVE_RPC
-#include 
-#endif
-
 #include 
 #include 
 #endif
 
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
 #include "db_int.h"
 #include "dbinc/log.h"
 
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
-static int __log_get_lg_bsize __P((DB_ENV *, u_int32_t *));
-static int __log_get_lg_dir __P((DB_ENV *, const char **));
-static int __log_get_lg_max __P((DB_ENV *, u_int32_t *));
-static int __log_get_lg_regionmax __P((DB_ENV *, u_int32_t *));
-
 /*
  * __log_dbenv_create --
  *	Log specific initialization of the DB_ENV structure.
@@ -52,55 +35,14 @@ __log_dbenv_create(dbenv)
 	 * state or turn off mutex locking, and so we can neither check
 	 * the panic state or acquire a mutex in the DB_ENV create path.
 	 */
-
 	dbenv->lg_bsize = 0;
 	dbenv->lg_regionmax = LG_BASE_REGION_SIZE;
-
-#ifdef	HAVE_RPC
-	/*
-	 * If we have a client, overwrite what we just setup to
-	 * point to client functions.
-	 */
-	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbenv->get_lg_bsize = __dbcl_get_lg_bsize;
-		dbenv->set_lg_bsize = __dbcl_set_lg_bsize;
-		dbenv->get_lg_dir = __dbcl_get_lg_dir;
-		dbenv->set_lg_dir = __dbcl_set_lg_dir;
-		dbenv->get_lg_max = __dbcl_get_lg_max;
-		dbenv->set_lg_max = __dbcl_set_lg_max;
-		dbenv->get_lg_regionmax = __dbcl_get_lg_regionmax;
-		dbenv->set_lg_regionmax = __dbcl_set_lg_regionmax;
-
-		dbenv->log_archive = __dbcl_log_archive;
-		dbenv->log_cursor = __dbcl_log_cursor;
-		dbenv->log_file = __dbcl_log_file;
-		dbenv->log_flush = __dbcl_log_flush;
-		dbenv->log_put = __dbcl_log_put;
-		dbenv->log_stat = __dbcl_log_stat;
-		dbenv->log_stat_print = NULL;
-	} else
-#endif
-	{
-		dbenv->get_lg_bsize = __log_get_lg_bsize;
-		dbenv->set_lg_bsize = __log_set_lg_bsize;
-		dbenv->get_lg_dir = __log_get_lg_dir;
-		dbenv->set_lg_dir = __log_set_lg_dir;
-		dbenv->get_lg_max = __log_get_lg_max;
-		dbenv->set_lg_max = __log_set_lg_max;
-		dbenv->get_lg_regionmax = __log_get_lg_regionmax;
-		dbenv->set_lg_regionmax = __log_set_lg_regionmax;
-
-		dbenv->log_archive = __log_archive_pp;
-		dbenv->log_cursor = __log_cursor_pp;
-		dbenv->log_file = __log_file_pp;
-		dbenv->log_flush = __log_flush_pp;
-		dbenv->log_put = __log_put_pp;
-		dbenv->log_stat = __log_stat_pp;
-		dbenv->log_stat_print = __log_stat_print_pp;
-	}
 }
 
-static int
+/*
+ * PUBLIC: int __log_get_lg_bsize __P((DB_ENV *, u_int32_t *));
+ */
+int
 __log_get_lg_bsize(dbenv, lg_bsizep)
 	DB_ENV *dbenv;
 	u_int32_t *lg_bsizep;
@@ -134,7 +76,63 @@ __log_set_lg_bsize(dbenv, lg_bsize)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __log_get_lg_filemode __P((DB_ENV *, int *));
+ */
+int
+__log_get_lg_filemode(dbenv, lg_modep)
+	DB_ENV *dbenv;
+	int *lg_modep;
+{
+	DB_LOG *dblp;
+
+	ENV_NOT_CONFIGURED(dbenv,
+	    dbenv->lg_handle, "DB_ENV->get_lg_filemode", DB_INIT_LOG);
+
+	if (LOGGING_ON(dbenv)) {
+		dblp = dbenv->lg_handle;
+		LOG_SYSTEM_LOCK(dbenv);
+		*lg_modep = ((LOG *)dblp->reginfo.primary)->filemode;
+		LOG_SYSTEM_UNLOCK(dbenv);
+	} else
+		*lg_modep = dbenv->lg_filemode;
+
+	return (0);
+}
+
+/*
+ * __log_set_lg_filemode --
+ *	DB_ENV->set_lg_filemode.
+ *
+ * PUBLIC: int __log_set_lg_filemode __P((DB_ENV *, int));
+ */
+int
+__log_set_lg_filemode(dbenv, lg_mode)
+	DB_ENV *dbenv;
+	int lg_mode;
+{
+	DB_LOG *dblp;
+	LOG *lp;
+
+	ENV_NOT_CONFIGURED(dbenv,
+	    dbenv->lg_handle, "DB_ENV->set_lg_filemode", DB_INIT_LOG);
+
+	if (LOGGING_ON(dbenv)) {
+		dblp = dbenv->lg_handle;
+		lp = dblp->reginfo.primary;
+		LOG_SYSTEM_LOCK(dbenv);
+		lp->filemode = lg_mode;
+		LOG_SYSTEM_UNLOCK(dbenv);
+	} else
+		dbenv->lg_filemode = lg_mode;
+
+	return (0);
+}
+
+/*
+ * PUBLIC: int __log_get_lg_max __P((DB_ENV *, u_int32_t *));
+ */
+int
 __log_get_lg_max(dbenv, lg_maxp)
 	DB_ENV *dbenv;
 	u_int32_t *lg_maxp;
@@ -146,9 +144,9 @@ __log_get_lg_max(dbenv, lg_maxp)
 
 	if (LOGGING_ON(dbenv)) {
 		dblp = dbenv->lg_handle;
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 		*lg_maxp = ((LOG *)dblp->reginfo.primary)->log_nsize;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	} else
 		*lg_maxp = dbenv->lg_size;
 
@@ -178,16 +176,19 @@ __log_set_lg_max(dbenv, lg_max)
 			return (ret);
 		dblp = dbenv->lg_handle;
 		lp = dblp->reginfo.primary;
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 		lp->log_nsize = lg_max;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	} else
 		dbenv->lg_size = lg_max;
 
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __log_get_lg_regionmax __P((DB_ENV *, u_int32_t *));
+ */
+int
 __log_get_lg_regionmax(dbenv, lg_regionmaxp)
 	DB_ENV *dbenv;
 	u_int32_t *lg_regionmaxp;
@@ -228,7 +229,10 @@ __log_set_lg_regionmax(dbenv, lg_regionmax)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __log_get_lg_dir __P((DB_ENV *, const char **));
+ */
+int
 __log_get_lg_dir(dbenv, dirp)
 	DB_ENV *dbenv;
 	const char **dirp;
diff --git a/storage/bdb/log/log_put.c b/storage/bdb/log/log_put.c
index 86deffe8641..2bd128d6190 100644
--- a/storage/bdb/log/log_put.c
+++ b/storage/bdb/log/log_put.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_put.c,v 11.168 2004/10/15 16:59:42 bostic Exp $
+ * $Id: log_put.c,v 12.22 2005/10/31 02:22:30 bostic Exp $
  */
 
 #include "db_config.h"
@@ -57,7 +57,8 @@ __log_put_pp(dbenv, lsnp, udbt, flags)
 	const DBT *udbt;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -80,12 +81,9 @@ __log_put_pp(dbenv, lsnp, udbt, flags)
 		return (EINVAL);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_put(dbenv, lsnp, udbt, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_put(dbenv, lsnp, udbt, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -106,19 +104,39 @@ __log_put(dbenv, lsnp, udbt, flags)
 	DBT *dbt, t;
 	DB_LOG *dblp;
 	DB_LSN lsn, old_lsn;
+	DB_REP *db_rep;
 	HDR hdr;
 	LOG *lp;
+	REP *rep;
+	REP_BULK bulk;
 	int lock_held, need_free, ret;
 	u_int8_t *key;
 
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
 	db_cipher = dbenv->crypto_handle;
+	db_rep = dbenv->rep_handle;
+	if (db_rep != NULL)
+		rep = db_rep->region;
+	else
+		rep = NULL;
+
 	dbt = &t;
 	t = *udbt;
 	lock_held = need_free = 0;
 	ZERO_LSN(old_lsn);
 
+	/*
+	 * If we are not a rep application, but are sharing a master rep env,
+	 * we should not be writing log records.
+	 */
+	if (IS_REP_MASTER(dbenv) && dbenv->rep_send == NULL) {
+		__db_err(dbenv, "%s %s",
+		    "Non-replication DB_ENV handle attempting",
+		    "to modify a replicated environment");
+		return (EINVAL);
+	}
+
 	/*
 	 * If we are coming from the logging code, we use an internal flag,
 	 * DB_LOG_NOCOPY, because we know we can overwrite/encrypt the log
@@ -143,29 +161,16 @@ __log_put(dbenv, lsnp, udbt, flags)
 		key = db_cipher->mac_key;
 	else
 		key = NULL;
-	/* Otherwise, we actually have a record to put.  Put it. */
 
 	/* Before we grab the region lock, calculate the record's checksum. */
 	__db_chksum(dbt->data, dbt->size, key, hdr.chksum);
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	lock_held = 1;
 
 	if ((ret = __log_put_next(dbenv, &lsn, dbt, &hdr, &old_lsn)) != 0)
 		goto panic_check;
 
-	/*
-	 * If we are not a rep application, but are sharing a master rep env,
-	 * we should not be writing log records.
-	 */
-	if (IS_REP_MASTER(dbenv) && dbenv->rep_send == NULL) {
-		__db_err(dbenv, "%s %s",
-		    "Non-replication DB_ENV handle attempting",
-		    "to modify a replicated environment");
-		ret = EINVAL;
-		goto err;
-	}
-
 	/*
 	 * Assign the return LSN before dropping the region lock.  Necessary
 	 * in case the lsn is a begin_lsn from a TXN_DETAIL structure passed
@@ -178,7 +183,7 @@ __log_put(dbenv, lsnp, udbt, flags)
 		 * Replication masters need to drop the lock to send messages,
 		 * but want to drop and reacquire it a minimal number of times.
 		 */
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 		lock_held = 0;
 
 		/*
@@ -192,26 +197,53 @@ __log_put(dbenv, lsnp, udbt, flags)
 		 * want to return failure.
 		 */
 		if (!IS_ZERO_LSN(old_lsn))
-			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_NEWFILE, &old_lsn, NULL, 0);
+			(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
+			    REP_NEWFILE, &old_lsn, NULL, 0, 0);
 
 		/*
-		 * Then send the log record itself on to our clients.
-		 *
+		 * If we're doing bulk processing put it in the bulk buffer.
+		 */
+		ret = 0;
+		if (FLD_ISSET(rep->config, REP_C_BULK)) {
+			/*
+			 * Bulk could have been turned on by another process.
+			 * If so, set the address into the bulk region now.
+			 */
+			if (db_rep->bulk == NULL)
+				db_rep->bulk = R_ADDR(&dblp->reginfo,
+				    lp->bulk_buf);
+			memset(&bulk, 0, sizeof(bulk));
+			bulk.addr = db_rep->bulk;
+			bulk.offp = &lp->bulk_off;
+			bulk.len = lp->bulk_len;
+			bulk.type = REP_BULK_LOG;
+			bulk.eid = DB_EID_BROADCAST;
+			bulk.flagsp = &lp->bulk_flags;
+			ret = __rep_bulk_message(dbenv, &bulk, NULL,
+			    &lsn, udbt, flags);
+		}
+		if (!FLD_ISSET(rep->config, REP_C_BULK) ||
+		    ret == DB_REP_BULKOVF) {
+			/*
+			 * Then send the log record itself on to our clients.
+			 */
+			/*
+			 * !!!
+			 * In the crypto case, we MUST send the udbt, not the
+			 * now-encrypted dbt.  Clients have no way to decrypt
+			 * without the header.
+			 */
+			ret = __rep_send_message(dbenv, DB_EID_BROADCAST,
+			    REP_LOG, &lsn, udbt, flags, 0);
+		}
+		/*
 		 * If the send fails and we're a commit or checkpoint,
 		 * there's nothing we can do;  the record's in the log.
-		 * Flush it, even if we're running with TXN_NOSYNC, on the
-		 * grounds that it should be in durable form somewhere.
+		 * Flush it, even if we're running with TXN_NOSYNC,
+		 * on the grounds that it should be in durable
+		 * form somewhere.
 		 */
-		/*
-		 * !!!
-		 * In the crypto case, we MUST send the udbt, not the
-		 * now-encrypted dbt.  Clients have no way to decrypt
-		 * without the header.
-		 */
-		if ((__rep_send_message(dbenv,
-		    DB_EID_BROADCAST, REP_LOG, &lsn, udbt, flags) != 0) &&
-		    LF_ISSET(DB_LOG_PERM))
+		if (ret != 0 && LF_ISSET(DB_LOG_PERM))
 			LF_SET(DB_FLUSH);
 	}
 
@@ -225,7 +257,7 @@ __log_put(dbenv, lsnp, udbt, flags)
 	 */
 	if (LF_ISSET(DB_FLUSH | DB_LOG_WRNOSYNC)) {
 		if (!lock_held) {
-			R_LOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_LOCK(dbenv);
 			lock_held = 1;
 		}
 		if ((ret = __log_flush_commit(dbenv, &lsn, flags)) != 0)
@@ -239,6 +271,9 @@ __log_put(dbenv, lsnp, udbt, flags)
 	if (LF_ISSET(DB_LOG_CHKPNT))
 		lp->stat.st_wc_bytes = lp->stat.st_wc_mbytes = 0;
 
+	/* Increment count of records added to the log. */
+	++lp->stat.st_record;
+
 	if (0) {
 panic_check:	/*
 		 * Writing log records cannot fail if we're a replication
@@ -252,7 +287,7 @@ panic_check:	/*
 	}
 
 err:	if (lock_held)
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	if (need_free)
 		__os_free(dbenv, dbt->data);
 
@@ -267,13 +302,14 @@ err:	if (lock_held)
 }
 
 /*
- * __log_txn_lsn --
+ * __log_current_lsn --
+ *	Return the current LSN.
  *
- * PUBLIC: void __log_txn_lsn
+ * PUBLIC: int __log_current_lsn
  * PUBLIC:     __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
  */
-void
-__log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
+int
+__log_current_lsn(dbenv, lsnp, mbytesp, bytesp)
 	DB_ENV *dbenv;
 	DB_LSN *lsnp;
 	u_int32_t *mbytesp, *bytesp;
@@ -284,14 +320,15 @@ __log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 
 	/*
 	 * We are trying to get the LSN of the last entry in the log.  We use
-	 * this in two places: 1) DB_ENV->txn_checkpoint uses it as a first
+	 * this in three places: 1) DB_ENV->txn_checkpoint uses it as a first
 	 * value when trying to compute an LSN such that all transactions begun
 	 * before it are complete.   2) DB_ENV->txn_begin uses it as the
-	 * begin_lsn.
+	 * begin_lsn. 3) While opening a file to see if we've gotten rid of
+	 * too many log files.
 	 *
 	 * Typically, it's easy to get the last written LSN, you simply look
 	 * at the current log pointer and back up the number of bytes of the
@@ -315,7 +352,9 @@ __log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
 		*bytesp = (u_int32_t)(lp->stat.st_wc_bytes + lp->b_off);
 	}
 
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -443,11 +482,11 @@ __log_flush_commit(dbenv, lsnp, flags)
 	 * Else, make sure that the commit record does not get out after we
 	 * abort the transaction.  Do this by overwriting the commit record
 	 * in the buffer.  (Note that other commits in this buffer will wait
-	 * wait until a successful write happens, we do not wake them.)  We
-	 * point at the right part of the buffer and write an abort record
-	 * over the commit.  We must then try and flush the buffer again,
-	 * since the interesting part of the buffer may have actually made
-	 * it out to disk before there was a failure, we can't know for sure.
+	 * until a successful write happens, we do not wake them.)  We point
+	 * at the right part of the buffer and write an abort record over the
+	 * commit.  We must then try and flush the buffer again, since the
+	 * interesting part of the buffer may have actually made it out to
+	 * disk before there was a failure, we can't know for sure.
 	 */
 	if (__txn_force_abort(dbenv,
 	    dblp->bufp + flush_lsn.offset - lp->w_off) == 0)
@@ -483,10 +522,11 @@ __log_newfile(dblp, lsnp, logfile)
 	dbenv = dblp->dbenv;
 	lp = dblp->reginfo.primary;
 
-	DB_ASSERT(logfile == 0 || logfile > lp->lsn.file);
-
-	/* If we're not at the beginning of a file already, start a new one. */
-	if (lp->lsn.offset != 0) {
+	/*
+	 * If we're not specifying a specific log file number and we're
+	 * not at the beginning of a file already, start a new one.
+	 */
+	if (logfile == 0 && lp->lsn.offset != 0) {
 		/*
 		 * Flush the log so this file is out and can be closed.  We
 		 * cannot release the region lock here because we need to
@@ -526,6 +566,7 @@ __log_newfile(dblp, lsnp, logfile)
 	if (logfile != 0) {
 		lp->lsn.file = logfile;
 		lp->lsn.offset = 0;
+		lp->w_off = 0;
 		if ((ret = __log_newfh(dblp, 1)) != 0)
 			return (ret);
 	}
@@ -690,22 +731,32 @@ __log_flush_pp(dbenv, lsn)
 	DB_ENV *dbenv;
 	const DB_LSN *lsn;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->lg_handle, "DB_ENV->log_flush", DB_INIT_LOG);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_flush(dbenv, lsn);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
-
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_flush(dbenv, lsn)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
+/*
+ * See if we need to wait.  s_lsn is not locked so some care is needed.
+ * The sync point can only move forward.  The lsnp->file cannot be
+ * greater than the s_lsn.file.  If the file we want is in the past
+ * we are done.  If the file numbers are the same check the offset.
+ * This all assumes we can read an 32-bit quantity in one state or
+ * the other, not in transition.
+ */
+#define	ALREADY_FLUSHED(lp, lsnp)				\
+	(((lp)->s_lsn.file > (lsnp)->file) ||		\
+	((lp)->s_lsn.file == (lsnp)->file &&		\
+	    (lp)->s_lsn.offset > (lsnp)->offset))
+
 /*
  * __log_flush --
  *	DB_ENV->log_flush
@@ -718,12 +769,16 @@ __log_flush(dbenv, lsn)
 	const DB_LSN *lsn;
 {
 	DB_LOG *dblp;
+	LOG *lp;
 	int ret;
 
 	dblp = dbenv->lg_handle;
-	R_LOCK(dbenv, &dblp->reginfo);
+	lp = dblp->reginfo.primary;
+	if (lsn != NULL && ALREADY_FLUSHED(lp, lsn))
+		return (0);
+	LOG_SYSTEM_LOCK(dbenv);
 	ret = __log_flush_int(dblp, lsn, 1);
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
@@ -743,7 +798,6 @@ __log_flush_int(dblp, lsnp, release)
 	struct __db_commit *commit;
 	DB_ENV *dbenv;
 	DB_LSN flush_lsn, f_lsn;
-	DB_MUTEX *flush_mutexp;
 	LOG *lp;
 	size_t b_off;
 	u_int32_t ncommit, w_off;
@@ -751,7 +805,6 @@ __log_flush_int(dblp, lsnp, release)
 
 	dbenv = dblp->dbenv;
 	lp = dblp->reginfo.primary;
-	flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
 	ncommit = 0;
 	ret = 0;
 
@@ -782,22 +835,8 @@ __log_flush_int(dblp, lsnp, release)
 		    "from another environment");
 		return (__db_panic(dbenv, DB_RUNRECOVERY));
 	} else {
-		/*
-		 * See if we need to wait.  s_lsn is not locked so some
-		 * care is needed.  The sync point can only move forward.
-		 * The lsnp->file cannot be greater than the s_lsn.file.
-		 * If the file we want is in the past we are done.
-		 * If the file numbers are the same check the offset.
-		 * This all assumes we can read an integer in one
-		 * state or the other, not in transition.
-		 */
-		if (lp->s_lsn.file > lsnp->file)
+		if (ALREADY_FLUSHED(lp, lsnp))
 			return (0);
-
-		if (lp->s_lsn.file == lsnp->file &&
-		    lp->s_lsn.offset > lsnp->offset)
-			return (0);
-
 		flush_lsn = *lsnp;
 	}
 
@@ -809,17 +848,15 @@ __log_flush_int(dblp, lsnp, release)
 		if ((commit = SH_TAILQ_FIRST(
 		    &lp->free_commits, __db_commit)) == NULL) {
 			if ((ret = __db_shalloc(&dblp->reginfo,
-			    sizeof(struct __db_commit),
-			    MUTEX_ALIGN, &commit)) != 0)
+			    sizeof(struct __db_commit), 0, &commit)) != 0)
 				goto flush;
 			memset(commit, 0, sizeof(*commit));
-			if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo,
-			    &commit->mutex, MUTEX_SELF_BLOCK |
-			    MUTEX_NO_RLOCK)) != 0) {
+			if ((ret = __mutex_alloc(dbenv, MTX_TXN_COMMIT,
+			    DB_MUTEX_SELF_BLOCK, &commit->mtx_txnwait)) != 0) {
 				__db_shalloc_free(&dblp->reginfo, commit);
 				return (ret);
 			}
-			MUTEX_LOCK(dbenv, &commit->mutex);
+			MUTEX_LOCK(dbenv, commit->mtx_txnwait);
 		} else
 			SH_TAILQ_REMOVE(
 			    &lp->free_commits, commit, links, __db_commit);
@@ -836,10 +873,10 @@ __log_flush_int(dblp, lsnp, release)
 		commit->lsn = flush_lsn;
 		SH_TAILQ_INSERT_HEAD(
 		    &lp->commits, commit, links, __db_commit);
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 		/* Wait here for the in-progress flush to finish. */
-		MUTEX_LOCK(dbenv, &commit->mutex);
-		R_LOCK(dbenv, &dblp->reginfo);
+		MUTEX_LOCK(dbenv, commit->mtx_txnwait);
+		LOG_SYSTEM_LOCK(dbenv);
 
 		lp->ncommit--;
 		/*
@@ -862,7 +899,7 @@ __log_flush_int(dblp, lsnp, release)
 	 * Protect flushing with its own mutex so we can release
 	 * the region lock except during file switches.
 	 */
-flush:	MUTEX_LOCK(dbenv, flush_mutexp);
+flush:	MUTEX_LOCK(dbenv, lp->mtx_flush);
 
 	/*
 	 * If the LSN is less than or equal to the last-sync'd LSN, we're done.
@@ -873,7 +910,7 @@ flush:	MUTEX_LOCK(dbenv, flush_mutexp);
 	if (flush_lsn.file < lp->s_lsn.file ||
 	    (flush_lsn.file == lp->s_lsn.file &&
 	    flush_lsn.offset < lp->s_lsn.offset)) {
-		MUTEX_UNLOCK(dbenv, flush_mutexp);
+		MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 		goto done;
 	}
 
@@ -889,14 +926,14 @@ flush:	MUTEX_LOCK(dbenv, flush_mutexp);
 	if (lp->b_off != 0 && log_compare(&flush_lsn, &lp->f_lsn) >= 0) {
 		if ((ret = __log_write(dblp,
 		    dblp->bufp, (u_int32_t)lp->b_off)) != 0) {
-			MUTEX_UNLOCK(dbenv, flush_mutexp);
+			MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 			goto done;
 		}
 
 		lp->b_off = 0;
 	} else if (dblp->lfhp == NULL || dblp->lfname != lp->lsn.file)
 		if ((ret = __log_newfh(dblp, 0)) != 0) {
-			MUTEX_UNLOCK(dbenv, flush_mutexp);
+			MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 			goto done;
 		}
 
@@ -910,13 +947,13 @@ flush:	MUTEX_LOCK(dbenv, flush_mutexp);
 	f_lsn = lp->f_lsn;
 	lp->in_flush++;
 	if (release)
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 
 	/* Sync all writes to disk. */
 	if ((ret = __os_fsync(dbenv, dblp->lfhp)) != 0) {
-		MUTEX_UNLOCK(dbenv, flush_mutexp);
+		MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 		if (release)
-			R_LOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_LOCK(dbenv);
 		ret = __db_panic(dbenv, ret);
 		return (ret);
 	}
@@ -933,9 +970,9 @@ flush:	MUTEX_LOCK(dbenv, flush_mutexp);
 	if (b_off == 0)
 		lp->s_lsn.offset = w_off;
 
-	MUTEX_UNLOCK(dbenv, flush_mutexp);
+	MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 	if (release)
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 
 	lp->in_flush--;
 	++lp->stat.st_scount;
@@ -952,13 +989,13 @@ done:
 		    commit != NULL;
 		    commit = SH_TAILQ_NEXT(commit, links, __db_commit))
 			if (log_compare(&lp->s_lsn, &commit->lsn) > 0) {
-				MUTEX_UNLOCK(dbenv, &commit->mutex);
+				MUTEX_UNLOCK(dbenv, commit->mtx_txnwait);
 				SH_TAILQ_REMOVE(
 				    &lp->commits, commit, links, __db_commit);
 				ncommit++;
 			} else if (first == 1) {
 				F_SET(commit, DB_COMMIT_FLUSH);
-				MUTEX_UNLOCK(dbenv, &commit->mutex);
+				MUTEX_UNLOCK(dbenv, commit->mtx_txnwait);
 				SH_TAILQ_REMOVE(
 				    &lp->commits, commit, links, __db_commit);
 				/*
@@ -1083,13 +1120,16 @@ __log_write(dblp, addr, len)
 	 * guarantees unwritten blocks are zero-filled, we set the size of the
 	 * file in advance.  This increases sync performance on some systems,
 	 * because they don't need to update metadata on every sync.
+	 *
+	 * Ignore any error -- we may have run out of disk space, but that's no
+	 * reason to quit.
 	 */
 #ifdef HAVE_FILESYSTEM_NOTZERO
 	if (lp->w_off == 0 && !__os_fs_notzero())
 #else
 	if (lp->w_off == 0)
 #endif
-		ret = __db_fileinit(dbenv, dblp->lfhp, lp->log_size, 0);
+		(void)__db_file_extend(dbenv, dblp->lfhp, lp->log_size);
 
 	/*
 	 * Seek to the offset in the file (someone may have written it
@@ -1130,7 +1170,8 @@ __log_file_pp(dbenv, lsn, namep, len)
 	char *namep;
 	size_t len;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -1142,12 +1183,9 @@ __log_file_pp(dbenv, lsn, namep, len)
 		return (EINVAL);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_file(dbenv, lsn, namep, len);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_file(dbenv, lsn, namep, len)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -1167,9 +1205,9 @@ __log_file(dbenv, lsn, namep, len)
 	char *name;
 
 	dblp = dbenv->lg_handle;
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	ret = __log_name(dblp, lsn->file, &name, NULL, 0);
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 	if (ret != 0)
 		return (ret);
 
@@ -1209,7 +1247,7 @@ __log_newfh(dblp, create)
 		dblp->lfhp = NULL;
 	}
 
-	flags = DB_OSO_LOG | DB_OSO_SEQ |
+	flags = DB_OSO_SEQ |
 	    (create ? DB_OSO_CREATE : 0) |
 	    (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0) |
 	    (F_ISSET(dbenv, DB_ENV_DSYNC_LOG) ? DB_OSO_DSYNC : 0);
@@ -1242,7 +1280,7 @@ __log_name(dblp, filenumber, namep, fhpp, flags)
 {
 	DB_ENV *dbenv;
 	LOG *lp;
-	int ret;
+	int mode, ret;
 	char *oname;
 	char old[sizeof(LFPREFIX) + 5 + 20], new[sizeof(LFPREFIX) + 10 + 20];
 
@@ -1274,11 +1312,29 @@ __log_name(dblp, filenumber, namep, fhpp, flags)
 	    DB_APP_LOG, new, 0, NULL, namep)) != 0 || fhpp == NULL)
 		return (ret);
 
+	/* The application may have specified an absolute file mode. */
+	if (lp->filemode == 0)
+		mode = dbenv->db_mode;
+	else {
+		LF_SET(DB_OSO_ABSMODE);
+		mode = lp->filemode;
+	}
+
 	/* Open the new-style file -- if we succeed, we're done. */
-	if ((ret = __os_open_extend(dbenv, *namep, 0, flags,
-	    (int)lp->persist.mode, fhpp)) == 0)
+	if ((ret = __os_open_extend(dbenv, *namep, 0, flags, mode, fhpp)) == 0)
 		return (0);
 
+	/*
+	 * If the open failed for reason other than the file
+	 * not being there, complain loudly, the wrong user
+	 * probably started up the application.
+	 */
+	if (ret != ENOENT) {
+		__db_err(dbenv,
+		     "%s: log file unreadable: %s", *namep, db_strerror(ret));
+		return (__db_panic(dbenv, ret));
+	}
+
 	/*
 	 * The open failed... if the DB_RDONLY flag isn't set, we're done,
 	 * the caller isn't interested in old-style files.
@@ -1299,8 +1355,7 @@ __log_name(dblp, filenumber, namep, fhpp, flags)
 	 * space allocated for the new-style name and return the old-style
 	 * name to the caller.
 	 */
-	if ((ret =
-	    __os_open(dbenv, oname, flags, (int)lp->persist.mode, fhpp)) == 0) {
+	if ((ret = __os_open(dbenv, oname, flags, mode, fhpp)) == 0) {
 		__os_free(dbenv, *namep);
 		*namep = oname;
 		return (0);
@@ -1326,8 +1381,8 @@ err:	__os_free(dbenv, oname);
  * flushed, when log switches files, etc.  This is just a thin PUBLIC wrapper
  * for __log_putr with a slightly prettier interface.
  *
- * Note that the db_rep->db_mutexp should be held when this is called.
- * Note that we acquire the log region lock while holding db_mutexp.
+ * Note that the REP->mtx_clientdb should be held when this is called.
+ * Note that we acquire the log region mutex while holding mtx_clientdb.
  *
  * PUBLIC: int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
  */
@@ -1347,7 +1402,7 @@ __log_rep_put(dbenv, lsnp, rec)
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	memset(&hdr, 0, sizeof(HDR));
 	t = *rec;
 	dbt = &t;
@@ -1369,10 +1424,10 @@ __log_rep_put(dbenv, lsnp, rec)
 	ret = __log_putr(dblp, lsnp, dbt, lp->lsn.offset - lp->len, &hdr);
 err:
 	/*
-	 * !!! Assume caller holds db_rep->db_mutex to modify ready_lsn.
+	 * !!! Assume caller holds REP->mtx_clientdb to modify ready_lsn.
 	 */
 	lp->ready_lsn = lp->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 	if (need_free)
 		__os_free(dbenv, t.data);
 	return (ret);
diff --git a/storage/bdb/log/log_stat.c b/storage/bdb/log/log_stat.c
index ce8c8af33c8..7c6c503a37d 100644
--- a/storage/bdb/log/log_stat.c
+++ b/storage/bdb/log/log_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: log_stat.c,v 11.149 2004/10/15 16:59:42 bostic Exp $
+ * $Id: log_stat.c,v 12.10 2005/10/10 19:06:22 bostic Exp $
  */
 
 #include "db_config.h"
@@ -37,7 +37,8 @@ __log_stat_pp(dbenv, statp, flags)
 	DB_LOG_STAT **statp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -47,12 +48,9 @@ __log_stat_pp(dbenv, statp, flags)
 	    "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_stat(dbenv, statp, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_stat(dbenv, statp, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -80,21 +78,21 @@ __log_stat(dbenv, statp, flags)
 		return (ret);
 
 	/* Copy out the global statistics. */
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	*stats = lp->stat;
 	if (LF_ISSET(DB_STAT_CLEAR))
 		memset(&lp->stat, 0, sizeof(lp->stat));
 
 	stats->st_magic = lp->persist.magic;
 	stats->st_version = lp->persist.version;
-	stats->st_mode = (int)lp->persist.mode;
+	stats->st_mode = lp->filemode;
 	stats->st_lg_bsize = lp->buffer_size;
 	stats->st_lg_size = lp->log_nsize;
 
-	stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait;
-	stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait;
+	__mutex_set_wait_info(dbenv, lp->mtx_region,
+	    &stats->st_region_wait, &stats->st_region_nowait);
 	if (LF_ISSET(DB_STAT_CLEAR))
-		MUTEX_CLEAR(&dblp->reginfo.rp->mutex);
+		__mutex_clear(dbenv, lp->mtx_region);
 	stats->st_regsize = dblp->reginfo.rp->size;
 
 	stats->st_cur_file = lp->lsn.file;
@@ -102,7 +100,7 @@ __log_stat(dbenv, statp, flags)
 	stats->st_disk_file = lp->s_lsn.file;
 	stats->st_disk_offset = lp->s_lsn.offset;
 
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	*statp = stats;
 	return (0);
@@ -119,7 +117,8 @@ __log_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -129,12 +128,9 @@ __log_stat_print_pp(dbenv, flags)
 	    flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __log_stat_print(dbenv, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__log_stat_print(dbenv, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -198,14 +194,16 @@ __log_print_stats(dbenv, flags)
 	else
 		__db_msg(dbenv, "%lu\tCurrent log file size",
 		    (u_long)sp->st_lg_size);
+	__db_dl(dbenv, "Records entered into the log", (u_long)sp->st_record);
 	__db_dlbytes(dbenv, "Log bytes written",
 	    (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
 	__db_dlbytes(dbenv, "Log bytes written since last checkpoint",
 	    (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
-	__db_dl(dbenv, "Total log file writes", (u_long)sp->st_wcount);
-	__db_dl(dbenv, "Total log file write due to overflow",
+	__db_dl(dbenv, "Total log file I/O writes", (u_long)sp->st_wcount);
+	__db_dl(dbenv, "Total log file I/O writes due to overflow",
 	    (u_long)sp->st_wcount_fill);
 	__db_dl(dbenv, "Total log file flushes", (u_long)sp->st_scount);
+	__db_dl(dbenv, "Total log file I/O reads", (u_long)sp->st_rcount);
 	STAT_ULONG("Current log file number", sp->st_cur_file);
 	STAT_ULONG("Current log file offset", sp->st_cur_offset);
 	STAT_ULONG("On-disk log file number", sp->st_disk_file);
@@ -243,48 +241,43 @@ __log_print_all(dbenv, flags)
 		{ 0,			NULL }
 	};
 	DB_LOG *dblp;
-	DB_MUTEX *flush_mutexp;
 	LOG *lp;
 
 	dblp = dbenv->lg_handle;
 	lp = (LOG *)dblp->reginfo.primary;
 
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 
 	__db_print_reginfo(dbenv, &dblp->reginfo, "Log");
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "DB_LOG handle information:");
-
-	__db_print_mutex(
-	    dbenv, NULL, dblp->mutexp, "DB_LOG handle mutex", flags);
+	__mutex_print_debug_single(
+	    dbenv, "DB_LOG handle mutex", dblp->mtx_dbreg, flags);
 	STAT_ULONG("Log file name", dblp->lfname);
-	if (dblp->lfhp == NULL)
-		STAT_ISSET("Log file handle", dblp->lfhp);
-	else
-		__db_print_fh(dbenv, dblp->lfhp, flags);
+	__db_print_fh(dbenv, "Log file handle", dblp->lfhp, flags);
 	__db_prflags(dbenv, NULL, dblp->flags, fn, NULL, "\tFlags");
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "LOG handle information:");
-
-	__db_print_mutex(
-	    dbenv, NULL, &lp->fq_mutex, "file name list mutex", flags);
+	__mutex_print_debug_single(
+	    dbenv, "LOG region mutex", lp->mtx_region, flags);
+	__mutex_print_debug_single(
+	    dbenv, "File name list mutex", lp->mtx_filelist, flags);
 
 	STAT_HEX("persist.magic", lp->persist.magic);
 	STAT_ULONG("persist.version", lp->persist.version);
 	__db_dlbytes(dbenv,
 	    "persist.log_size", (u_long)0, (u_long)0, lp->persist.log_size);
-	STAT_FMT("persist.mode", "%#lo", u_long, lp->persist.mode);
+	STAT_FMT("log file permissions mode", "%#lo", u_long, lp->filemode);
 	STAT_LSN("current file offset LSN", &lp->lsn);
 	STAT_LSN("first buffer byte LSN", &lp->lsn);
 	STAT_ULONG("current buffer offset", lp->b_off);
 	STAT_ULONG("current file write offset", lp->w_off);
 	STAT_ULONG("length of last record", lp->len);
 	STAT_LONG("log flush in progress", lp->in_flush);
-
-	flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
-	__db_print_mutex(dbenv, NULL, flush_mutexp, "Log flush mutex", flags);
+	__mutex_print_debug_single(
+	    dbenv, "Log flush mutex", lp->mtx_flush, flags);
 
 	STAT_LSN("last sync LSN", &lp->s_lsn);
 
@@ -305,8 +298,7 @@ __log_print_all(dbenv, flags)
 	STAT_ULONG("transactions waiting to commit", lp->ncommit);
 	STAT_LSN("LSN of first commit", &lp->t_lsn);
 
-	__dbreg_print_dblist(dbenv, flags);
-	R_UNLOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	return (0);
 }
diff --git a/storage/bdb/mp/mp_alloc.c b/storage/bdb/mp/mp_alloc.c
index 79aa84f9929..edbaac9ce89 100644
--- a/storage/bdb/mp/mp_alloc.c
+++ b/storage/bdb/mp/mp_alloc.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_alloc.c,v 11.47 2004/10/15 16:59:42 bostic Exp $
+ * $Id: mp_alloc.c,v 12.6 2005/08/12 13:17:22 bostic Exp $
  */
 
 #include "db_config.h"
@@ -39,12 +39,12 @@ __memp_alloc(dbmp, infop, mfp, len, offsetp, retp)
 	BH *bhp;
 	DB_ENV *dbenv;
 	DB_MPOOL_HASH *dbht, *hp, *hp_end, *hp_tmp;
-	DB_MUTEX *mutexp;
 	MPOOL *c_mp;
 	MPOOLFILE *bh_mfp;
 	size_t freed_space;
-	u_int32_t buckets, buffers, high_priority, priority, put_counter;
-	u_int32_t total_buckets;
+	db_mutex_t mutex;
+	u_int32_t buckets, buffers, high_priority, priority;
+	u_int32_t put_counter, total_buckets;
 	int aggressive, giveup, ret;
 	void *p;
 
@@ -69,7 +69,8 @@ __memp_alloc(dbmp, infop, mfp, len, offsetp, retp)
 	if (mfp != NULL)
 		len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
 
-	R_LOCK(dbenv, infop);
+	MPOOL_REGION_LOCK(dbenv, infop);
+
 	/*
 	 * Anything newer than 1/10th of the buffer pool is ignored during
 	 * allocation (unless allocation starts failing).
@@ -85,10 +86,10 @@ __memp_alloc(dbmp, infop, mfp, len, offsetp, retp)
 	 * we need in the hopes it will coalesce into a contiguous chunk of the
 	 * right size.  In the latter case we branch back here and try again.
 	 */
-alloc:	if ((ret = __db_shalloc(infop, len, MUTEX_ALIGN, &p)) == 0) {
+alloc:	if ((ret = __db_shalloc(infop, len, 0, &p)) == 0) {
 		if (mfp != NULL)
 			c_mp->stat.st_pages++;
-		R_UNLOCK(dbenv, infop);
+		MPOOL_REGION_UNLOCK(dbenv, infop);
 
 found:		if (offsetp != NULL)
 			*offsetp = R_OFFSET(infop, p);
@@ -113,12 +114,13 @@ found:		if (offsetp != NULL)
 		}
 		return (0);
 	} else if (giveup || c_mp->stat.st_pages == 0) {
-		R_UNLOCK(dbenv, infop);
+		MPOOL_REGION_UNLOCK(dbenv, infop);
 
 		__db_err(dbenv,
 		    "unable to allocate space from the buffer cache");
 		return (ret);
 	}
+	ret = 0;
 
 	/*
 	 * We re-attempt the allocation every time we've freed 3 times what
@@ -187,7 +189,7 @@ found:		if (offsetp != NULL)
 		if ((++buckets % c_mp->htab_buckets) == 0) {
 			if (freed_space > 0)
 				goto alloc;
-			R_UNLOCK(dbenv, infop);
+			MPOOL_REGION_UNLOCK(dbenv, infop);
 
 			switch (++aggressive) {
 			case 1:
@@ -211,7 +213,7 @@ found:		if (offsetp != NULL)
 				break;
 			}
 
-			R_LOCK(dbenv, infop);
+			MPOOL_REGION_LOCK(dbenv, infop);
 			goto alloc;
 		}
 
@@ -239,9 +241,9 @@ found:		if (offsetp != NULL)
 		priority = hp->hash_priority;
 
 		/* Unlock the region and lock the hash bucket. */
-		R_UNLOCK(dbenv, infop);
-		mutexp = &hp->hash_mutex;
-		MUTEX_LOCK(dbenv, mutexp);
+		MPOOL_REGION_UNLOCK(dbenv, infop);
+		mutex = hp->mtx_hash;
+		MUTEX_LOCK(dbenv, mutex);
 
 #ifdef DIAGNOSTIC
 		__memp_check_order(hp);
@@ -299,14 +301,20 @@ found:		if (offsetp != NULL)
 		 */
 		if (mfp != NULL &&
 		    mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
-			__memp_bhfree(dbmp, hp, bhp, 0);
-
+			if ((ret = __memp_bhfree(dbmp, hp, bhp, 0)) != 0) {
+				MUTEX_UNLOCK(dbenv, mutex);
+				return (ret);
+			}
 			p = bhp;
 			goto found;
 		}
 
 		freed_space += __db_shalloc_sizeof(bhp);
-		__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
+		if ((ret =
+		    __memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM)) != 0) {
+			MUTEX_UNLOCK(dbenv, mutex);
+			return (ret);
+		}
 		if (aggressive > 1)
 			aggressive = 1;
 
@@ -316,9 +324,9 @@ found:		if (offsetp != NULL)
 		 * hash bucket lock has already been discarded.
 		 */
 		if (0) {
-next_hb:		MUTEX_UNLOCK(dbenv, mutexp);
+next_hb:		MUTEX_UNLOCK(dbenv, mutex);
 		}
-		R_LOCK(dbenv, infop);
+		MPOOL_REGION_LOCK(dbenv, infop);
 
 		/*
 		 * Retry the allocation as soon as we've freed up sufficient
@@ -351,9 +359,8 @@ __memp_bad_buffer(hp)
 	 * Find the highest priority buffer in the bucket.  Buffers are
 	 * sorted by priority, so it's the last one in the bucket.
 	 */
-	priority = bhp->priority;
-	if (!SH_TAILQ_EMPTY(&hp->hash_bucket))
-	  priority = SH_TAILQ_LAST(&hp->hash_bucket, hq, __bh)->priority;
+	priority = SH_TAILQ_EMPTY(&hp->hash_bucket) ? bhp->priority :
+	    SH_TAILQ_LASTP(&hp->hash_bucket, hq, __bh)->priority;
 
 	/*
 	 * Set our buffer's priority to be just as bad, and append it to
@@ -363,7 +370,10 @@ __memp_bad_buffer(hp)
 	SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
 
 	/* Reset the hash bucket's priority. */
-	hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+	hp->hash_priority = SH_TAILQ_FIRSTP(&hp->hash_bucket, __bh)->priority;
+#ifdef DIAGNOSTIC
+	__memp_check_order(hp);
+#endif
 }
 
 #ifdef DIAGNOSTIC
diff --git a/storage/bdb/mp/mp_bh.c b/storage/bdb/mp/mp_bh.c
index b2869814662..b2721b801a6 100644
--- a/storage/bdb/mp/mp_bh.c
+++ b/storage/bdb/mp/mp_bh.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_bh.c,v 11.99 2004/10/15 16:59:42 bostic Exp $
+ * $Id: mp_bh.c,v 12.11 2005/10/20 18:57:07 bostic Exp $
  */
 
 #include "db_config.h"
@@ -58,14 +58,14 @@ __memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
 	 * Walk the process' DB_MPOOLFILE list and find a file descriptor for
 	 * the file.  We also check that the descriptor is open for writing.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	MUTEX_LOCK(dbenv, dbmp->mutex);
 	for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
 	    dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
 		if (dbmfp->mfp == mfp && !F_ISSET(dbmfp, MP_READONLY)) {
 			++dbmfp->ref;
 			break;
 		}
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
 	if (dbmfp != NULL) {
 		/*
@@ -78,14 +78,14 @@ __memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
 			if (mfp->no_backing_file)
 				return (EPERM);
 
-			MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+			MUTEX_LOCK(dbenv, dbmp->mutex);
 			if (dbmfp->fhp == NULL)
 				ret = __db_appname(dbenv, DB_APP_TMP, NULL,
 				    F_ISSET(dbenv, DB_ENV_DIRECT_DB) ?
 				    DB_OSO_DIRECT : 0, &dbmfp->fhp, NULL);
 			else
 				ret = 0;
-			MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+			MUTEX_UNLOCK(dbenv, dbmp->mutex);
 			if (ret != 0) {
 				__db_err(dbenv,
 				    "unable to create temporary backing file");
@@ -125,22 +125,22 @@ __memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
 	 * has already been closed in another process, in which case it should
 	 * be marked dead.
 	 */
-	if (F_ISSET(mfp, MP_TEMP))
+	if (F_ISSET(mfp, MP_TEMP) || mfp->no_backing_file)
 		return (EPERM);
 
 	/*
 	 * It's not a page from a file we've opened.  If the file requires
-	 * input/output processing, see if this process has ever registered
-	 * information as to how to write this type of file.  If not, there's
-	 * nothing we can do.
+	 * application-specific input/output processing, see if this process
+	 * has ever registered information as to how to write this type of
+	 * file.  If not, there's nothing we can do.
 	 */
-	if (mfp->ftype != 0) {
-		MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	if (mfp->ftype != 0 && mfp->ftype != DB_FTYPE_SET) {
+		MUTEX_LOCK(dbenv, dbmp->mutex);
 		for (mpreg = LIST_FIRST(&dbmp->dbregq);
 		    mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
 			if (mpreg->ftype == mfp->ftype)
 				break;
-		MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbmp->mutex);
 		if (mpreg == NULL)
 			return (EPERM);
 	}
@@ -177,12 +177,12 @@ pgwrite:
 	 * Discard our reference, and, if we're the last reference, make sure
 	 * the file eventually gets closed.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	MUTEX_LOCK(dbenv, dbmp->mutex);
 	if (dbmfp->ref == 1)
 		F_SET(dbmfp, MP_FLUSH);
 	else
 		--dbmfp->ref;
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
 	return (ret);
 }
@@ -191,12 +191,12 @@ pgwrite:
  * __memp_pgread --
  *	Read a page from a file.
  *
- * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
+ * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, db_mutex_t, BH *, int));
  */
 int
-__memp_pgread(dbmfp, mutexp, bhp, can_create)
+__memp_pgread(dbmfp, mutex, bhp, can_create)
 	DB_MPOOLFILE *dbmfp;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 	BH *bhp;
 	int can_create;
 {
@@ -215,8 +215,8 @@ __memp_pgread(dbmfp, mutexp, bhp, can_create)
 
 	/* Lock the buffer and swap the hash bucket lock for the buffer lock. */
 	F_SET(bhp, BH_LOCKED | BH_TRASH);
-	MUTEX_LOCK(dbenv, &bhp->mutex);
-	MUTEX_UNLOCK(dbenv, mutexp);
+	MUTEX_LOCK(dbenv, bhp->mtx_bh);
+	MUTEX_UNLOCK(dbenv, mutex);
 
 	/*
 	 * Temporary files may not yet have been created.  We don't create
@@ -247,7 +247,8 @@ __memp_pgread(dbmfp, mutexp, bhp, can_create)
 		}
 
 		/* Clear any bytes that need to be cleared. */
-		len = mfp->clear_len == 0 ? pagesize : mfp->clear_len;
+		len = mfp->clear_len == DB_CLEARLEN_NOTSET ?
+		    pagesize : mfp->clear_len;
 		memset(bhp->buf, 0, len);
 
 #if defined(DIAGNOSTIC) || defined(UMRW)
@@ -266,8 +267,8 @@ __memp_pgread(dbmfp, mutexp, bhp, can_create)
 	ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp, 1);
 
 	/* Unlock the buffer and reacquire the hash bucket lock. */
-err:	MUTEX_UNLOCK(dbenv, &bhp->mutex);
-	MUTEX_LOCK(dbenv, mutexp);
+err:	MUTEX_UNLOCK(dbenv, bhp->mtx_bh);
+	MUTEX_LOCK(dbenv, mutex);
 
 	/*
 	 * If no errors occurred, the data is now valid, clear the BH_TRASH
@@ -312,8 +313,8 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp)
 	 */
 	if (!F_ISSET(bhp, BH_LOCKED)) {
 		F_SET(bhp, BH_LOCKED);
-		MUTEX_LOCK(dbenv, &bhp->mutex);
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_LOCK(dbenv, bhp->mtx_bh);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 	}
 
 	/*
@@ -335,7 +336,8 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp)
 	if (LOGGING_ON(dbenv) && mfp->lsn_off != -1 &&
 	    !IS_CLIENT_PGRECOVER(dbenv)) {
 		memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
-		if ((ret = __log_flush(dbenv, &lsn)) != 0)
+		if (!IS_NOT_LOGGED_LSN(lsn) &&
+		    (ret = __log_flush(dbenv, &lsn)) != 0)
 			goto err;
 	}
 
@@ -367,17 +369,15 @@ __memp_pgwrite(dbenv, dbmfp, hp, bhp)
 		 * fail the first test, acquire the log mutex and check again.
 		 */
 		DB_LOG *dblp;
-		DB_MUTEX *mtx;
 		LOG *lp;
 
 		dblp = dbenv->lg_handle;
 		lp = dblp->reginfo.primary;
 		if (!lp->db_log_inmemory &&
 		    log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) {
-			mtx = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
-			MUTEX_LOCK(dbenv, mtx);
+			MUTEX_LOCK(dbenv, lp->mtx_flush);
 			DB_ASSERT(log_compare(&lp->s_lsn, &LSN(bhp->buf)) > 0);
-			MUTEX_UNLOCK(dbenv, mtx);
+			MUTEX_UNLOCK(dbenv, lp->mtx_flush);
 		}
 	}
 #endif
@@ -411,8 +411,8 @@ file_dead:
 	 *
 	 * Unlock the buffer and reacquire the hash lock.
 	 */
-	MUTEX_UNLOCK(dbenv, &bhp->mutex);
-	MUTEX_LOCK(dbenv, &hp->hash_mutex);
+	MUTEX_UNLOCK(dbenv, bhp->mtx_bh);
+	MUTEX_LOCK(dbenv, hp->mtx_hash);
 
 	/*
 	 * If we rewrote the page, it will need processing by the pgin
@@ -462,42 +462,39 @@ __memp_pg(dbmfp, bhp, is_pgin)
 	dbmp = dbenv->mp_handle;
 	mfp = dbmfp->mfp;
 
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	if ((ftype = mfp->ftype) == DB_FTYPE_SET)
+		mpreg = dbmp->pg_inout;
+	else {
+		MUTEX_LOCK(dbenv, dbmp->mutex);
+		for (mpreg = LIST_FIRST(&dbmp->dbregq);
+		    mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+			if (ftype == mpreg->ftype)
+				break;
+		MUTEX_UNLOCK(dbenv, dbmp->mutex);
+	}
+	if (mpreg == NULL)
+		return (0);
 
-	ftype = mfp->ftype;
-	for (mpreg = LIST_FIRST(&dbmp->dbregq);
-	    mpreg != NULL; mpreg = LIST_NEXT(mpreg, q)) {
-		if (ftype != mpreg->ftype)
-			continue;
-		if (mfp->pgcookie_len == 0)
-			dbtp = NULL;
-		else {
-			dbt.size = (u_int32_t)mfp->pgcookie_len;
-			dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
-			dbtp = &dbt;
-		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
-
-		if (is_pgin) {
-			if (mpreg->pgin != NULL &&
-			    (ret = mpreg->pgin(dbenv,
-			    bhp->pgno, bhp->buf, dbtp)) != 0)
-				goto err;
-		} else
-			if (mpreg->pgout != NULL &&
-			    (ret = mpreg->pgout(dbenv,
-			    bhp->pgno, bhp->buf, dbtp)) != 0)
-				goto err;
-		break;
+	if (mfp->pgcookie_len == 0)
+		dbtp = NULL;
+	else {
+		dbt.size = (u_int32_t)mfp->pgcookie_len;
+		dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
+		dbtp = &dbt;
 	}
 
-	if (mpreg == NULL)
-		MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	if (is_pgin) {
+		if (mpreg->pgin != NULL &&
+		    (ret = mpreg->pgin(dbenv, bhp->pgno, bhp->buf, dbtp)) != 0)
+			goto err;
+	} else
+		if (mpreg->pgout != NULL &&
+		    (ret = mpreg->pgout(dbenv, bhp->pgno, bhp->buf, dbtp)) != 0)
+			goto err;
 
 	return (0);
 
-err:	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
-	__db_err(dbenv, "%s: %s failed for page %lu",
+err:	__db_err(dbenv, "%s: %s failed for page %lu",
 	    __memp_fn(dbmfp), is_pgin ? "pgin" : "pgout", (u_long)bhp->pgno);
 	return (ret);
 }
@@ -506,10 +503,10 @@ err:	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
  * __memp_bhfree --
  *	Free a bucket header and its referenced data.
  *
- * PUBLIC: void __memp_bhfree
+ * PUBLIC: int __memp_bhfree
  * PUBLIC:     __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, u_int32_t));
  */
-void
+int
 __memp_bhfree(dbmp, hp, bhp, flags)
 	DB_MPOOL *dbmp;
 	DB_MPOOL_HASH *hp;
@@ -520,6 +517,7 @@ __memp_bhfree(dbmp, hp, bhp, flags)
 	MPOOL *c_mp, *mp;
 	MPOOLFILE *mfp;
 	u_int32_t n_cache;
+	int ret, t_ret;
 
 	/*
 	 * Assumes the hash bucket is locked and the MPOOL is not.
@@ -535,44 +533,57 @@ __memp_bhfree(dbmp, hp, bhp, flags)
 	SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
 	if (bhp->priority == hp->hash_priority)
 		hp->hash_priority =
-		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL ?
-		    0 : SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+		    SH_TAILQ_EMPTY(&hp->hash_bucket) ?
+		    0 : SH_TAILQ_FIRSTP(&hp->hash_bucket, __bh)->priority;
+#ifdef DIAGNOSTIC
+	__memp_check_order(hp);
+#endif
 
 	/*
 	 * Discard the hash bucket's mutex, it's no longer needed, and
 	 * we don't want to be holding it when acquiring other locks.
 	 */
 	if (!LF_ISSET(BH_FREE_UNLOCKED))
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 	/*
 	 * Find the underlying MPOOLFILE and decrement its reference count.
 	 * If this is its last reference, remove it.
 	 */
 	mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
-	MUTEX_LOCK(dbenv, &mfp->mutex);
+	MUTEX_LOCK(dbenv, mfp->mutex);
 	if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
-		(void)__memp_mf_discard(dbmp, mfp);
-	else
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
-
-	R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+		ret = __memp_mf_discard(dbmp, mfp);
+	else {
+		ret = 0;
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
+	}
 
 	/*
-	 * Clear the mutex this buffer recorded; requires the region lock
-	 * be held.
+	 * Free the associated mutex.
+	 *
+	 * XXX
+	 * This is wrong.  We fast-path the allocation of replacement buffers
+	 * by checking the required size, we shouldn't reallocate the mutex in
+	 * that case, either.  (Note that we should probably reset the mutex
+	 * statistics in case we re-use the mutex, though.)
 	 */
-	__db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
-	    R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
+	if ((t_ret = __mutex_free(dbenv, &bhp->mtx_bh)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/*
-	 * If we're not reusing the buffer immediately, free the buffer header
-	 * and data for real.
+	 * If we're not reusing the buffer immediately, free the buffer for
+	 * real.
 	 */
 	if (LF_ISSET(BH_FREE_FREEMEM)) {
+		MPOOL_REGION_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+
 		__db_shalloc_free(&dbmp->reginfo[n_cache], bhp);
 		c_mp = dbmp->reginfo[n_cache].primary;
 		c_mp->stat.st_pages--;
+
+		MPOOL_REGION_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
 	}
-	R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+
+	return (ret);
 }
diff --git a/storage/bdb/mp/mp_fget.c b/storage/bdb/mp/mp_fget.c
index a955fc1e638..9cecc2ad86d 100644
--- a/storage/bdb/mp/mp_fget.c
+++ b/storage/bdb/mp/mp_fget.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_fget.c,v 11.96 2004/10/15 16:59:42 bostic Exp $
+ * $Id: mp_fget.c,v 12.8 2005/10/12 17:53:36 bostic Exp $
  */
 
 #include "db_config.h"
@@ -35,6 +35,7 @@ __memp_fget_pp(dbmfp, pgnoaddr, flags, addrp)
 	void *addrp;
 {
 	DB_ENV *dbenv;
+	DB_THREAD_INFO *ip;
 	int rep_check, ret;
 
 	dbenv = dbmfp->dbenv;
@@ -70,9 +71,11 @@ __memp_fget_pp(dbmfp, pgnoaddr, flags, addrp)
 		}
 	}
 
+	ENV_ENTER(dbenv, ip);
+
 	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__op_rep_enter(dbenv);
+	if (rep_check && (ret = __op_rep_enter(dbenv)) != 0)
+		goto err;
 	ret = __memp_fget(dbmfp, pgnoaddr, flags, addrp);
 	/*
 	 * We only decrement the count in op_rep_exit if the operation fails.
@@ -80,7 +83,12 @@ __memp_fget_pp(dbmfp, pgnoaddr, flags, addrp)
 	 * pinned in memp_fput.
 	 */
 	if (ret != 0 && rep_check)
-		__op_rep_exit(dbenv);
+		(void)__op_rep_exit(dbenv);
+
+	/* Similarly if an app has a page pinned it is ACTIVE. */
+err:	if (ret != 0)
+		ENV_LEAVE(dbenv, ip);
+
 	return (ret);
 }
 
@@ -125,9 +133,9 @@ __memp_fget(dbmfp, pgnoaddr, flags, addrp)
 	switch (flags) {
 	case DB_MPOOL_LAST:
 		/* Get the last page number in the file. */
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		*pgnoaddr = mfp->last_pgno;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 		break;
 	case DB_MPOOL_NEW:
 		/*
@@ -183,7 +191,7 @@ hb_search:
 
 	/* Search the hash chain for the page. */
 retry:	st_hsearch = 0;
-	MUTEX_LOCK(dbenv, &hp->hash_mutex);
+	MUTEX_LOCK(dbenv, hp->mtx_hash);
 	for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
 	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
 		++st_hsearch;
@@ -197,7 +205,7 @@ retry:	st_hsearch = 0;
 		 * unchanged.
 		 */
 		if (bhp->ref == UINT16_MAX) {
-			MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+			MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 			__db_err(dbenv,
 			    "%s: page %lu: reference count overflow",
@@ -225,12 +233,12 @@ retry:	st_hsearch = 0;
 			if (!first && bhp->ref_sync != 0) {
 				--bhp->ref;
 				b_incr = 0;
-				MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+				MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 				__os_yield(dbenv, 1);
 				goto retry;
 			}
 
-			MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+			MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 			/*
 			 * Explicitly yield the processor if not the first pass
 			 * through this loop -- if we don't, we might run to the
@@ -240,10 +248,10 @@ retry:	st_hsearch = 0;
 			if (!first)
 				__os_yield(dbenv, 1);
 
-			MUTEX_LOCK(dbenv, &bhp->mutex);
+			MUTEX_LOCK(dbenv, bhp->mtx_bh);
 			/* Wait for I/O to finish... */
-			MUTEX_UNLOCK(dbenv, &bhp->mutex);
-			MUTEX_LOCK(dbenv, &hp->hash_mutex);
+			MUTEX_UNLOCK(dbenv, bhp->mtx_bh);
+			MUTEX_LOCK(dbenv, hp->mtx_hash);
 		}
 
 		++mfp->stat.st_cache_hit;
@@ -293,10 +301,9 @@ retry:	st_hsearch = 0;
 		 * If not, complain and get out.
 		 */
 		if (flags == DB_MPOOL_FREE) {
-			if (bhp->ref == 1) {
-				__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
-				return (0);
-			}
+			if (bhp->ref == 1)
+				return (__memp_bhfree(
+				    dbmp, hp, bhp, BH_FREE_FREEMEM));
 			__db_err(dbenv,
 			    "File %s: freeing pinned buffer for page %lu",
 				__memp_fns(dbmp, mfp), (u_long)*pgnoaddr);
@@ -312,7 +319,7 @@ retry:	st_hsearch = 0;
 		 * if the page exists, and allocate structures so we can add
 		 * the page to the buffer pool.
 		 */
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 		/*
 		 * The buffer is not in the pool, so we don't need to free it.
@@ -328,7 +335,7 @@ alloc:		/*
 		COMPQUIET(n_cache, 0);
 
 		extending = ret = 0;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		switch (flags) {
 		case DB_MPOOL_NEW:
 			extending = 1;
@@ -352,7 +359,7 @@ alloc:		/*
 			ret = *pgnoaddr > mfp->last_pgno ? DB_PAGE_NOTFOUND : 0;
 			break;
 		}
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 		if (ret != 0)
 			goto err;
 
@@ -382,7 +389,7 @@ alloc:		/*
 		 * again.
 		 */
 		if (extending)
-			R_LOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_LOCK(dbenv);
 
 		/*
 		 * DB_MPOOL_NEW does not guarantee you a page unreferenced by
@@ -416,13 +423,15 @@ alloc:		/*
 				 * flags == DB_MPOOL_NEW, so extending is set
 				 * and we're holding the region locked.
 				 */
-				R_UNLOCK(dbenv, dbmp->reginfo);
+				MPOOL_SYSTEM_UNLOCK(dbenv);
 
-				R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+				MPOOL_REGION_LOCK(
+				    dbenv, &dbmp->reginfo[n_cache]);
 				__db_shalloc_free(
 				    &dbmp->reginfo[n_cache], alloc_bhp);
 				c_mp->stat.st_pages--;
-				R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+				MPOOL_REGION_UNLOCK(
+				    dbenv, &dbmp->reginfo[n_cache]);
 
 				alloc_bhp = NULL;
 				goto alloc;
@@ -438,7 +447,7 @@ alloc:		/*
 			if (*pgnoaddr > mfp->last_pgno)
 				mfp->last_pgno = *pgnoaddr;
 
-			R_UNLOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_UNLOCK(dbenv);
 			if (ret != 0)
 				goto err;
 		}
@@ -455,12 +464,12 @@ alloc:		/*
 		 * lock, we have to release the hash bucket and re-acquire it.
 		 * That's OK, because we have the buffer pinned down.
 		 */
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
-		R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
+		MPOOL_REGION_LOCK(dbenv, &dbmp->reginfo[n_cache]);
 		__db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp);
 		c_mp->stat.st_pages--;
 		alloc_bhp = NULL;
-		R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+		MPOOL_REGION_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
 
 		/*
 		 * We can't use the page we found in the pool if DB_MPOOL_NEW
@@ -477,7 +486,7 @@ alloc:		/*
 		}
 
 		/* We can use the page -- get the bucket lock. */
-		MUTEX_LOCK(dbenv, &hp->hash_mutex);
+		MUTEX_LOCK(dbenv, hp->mtx_hash);
 		break;
 	case SECOND_MISS:
 		/*
@@ -504,8 +513,19 @@ alloc:		/*
 		bhp->pgno = *pgnoaddr;
 		bhp->mf_offset = mf_offset;
 		SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+
+		/*
+		 * Allocate the mutex.  This is the last BH initialization step,
+		 * because it's the only one that can fail, and everything else
+		 * must be set up or we can't jump to the err label because it
+		 * will call __memp_bhfree.
+		 */
+		if ((ret = __mutex_alloc(
+		    dbenv, MTX_MPOOL_BUFFER, 0, &bhp->mtx_bh)) != 0)
+			goto err;
+
 		hp->hash_priority =
-		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+		    SH_TAILQ_FIRSTP(&hp->hash_bucket, __bh)->priority;
 
 		/* If we extended the file, make sure the page is never lost. */
 		if (extending) {
@@ -532,7 +552,7 @@ alloc:		/*
 		 * if DB_MPOOL_CREATE is set.
 		 */
 		if (extending) {
-			if (mfp->clear_len == 0)
+			if (mfp->clear_len == DB_CLEARLEN_NOTSET)
 				memset(bhp->buf, 0, mfp->stat.st_pagesize);
 			else {
 				memset(bhp->buf, 0, mfp->clear_len);
@@ -552,19 +572,9 @@ alloc:		/*
 		}
 
 		/* Increment buffer count referenced by MPOOLFILE. */
-		MUTEX_LOCK(dbenv, &mfp->mutex);
+		MUTEX_LOCK(dbenv, mfp->mutex);
 		++mfp->block_cnt;
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
-
-		/*
-		 * Initialize the mutex.  This is the last initialization step,
-		 * because it's the only one that can fail, and everything else
-		 * must be set up or we can't jump to the err label because it
-		 * will call __memp_bhfree.
-		 */
-		if ((ret = __db_mutex_setup(dbenv,
-		    &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
-			goto err;
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 	}
 
 	DB_ASSERT(bhp->ref != 0);
@@ -580,7 +590,7 @@ alloc:		/*
 		SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
 		SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
 		hp->hash_priority =
-		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+		    SH_TAILQ_FIRSTP(&hp->hash_bucket, __bh)->priority;
 	}
 
 	/*
@@ -598,7 +608,7 @@ alloc:		/*
 	 */
 	if (F_ISSET(bhp, BH_TRASH) &&
 	    (ret = __memp_pgread(dbmfp,
-	    &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
+	    hp->mtx_hash, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
 		goto err;
 
 	/*
@@ -611,14 +621,17 @@ alloc:		/*
 			goto err;
 		F_CLR(bhp, BH_CALLPGIN);
 	}
+#ifdef DIAGNOSTIC
+	__memp_check_order(hp);
+#endif
 
-	MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+	MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 #ifdef DIAGNOSTIC
 	/* Update the file's pinned reference count. */
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	++dbmfp->pinref;
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	/*
 	 * We want to switch threads as often as possible, and at awkward
@@ -638,19 +651,19 @@ err:	/*
 	 */
 	if (b_incr) {
 		if (bhp->ref == 1)
-			__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
+			(void)__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
 		else {
 			--bhp->ref;
-			MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+			MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 		}
 	}
 
 	/* If alloc_bhp is set, free the memory. */
 	if (alloc_bhp != NULL) {
-		R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+		MPOOL_REGION_LOCK(dbenv, &dbmp->reginfo[n_cache]);
 		__db_shalloc_free(&dbmp->reginfo[n_cache], alloc_bhp);
 		c_mp->stat.st_pages--;
-		R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+		MPOOL_REGION_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
 	}
 
 	return (ret);
diff --git a/storage/bdb/mp/mp_fmethod.c b/storage/bdb/mp/mp_fmethod.c
index e27800c0d7b..6fc5d5719f2 100644
--- a/storage/bdb/mp/mp_fmethod.c
+++ b/storage/bdb/mp/mp_fmethod.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_fmethod.c,v 11.142 2004/10/15 16:59:42 bostic Exp $
+ * $Id: mp_fmethod.c,v 12.7 2005/10/07 20:21:32 ubell Exp $
  */
 
 #include "db_config.h"
@@ -12,26 +12,14 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#ifdef HAVE_RPC
-#include 
-#endif
-
 #include 
 #endif
 
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
 #include "db_int.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
 
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
 static int __memp_get_clear_len __P((DB_MPOOLFILE *, u_int32_t *));
 static int __memp_get_lsn_offset __P((DB_MPOOLFILE *, int32_t *));
 static int __memp_get_maxsize __P((DB_MPOOLFILE *, u_int32_t *, u_int32_t *));
@@ -52,7 +40,8 @@ __memp_fcreate_pp(dbenv, retp, flags)
 	DB_MPOOLFILE **retp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 
@@ -60,12 +49,9 @@ __memp_fcreate_pp(dbenv, retp, flags)
 	if ((ret = __db_fchk(dbenv, "DB_ENV->memp_fcreate", flags, 0)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_fcreate(dbenv, retp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_fcreate(dbenv, retp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -92,57 +78,28 @@ __memp_fcreate(dbenv, retp)
 	dbmfp->dbenv = dbenv;
 	dbmfp->mfp = INVALID_ROFF;
 
-#ifdef HAVE_RPC
-	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbmfp->get_clear_len = __dbcl_memp_get_clear_len;
-		dbmfp->set_clear_len = __dbcl_memp_set_clear_len;
-		dbmfp->get_fileid = __dbcl_memp_get_fileid;
-		dbmfp->set_fileid = __dbcl_memp_set_fileid;
-		dbmfp->get_flags = __dbcl_memp_get_flags;
-		dbmfp->set_flags = __dbcl_memp_set_flags;
-		dbmfp->get_ftype = __dbcl_memp_get_ftype;
-		dbmfp->set_ftype = __dbcl_memp_set_ftype;
-		dbmfp->get_lsn_offset = __dbcl_memp_get_lsn_offset;
-		dbmfp->set_lsn_offset = __dbcl_memp_set_lsn_offset;
-		dbmfp->get_maxsize = __dbcl_memp_get_maxsize;
-		dbmfp->set_maxsize = __dbcl_memp_set_maxsize;
-		dbmfp->get_pgcookie = __dbcl_memp_get_pgcookie;
-		dbmfp->set_pgcookie = __dbcl_memp_set_pgcookie;
-		dbmfp->get_priority = __dbcl_memp_get_priority;
-		dbmfp->set_priority = __dbcl_memp_set_priority;
-
-		dbmfp->get = __dbcl_memp_fget;
-		dbmfp->open = __dbcl_memp_fopen;
-		dbmfp->put = __dbcl_memp_fput;
-		dbmfp->set = __dbcl_memp_fset;
-		dbmfp->sync = __dbcl_memp_fsync;
-	} else
-#endif
-	{
-		dbmfp->get_clear_len = __memp_get_clear_len;
-		dbmfp->set_clear_len = __memp_set_clear_len;
-		dbmfp->get_fileid = __memp_get_fileid;
-		dbmfp->set_fileid = __memp_set_fileid;
-		dbmfp->get_flags = __memp_get_flags;
-		dbmfp->set_flags = __memp_set_flags;
-		dbmfp->get_ftype = __memp_get_ftype;
-		dbmfp->set_ftype = __memp_set_ftype;
-		dbmfp->get_lsn_offset = __memp_get_lsn_offset;
-		dbmfp->set_lsn_offset = __memp_set_lsn_offset;
-		dbmfp->get_maxsize = __memp_get_maxsize;
-		dbmfp->set_maxsize = __memp_set_maxsize;
-		dbmfp->get_pgcookie = __memp_get_pgcookie;
-		dbmfp->set_pgcookie = __memp_set_pgcookie;
-		dbmfp->get_priority = __memp_get_priority;
-		dbmfp->set_priority = __memp_set_priority;
-
-		dbmfp->get = __memp_fget_pp;
-		dbmfp->open = __memp_fopen_pp;
-		dbmfp->put = __memp_fput_pp;
-		dbmfp->set = __memp_fset_pp;
-		dbmfp->sync = __memp_fsync_pp;
-	}
 	dbmfp->close = __memp_fclose_pp;
+	dbmfp->get = __memp_fget_pp;
+	dbmfp->get_clear_len = __memp_get_clear_len;
+	dbmfp->get_fileid = __memp_get_fileid;
+	dbmfp->get_flags = __memp_get_flags;
+	dbmfp->get_ftype = __memp_get_ftype;
+	dbmfp->get_lsn_offset = __memp_get_lsn_offset;
+	dbmfp->get_maxsize = __memp_get_maxsize;
+	dbmfp->get_pgcookie = __memp_get_pgcookie;
+	dbmfp->get_priority = __memp_get_priority;
+	dbmfp->open = __memp_fopen_pp;
+	dbmfp->put = __memp_fput_pp;
+	dbmfp->set = __memp_fset_pp;
+	dbmfp->set_clear_len = __memp_set_clear_len;
+	dbmfp->set_fileid = __memp_set_fileid;
+	dbmfp->set_flags = __memp_set_flags;
+	dbmfp->set_ftype = __memp_set_ftype;
+	dbmfp->set_lsn_offset = __memp_set_lsn_offset;
+	dbmfp->set_maxsize = __memp_set_maxsize;
+	dbmfp->set_pgcookie = __memp_set_pgcookie;
+	dbmfp->set_priority = __memp_set_priority;
+	dbmfp->sync = __memp_fsync_pp;
 
 	*retp = dbmfp;
 	return (0);
@@ -365,7 +322,6 @@ __memp_get_maxsize(dbmfp, gbytesp, bytesp)
 	u_int32_t *gbytesp, *bytesp;
 {
 	DB_ENV *dbenv;
-	DB_MPOOL *dbmp;
 	MPOOLFILE *mfp;
 
 	if ((mfp = dbmfp->mfp) == NULL) {
@@ -373,15 +329,14 @@ __memp_get_maxsize(dbmfp, gbytesp, bytesp)
 		*bytesp = dbmfp->bytes;
 	} else {
 		dbenv = dbmfp->dbenv;
-		dbmp = dbenv->mp_handle;
 
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		*gbytesp = (u_int32_t)
 		    (mfp->maxpgno / (GIGABYTE / mfp->stat.st_pagesize));
 		*bytesp = (u_int32_t)
 		    ((mfp->maxpgno % (GIGABYTE / mfp->stat.st_pagesize)) *
 		    mfp->stat.st_pagesize);
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	}
 
 	return (0);
@@ -397,7 +352,6 @@ __memp_set_maxsize(dbmfp, gbytes, bytes)
 	u_int32_t gbytes, bytes;
 {
 	DB_ENV *dbenv;
-	DB_MPOOL *dbmp;
 	MPOOLFILE *mfp;
 
 	if ((mfp = dbmfp->mfp) == NULL) {
@@ -405,15 +359,14 @@ __memp_set_maxsize(dbmfp, gbytes, bytes)
 		dbmfp->bytes = bytes;
 	} else {
 		dbenv = dbmfp->dbenv;
-		dbmp = dbenv->mp_handle;
 
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		mfp->maxpgno = (db_pgno_t)
 		    (gbytes * (GIGABYTE / mfp->stat.st_pagesize));
 		mfp->maxpgno += (db_pgno_t)
 		    ((bytes + mfp->stat.st_pagesize - 1) /
 		    mfp->stat.st_pagesize);
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	}
 
 	return (0);
@@ -537,7 +490,7 @@ __memp_set_priority(dbmfp, priority)
 
 	/* Update the underlying file if we've already opened it. */
 	if (dbmfp->mfp != NULL)
-		dbmfp->mfp->priority = priority;
+		dbmfp->mfp->priority = dbmfp->priority;
 
 	return (0);
 }
@@ -549,22 +502,22 @@ __memp_set_priority(dbmfp, priority)
  * !!!
  * Undocumented interface: DB private.
  *
- * PUBLIC: void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *));
+ * PUBLIC: int __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *));
  */
-void
+int
 __memp_last_pgno(dbmfp, pgnoaddr)
 	DB_MPOOLFILE *dbmfp;
 	db_pgno_t *pgnoaddr;
 {
 	DB_ENV *dbenv;
-	DB_MPOOL *dbmp;
 
 	dbenv = dbmfp->dbenv;
-	dbmp = dbenv->mp_handle;
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	*pgnoaddr = dbmfp->mfp->last_pgno;
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
diff --git a/storage/bdb/mp/mp_fopen.c b/storage/bdb/mp/mp_fopen.c
index 7e302f89804..04103371ab9 100644
--- a/storage/bdb/mp/mp_fopen.c
+++ b/storage/bdb/mp/mp_fopen.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_fopen.c,v 11.143 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_fopen.c,v 12.16 2005/10/31 02:22:31 bostic Exp $
  */
 
 #include "db_config.h"
@@ -36,7 +36,8 @@ __memp_fopen_pp(dbmfp, path, flags, mode, pagesize)
 	size_t pagesize;
 {
 	DB_ENV *dbenv;
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbenv = dbmfp->dbenv;
 
@@ -70,12 +71,10 @@ __memp_fopen_pp(dbmfp, path, flags, mode, pagesize)
 		return (EINVAL);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_fopen(dbmfp, NULL, path, flags, mode, pagesize);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv,
+	    (__memp_fopen(dbmfp, NULL, path, flags, mode, pagesize)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -102,14 +101,14 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	db_pgno_t last_pgno;
 	size_t maxmap;
 	u_int32_t mbytes, bytes, oflags, pagesize;
-	int refinc, ret;
+	int created_fileid, refinc, ret;
 	char *rpath;
 	void *p;
 
 	dbenv = dbmfp->dbenv;
 	dbmp = dbenv->mp_handle;
 	mp = dbmp->reginfo[0].primary;
-	refinc = ret = 0;
+	created_fileid = refinc = ret = 0;
 	rpath = NULL;
 
 	/*
@@ -127,9 +126,20 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	 */
 	DB_ASSERT(mfp == NULL || path == NULL);
 
-	if (mfp == NULL && path == NULL)
+	/* If this handle is already open, return. */
+	if (F_ISSET(dbmfp, MP_OPEN_CALLED))
+		return (0);
+
+	if (path == NULL && mfp == NULL)
 		goto alloc;
 
+	/*
+	 * If there's no backing file, we can join existing files in the cache,
+	 * but there's nothing to read from disk.
+	 */
+	if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE))
+		goto check_mpoolfile;
+
 	/*
 	 * Our caller may be able to tell us which underlying MPOOLFILE we
 	 * need a handle for.
@@ -140,12 +150,12 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		 * failed creating the file DB_AM_DISCARD).  Increment the ref
 		 * count so the file cannot become dead and be unlinked.
 		 */
-		MUTEX_LOCK(dbenv, &mfp->mutex);
+		MUTEX_LOCK(dbenv, mfp->mutex);
 		if (!mfp->deadfile) {
 			++mfp->mpf_cnt;
 			refinc = 1;
 		}
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 
 		/*
 		 * Test one last time to see if the file is dead -- it may have
@@ -168,6 +178,15 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		oflags |= DB_OSO_RDONLY;
 	}
 
+	/*
+	 * XXX
+	 * A grievous layering violation, the DB_DSYNC_DB flag was left in
+	 * the DB_ENV structure and not driven through the cache API.  This
+	 * needs to be fixed when the general API configuration is fixed.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_DSYNC_DB))
+		oflags |= DB_OSO_DSYNC;
+
 	/*
 	 * Get the real name for this file and open it.
 	 *
@@ -179,7 +198,7 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	 * the region, __memp_nameop may be simultaneously renaming the file.
 	 */
 	if (mfp != NULL) {
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		path = R_ADDR(dbmp->reginfo, mfp->path_off);
 	}
 	if ((ret =
@@ -187,7 +206,8 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		ret = __os_open_extend(dbenv,
 		    rpath, (u_int32_t)pagesize, oflags, mode, &dbmfp->fhp);
 	if (mfp != NULL)
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
+
 	if (ret != 0) {
 		/* If it's a Queue extent file, it may not exist, that's OK. */
 		if (!LF_ISSET(DB_EXTENT))
@@ -200,9 +220,8 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	 * underlying file handle across seek and read/write calls.
 	 */
 	dbmfp->fhp->ref = 1;
-	if (F_ISSET(dbenv, DB_ENV_THREAD) &&
-	    (ret = __db_mutex_setup(dbenv, dbmp->reginfo,
-	    &dbmfp->fhp->mutexp, MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+	if ((ret = __mutex_alloc(
+	    dbenv, MTX_MPOOL_FH, DB_MUTEX_THREAD, &dbmfp->fhp->mtx_fh)) != 0)
 		goto err;
 
 	/*
@@ -227,19 +246,18 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	 * don't use timestamps, otherwise there'd be no chance of any
 	 * other process joining the party.
 	 */
-	if (!F_ISSET(dbmfp, MP_FILEID_SET) &&
-	    (ret = __os_fileid(dbenv, rpath, 0, dbmfp->fileid)) != 0)
-		goto err;
+	if (!F_ISSET(dbmfp, MP_FILEID_SET)) {
+		if  ((ret = __os_fileid(dbenv, rpath, 0, dbmfp->fileid)) != 0)
+			goto err;
+		created_fileid = 1;
+	}
 
 	if (mfp != NULL)
 		goto have_mfp;
 
+check_mpoolfile:
 	/*
-	 * If not creating a temporary file, walk the list of MPOOLFILE's,
-	 * looking for a matching file.  Files backed by temporary files
-	 * or previously removed files can't match.
-	 *
-	 * DB_TRUNCATE support.
+	 * Walk the list of MPOOLFILE's, looking for a matching file.
 	 *
 	 * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
 	 * pair) plus a timestamp.  If files are removed and created in less
@@ -255,17 +273,36 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 	 * a matching entry, we ensure that it's never found again, and we
 	 * create a new entry for the current request.
 	 */
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
 		/* Skip dead files and temporary files. */
 		if (mfp->deadfile || F_ISSET(mfp, MP_TEMP))
 			continue;
 
-		/* Skip non-matching files. */
-		if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo,
-		    mfp->fileid_off), DB_FILE_ID_LEN) != 0)
-			continue;
+		/*
+		 * Any remaining DB_MPOOL_NOFILE databases are in-memory
+		 * named databases and need only match other in-memory
+		 * databases with the same name.
+		 */
+		if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE)) {
+			if (!mfp->no_backing_file)
+				continue;
+
+			if (strcmp(path, R_ADDR(dbmp->reginfo, mfp->path_off)))
+				continue;
+
+			/*
+			 * We matched an in-memory file; grab the fileid if
+			 * it is set in the region, but not in the dbmfp.
+			 */
+			if (!F_ISSET(dbmfp, MP_FILEID_SET))
+				__memp_set_fileid(dbmfp,
+				    R_ADDR(dbmp->reginfo, mfp->fileid_off));
+		} else
+			if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo,
+			    mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+				continue;
 
 		/*
 		 * If the file is being truncated, remove it from the system
@@ -276,15 +313,17 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		 * loop, but I like the idea of checking all the entries.
 		 */
 		if (LF_ISSET(DB_TRUNCATE)) {
-			MUTEX_LOCK(dbenv, &mfp->mutex);
+			MUTEX_LOCK(dbenv, mfp->mutex);
 			mfp->deadfile = 1;
-			MUTEX_UNLOCK(dbenv, &mfp->mutex);
+			MUTEX_UNLOCK(dbenv, mfp->mutex);
 			continue;
 		}
 
 		/*
 		 * Some things about a file cannot be changed: the clear length,
-		 * page size, or lSN location.
+		 * page size, or LSN location.  However, if this is an attempt
+		 * to open a named in-memory file, we may not yet have that
+		 * information, so accept uninitialized entries.
 		 *
 		 * The file type can change if the application's pre- and post-
 		 * processing needs change.  For example, an application that
@@ -295,13 +334,17 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		 * We do not check to see if the pgcookie information changed,
 		 * or update it if it is.
 		 */
-		if (dbmfp->clear_len != mfp->clear_len ||
-		    pagesize != mfp->stat.st_pagesize ||
-		    dbmfp->lsn_offset != mfp->lsn_off) {
+		if ((dbmfp->clear_len != DB_CLEARLEN_NOTSET &&
+		    mfp->clear_len != DB_CLEARLEN_NOTSET &&
+		    dbmfp->clear_len != mfp->clear_len) ||
+		    (pagesize != 0 && pagesize != mfp->stat.st_pagesize) ||
+		    (dbmfp->lsn_offset != -1 &&
+		    mfp->lsn_off != DB_LSN_OFF_NOTSET &&
+		    dbmfp->lsn_offset != mfp->lsn_off)) {
 			__db_err(dbenv,
 		    "%s: clear length, page size or LSN location changed",
 			    path);
-			R_UNLOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_UNLOCK(dbenv);
 			ret = EINVAL;
 			goto err;
 		}
@@ -318,26 +361,48 @@ __memp_fopen(dbmfp, mfp, path, flags, mode, pgsize)
 		 * deadfile because the reference count is 0 blocks us finding
 		 * the file without knowing it's about to be marked dead.
 		 */
-		MUTEX_LOCK(dbenv, &mfp->mutex);
+		MUTEX_LOCK(dbenv, mfp->mutex);
 		if (mfp->deadfile) {
-			MUTEX_UNLOCK(dbenv, &mfp->mutex);
+			MUTEX_UNLOCK(dbenv, mfp->mutex);
 			continue;
 		}
 		++mfp->mpf_cnt;
 		refinc = 1;
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 
+		/* Initialize any fields that are not yet set. */
 		if (dbmfp->ftype != 0)
 			mfp->ftype = dbmfp->ftype;
+		if (dbmfp->clear_len != DB_CLEARLEN_NOTSET)
+			mfp->clear_len = dbmfp->clear_len;
+		if (dbmfp->lsn_offset != -1)
+			mfp->lsn_off = dbmfp->lsn_offset;
 
 		break;
 	}
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	if (mfp != NULL)
 		goto have_mfp;
 
-alloc:	/* Allocate and initialize a new MPOOLFILE. */
+alloc:	/*
+	 * If we get here and we created a FILEID, then it's OK to set
+	 * the dbmfp as having its FILEID_SET, because we aren't trying
+	 * to match an existing file in the mpool.
+	 */
+	if (created_fileid)
+		F_SET(dbmfp, MP_FILEID_SET);
+	/*
+	 * If we didn't find the file and this is an in-memory file, then
+	 * the create flag should be set.
+	 */
+	if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE) &&
+	    !LF_ISSET(DB_CREATE)) {
+		ret = ENOENT;
+		goto err;
+	}
+
+	/* Allocate and initialize a new MPOOLFILE. */
 	if ((ret = __memp_alloc(
 	    dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
 		goto err;
@@ -360,19 +425,28 @@ alloc:	/* Allocate and initialize a new MPOOLFILE. */
 	if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_UNLINK))
 		mfp->unlink_on_close = 1;
 
-	if (LF_ISSET(DB_TXN_NOT_DURABLE))
-		F_SET(mfp, MP_NOT_DURABLE);
 	if (LF_ISSET(DB_DURABLE_UNKNOWN | DB_RDONLY))
 		F_SET(mfp, MP_DURABLE_UNKNOWN);
 	if (LF_ISSET(DB_DIRECT))
 		F_SET(mfp, MP_DIRECT);
 	if (LF_ISSET(DB_EXTENT))
 		F_SET(mfp, MP_EXTENT);
+	if (LF_ISSET(DB_TXN_NOT_DURABLE))
+		F_SET(mfp, MP_NOT_DURABLE);
 	F_SET(mfp, MP_CAN_MMAP);
 
+	/*
+	 * An in-memory database with no name is a temp file.  Named
+	 * in-memory databases get an artificially bumped reference
+	 * count so they don't disappear on close; they need a remove
+	 * to make them disappear.
+	 */
 	if (path == NULL)
 		F_SET(mfp, MP_TEMP);
-	else {
+	else if (FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE))
+		mfp->mpf_cnt++;
+
+	if (path != NULL && !FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE)) {
 		/*
 		 * Don't permit files that aren't a multiple of the pagesize,
 		 * and find the number of the last page in the file, all the
@@ -407,19 +481,33 @@ alloc:	/* Allocate and initialize a new MPOOLFILE. */
 			--last_pgno;
 		mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
 
-		/* Copy the file path into shared memory. */
-		if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
-		    NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+		/*
+		 * Get the file ID if we weren't given one.  Generated file ID's
+		 * don't use timestamps, otherwise there'd be no chance of any
+		 * other process joining the party.
+		 */
+		if (!F_ISSET(dbmfp, MP_FILEID_SET) &&
+		    (ret = __os_fileid(dbenv, rpath, 0, dbmfp->fileid)) != 0)
 			goto err;
-		memcpy(p, path, strlen(path) + 1);
 
-		/* Copy the file identification string into shared memory. */
+	}
+
+	/* Copy the file identification string into shared memory. */
+	if (F_ISSET(dbmfp, MP_FILEID_SET)) {
 		if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
 		    NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
 			goto err;
 		memcpy(p, dbmfp->fileid, DB_FILE_ID_LEN);
 	}
 
+	/* Copy the file path into shared memory. */
+	if (path != NULL) {
+		if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+		    NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+			goto err;
+		memcpy(p, path, strlen(path) + 1);
+	}
+
 	/* Copy the page cookie into shared memory. */
 	if (dbmfp->pgcookie == NULL || dbmfp->pgcookie->size == 0) {
 		mfp->pgcookie_len = 0;
@@ -432,17 +520,16 @@ alloc:	/* Allocate and initialize a new MPOOLFILE. */
 		mfp->pgcookie_len = dbmfp->pgcookie->size;
 	}
 
+	if ((ret =
+	    __mutex_alloc(dbenv, MTX_MPOOLFILE_HANDLE, 0, &mfp->mutex)) != 0)
+		goto err;
+
 	/*
 	 * Prepend the MPOOLFILE to the list of MPOOLFILE's.
 	 */
-	R_LOCK(dbenv, dbmp->reginfo);
-	ret = __db_mutex_setup(dbenv, dbmp->reginfo, &mfp->mutex,
-	    MUTEX_NO_RLOCK);
-	if (ret == 0)
-		SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
-	R_UNLOCK(dbenv, dbmp->reginfo);
-	if (ret != 0)
-		goto err;
+	MPOOL_SYSTEM_LOCK(dbenv);
+	SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 have_mfp:
 	/*
@@ -492,7 +579,10 @@ have_mfp:
 	 */
 #define	DB_MAXMMAPSIZE	(10 * 1024 * 1024)	/* 10 MB. */
 	if (F_ISSET(mfp, MP_CAN_MMAP)) {
-		if (path == NULL)
+		maxmap = dbenv->mp_mmapsize == 0 ?
+		    DB_MAXMMAPSIZE : dbenv->mp_mmapsize;
+		if (path == NULL ||
+		    FLD_ISSET(dbmfp->config_flags, DB_MPOOL_NOFILE))
 			F_CLR(mfp, MP_CAN_MMAP);
 		else if (!F_ISSET(dbmfp, MP_READONLY))
 			F_CLR(mfp, MP_CAN_MMAP);
@@ -501,10 +591,10 @@ have_mfp:
 		else if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP))
 			F_CLR(mfp, MP_CAN_MMAP);
 		else {
-			R_LOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_LOCK(dbenv);
 			maxmap = mp->mp_mmapsize == 0 ?
 			    DB_MAXMMAPSIZE : mp->mp_mmapsize;
-			R_UNLOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_UNLOCK(dbenv);
 			if (mbytes > maxmap / MEGABYTE ||
 			    (mbytes == maxmap / MEGABYTE &&
 			    bytes >= maxmap % MEGABYTE))
@@ -529,26 +619,24 @@ have_mfp:
 	 *
 	 * Add the file to the process' list of DB_MPOOLFILEs.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	MUTEX_LOCK(dbenv, dbmp->mutex);
 
-	for (tmp_dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
-	    tmp_dbmfp != NULL; tmp_dbmfp = TAILQ_NEXT(tmp_dbmfp, q))
-		if (dbmfp->mfp == tmp_dbmfp->mfp &&
-		    (F_ISSET(dbmfp, MP_READONLY) ||
-		    !F_ISSET(tmp_dbmfp, MP_READONLY))) {
-			if (dbmfp->fhp->mutexp != NULL)
-				__db_mutex_free(
-				    dbenv, dbmp->reginfo, dbmfp->fhp->mutexp);
-			(void)__os_closehandle(dbenv, dbmfp->fhp);
-
-			++tmp_dbmfp->fhp->ref;
-			dbmfp->fhp = tmp_dbmfp->fhp;
-			break;
-		}
+	if (dbmfp->fhp != NULL)
+		for (tmp_dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+		    tmp_dbmfp != NULL; tmp_dbmfp = TAILQ_NEXT(tmp_dbmfp, q))
+			if (dbmfp->mfp == tmp_dbmfp->mfp &&
+			    (F_ISSET(dbmfp, MP_READONLY) ||
+			    !F_ISSET(tmp_dbmfp, MP_READONLY))) {
+				__mutex_free(dbenv, &dbmfp->fhp->mtx_fh);
+				(void)__os_closehandle(dbenv, dbmfp->fhp);
+				++tmp_dbmfp->fhp->ref;
+				dbmfp->fhp = tmp_dbmfp->fhp;
+				break;
+			}
 
 	TAILQ_INSERT_TAIL(&dbmp->dbmfq, dbmfp, q);
 
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
 	if (0) {
 err:		if (refinc) {
@@ -558,9 +646,9 @@ err:		if (refinc) {
 			 * error trying to open the file, so we probably cannot
 			 * unlink it anyway.
 			 */
-			MUTEX_LOCK(dbenv, &mfp->mutex);
+			MUTEX_LOCK(dbenv, mfp->mutex);
 			--mfp->mpf_cnt;
-			MUTEX_UNLOCK(dbenv, &mfp->mutex);
+			MUTEX_UNLOCK(dbenv, mfp->mutex);
 		}
 
 	}
@@ -581,7 +669,8 @@ __memp_fclose_pp(dbmfp, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int rep_check, ret, t_ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbenv = dbmfp->dbenv;
 
@@ -591,15 +680,11 @@ __memp_fclose_pp(dbmfp, flags)
 	 * !!!
 	 * DB_MPOOL_DISCARD: Undocumented flag: DB private.
 	 */
-	ret = __db_fchk(dbenv, "DB_MPOOLFILE->close", flags, DB_MPOOL_DISCARD);
+	(void)__db_fchk(dbenv, "DB_MPOOLFILE->close", flags, DB_MPOOL_DISCARD);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	if ((t_ret = __memp_fclose(dbmfp, flags)) != 0 && ret == 0)
-		ret = t_ret;
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_fclose(dbmfp, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -637,7 +722,7 @@ __memp_fclose(dbmfp, flags)
 	if (dbmp == NULL)
 		goto done;
 
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	MUTEX_LOCK(dbenv, dbmp->mutex);
 
 	DB_ASSERT(dbmfp->ref >= 1);
 	if ((ref = --dbmfp->ref) == 0 && F_ISSET(dbmfp, MP_OPEN_CALLED))
@@ -649,7 +734,7 @@ __memp_fclose(dbmfp, flags)
 	 */
 	if (ref == 0 && dbmfp->fhp != NULL && --dbmfp->fhp->ref > 0)
 		dbmfp->fhp = NULL;
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 	if (ref != 0)
 		return (0);
 
@@ -670,11 +755,9 @@ __memp_fclose(dbmfp, flags)
 	 * files may not yet have been created.
 	 */
 	if (dbmfp->fhp != NULL) {
-		if (dbmfp->fhp->mutexp != NULL) {
-			__db_mutex_free(
-			    dbenv, dbmp->reginfo, dbmfp->fhp->mutexp);
-			dbmfp->fhp->mutexp = NULL;
-		}
+		if ((t_ret =
+		    __mutex_free(dbenv, &dbmfp->fhp->mtx_fh)) != 0 && ret == 0)
+			ret = t_ret;
 		if ((t_ret = __os_closehandle(dbenv, dbmfp->fhp)) != 0) {
 			__db_err(dbenv, "%s: %s",
 			    __memp_fn(dbmfp), db_strerror(t_ret));
@@ -704,11 +787,12 @@ __memp_fclose(dbmfp, flags)
 	 * when we try to flush them.
 	 */
 	deleted = 0;
-	MUTEX_LOCK(dbenv, &mfp->mutex);
+	MUTEX_LOCK(dbenv, mfp->mutex);
 	if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
 		if (LF_ISSET(DB_MPOOL_DISCARD) ||
-		    F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close)
+		    F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close) {
 			mfp->deadfile = 1;
+		}
 		if (mfp->unlink_on_close) {
 			if ((t_ret = __db_appname(dbmp->dbenv,
 			    DB_APP_DATA, R_ADDR(dbmp->reginfo,
@@ -729,7 +813,7 @@ __memp_fclose(dbmfp, flags)
 		}
 	}
 	if (deleted == 0)
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 
 done:	/* Discard the DB_MPOOLFILE structure. */
 	if (dbmfp->pgcookie != NULL) {
@@ -755,10 +839,11 @@ __memp_mf_discard(dbmp, mfp)
 	DB_ENV *dbenv;
 	DB_MPOOL_STAT *sp;
 	MPOOL *mp;
-	int need_sync, ret;
+	int need_sync, ret, t_ret;
 
 	dbenv = dbmp->dbenv;
 	mp = dbmp->reginfo[0].primary;
+	ret = 0;
 
 	/*
 	 * Expects caller to be holding the MPOOLFILE mutex.
@@ -779,14 +864,18 @@ __memp_mf_discard(dbmp, mfp)
 	 */
 	mfp->deadfile = 1;
 
-	/* Discard the mutex we're holding. */
-	MUTEX_UNLOCK(dbenv, &mfp->mutex);
+	/* Discard the mutex we're holding and return it to the pool. */
+	MUTEX_UNLOCK(dbenv, mfp->mutex);
+	if ((t_ret = __mutex_free(dbenv, &mfp->mutex)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/* Lock the region and delete from the list of MPOOLFILEs. */
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile);
 
-	ret = need_sync ? __memp_mf_sync(dbmp, mfp) : 0;
+	if (need_sync &&
+	    (t_ret = __memp_mf_sync(dbmp, mfp, 1)) != 0 && ret == 0)
+		ret = t_ret;
 
 	/* Copy the statistics into the region. */
 	sp = &mp->stat;
@@ -797,10 +886,6 @@ __memp_mf_discard(dbmp, mfp)
 	sp->st_page_in += mfp->stat.st_page_in;
 	sp->st_page_out += mfp->stat.st_page_out;
 
-	/* Clear the mutex this MPOOLFILE recorded. */
-	__db_shlocks_clear(&mfp->mutex, dbmp->reginfo,
-	    R_ADDR(dbmp->reginfo, mp->maint_off));
-
 	/* Free the space. */
 	if (mfp->path_off != 0)
 		__db_shalloc_free(&dbmp->reginfo[0],
@@ -813,7 +898,73 @@ __memp_mf_discard(dbmp, mfp)
 		    R_ADDR(dbmp->reginfo, mfp->pgcookie_off));
 	__db_shalloc_free(&dbmp->reginfo[0], mfp);
 
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
+
+/*
+ * __memp_inmemlist --
+ *	Return a list of the named in-memory databases.
+ *
+ * PUBLIC: int __memp_inmemlist __P((DB_ENV *, char ***, int *));
+ */
+int
+__memp_inmemlist(dbenv, namesp, cntp)
+	DB_ENV *dbenv;
+	char ***namesp;
+	int *cntp;
+{
+	DB_MPOOL *dbmp;
+	MPOOL *mp;
+	MPOOLFILE *mfp;
+
+	int arraysz, cnt, ret;
+	char **names;
+
+	names = NULL;
+	dbmp = dbenv->mp_handle;
+	mp = dbmp->reginfo[0].primary;
+
+	arraysz = cnt = 0;
+	MPOOL_SYSTEM_LOCK(dbenv);
+	for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+		/* Skip dead files and temporary files. */
+		if (mfp->deadfile || F_ISSET(mfp, MP_TEMP))
+			continue;
+
+		/* Skip entries that allow files. */
+		if (!mfp->no_backing_file)
+			continue;
+
+		/* We found one. */
+		if (cnt >= arraysz) {
+			arraysz += 100;
+			if ((ret = __os_realloc(dbenv,
+			    (u_int)arraysz * sizeof(names[0]), &names)) != 0)
+				goto nomem;
+		}
+		if ((ret = __os_strdup(dbenv,
+		    R_ADDR(dbmp->reginfo, mfp->path_off), &names[cnt])) != 0)
+			goto nomem;
+
+		cnt++;
+	}
+	MPOOL_SYSTEM_UNLOCK(dbenv);
+	*namesp = names;
+	*cntp = cnt;
+	return (0);
+
+nomem:	MPOOL_SYSTEM_UNLOCK(dbenv);
+	if (names != NULL) {
+		while (--cnt >= 0)
+			__os_free(dbenv, names[cnt]);
+		__os_free(dbenv, names);
+	}
+
+	/* Make sure we don't return any garbage. */
+	*cntp = 0;
+	*namesp = NULL;
+	return (ret);
+}
diff --git a/storage/bdb/mp/mp_fput.c b/storage/bdb/mp/mp_fput.c
index a21eb6733f4..6c3f9145a97 100644
--- a/storage/bdb/mp/mp_fput.c
+++ b/storage/bdb/mp/mp_fput.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_fput.c,v 11.59 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_fput.c,v 12.7 2005/10/07 20:21:33 ubell Exp $
  */
 
 #include "db_config.h"
@@ -19,7 +19,7 @@
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
 
-static void __memp_reset_lru __P((DB_ENV *, REGINFO *));
+static int __memp_reset_lru __P((DB_ENV *, REGINFO *));
 
 /*
  * __memp_fput_pp --
@@ -34,14 +34,20 @@ __memp_fput_pp(dbmfp, pgaddr, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int ret;
+	DB_THREAD_INFO *ip;
+	int ret, t_ret;
 
 	dbenv = dbmfp->dbenv;
 	PANIC_CHECK(dbenv);
 
+	ENV_ENTER(dbenv, ip);
+
 	ret = __memp_fput(dbmfp, pgaddr, flags);
-	if (IS_ENV_REPLICATED(dbenv))
-		__op_rep_exit(dbenv);
+	if (IS_ENV_REPLICATED(dbenv) &&
+	    (t_ret = __op_rep_exit(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -64,26 +70,37 @@ __memp_fput(dbmfp, pgaddr, flags)
 	MPOOL *c_mp;
 	MPOOLFILE *mfp;
 	u_int32_t n_cache;
-	int adjust, ret;
+	int adjust, ret, t_ret;
 
 	dbenv = dbmfp->dbenv;
 	MPF_ILLEGAL_BEFORE_OPEN(dbmfp, "DB_MPOOLFILE->put");
-
 	dbmp = dbenv->mp_handle;
-	/* Validate arguments. */
+	ret = 0;
+
+	/*
+	 * Check arguments, but don't fail because we want to unpin the page
+	 * regardless.  The problem is when running with replication.  There
+	 * is a reference count we incremented when __memp_fget was called,
+	 * and we need to unpin the page and decrement that reference count.
+	 * If we see flag problems, mark the page dirty.
+	 */
 	if (flags) {
-		if ((ret = __db_fchk(dbenv, "memp_fput", flags,
-		    DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
-			return (ret);
-		if ((ret = __db_fcchk(dbenv, "memp_fput",
-		    flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
-			return (ret);
+		if (__db_fchk(dbenv, "memp_fput", flags,
+		    DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD) != 0 ||
+		    __db_fcchk(dbenv, "memp_fput", flags,
+		    DB_MPOOL_CLEAN, DB_MPOOL_DIRTY) != 0) {
+			flags = DB_MPOOL_DIRTY;
+			ret = EINVAL;
+			DB_ASSERT(0);
+		}
 
 		if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
 			__db_err(dbenv,
 			    "%s: dirty flag set for readonly file page",
 			    __memp_fn(dbmfp));
-			return (EACCES);
+			flags = 0;
+			ret = EINVAL;
+			DB_ASSERT(0);
 		}
 	}
 
@@ -98,24 +115,19 @@ __memp_fput(dbmfp, pgaddr, flags)
 		return (0);
 
 #ifdef DIAGNOSTIC
-	{
 	/*
 	 * Decrement the per-file pinned buffer count (mapped pages aren't
 	 * counted).
 	 */
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	if (dbmfp->pinref == 0) {
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 		__db_err(dbenv,
 		    "%s: more pages returned than retrieved", __memp_fn(dbmfp));
-		ret = __db_panic(dbenv, EINVAL);
-	} else {
-		ret = 0;
-		--dbmfp->pinref;
-	}
-	R_UNLOCK(dbenv, dbmp->reginfo);
-	if (ret != 0)
-		return (ret);
+		return (__db_panic(dbenv, EACCES));
 	}
+	--dbmfp->pinref;
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 #endif
 
 	/* Convert a page address to a buffer header and hash bucket. */
@@ -125,7 +137,7 @@ __memp_fput(dbmfp, pgaddr, flags)
 	hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
 	hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
 
-	MUTEX_LOCK(dbenv, &hp->hash_mutex);
+	MUTEX_LOCK(dbenv, hp->mtx_hash);
 
 	/* Set/clear the page bits. */
 	if (LF_ISSET(DB_MPOOL_CLEAN) &&
@@ -146,10 +158,10 @@ __memp_fput(dbmfp, pgaddr, flags)
 	 * application returns a page twice.
 	 */
 	if (bhp->ref == 0) {
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 		__db_err(dbenv, "%s: page %lu: unpinned page returned",
 		    __memp_fn(dbmfp), (u_long)bhp->pgno);
-		return (__db_panic(dbenv, EINVAL));
+		return (__db_panic(dbenv, EACCES));
 	}
 
 	/* Note the activity so allocation won't decide to quit. */
@@ -170,7 +182,7 @@ __memp_fput(dbmfp, pgaddr, flags)
 	 * discard flags (for now) and leave the buffer's priority alone.
 	 */
 	if (--bhp->ref > 1 || (bhp->ref == 1 && !F_ISSET(bhp, BH_LOCKED))) {
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 		return (0);
 	}
 
@@ -224,7 +236,7 @@ __memp_fput(dbmfp, pgaddr, flags)
 
 done:
 	/* Reset the hash bucket's priority. */
-	hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+	hp->hash_priority = SH_TAILQ_FIRSTP(&hp->hash_bucket, __bh)->priority;
 
 #ifdef DIAGNOSTIC
 	__memp_check_order(hp);
@@ -241,23 +253,25 @@ done:
 	if (F_ISSET(bhp, BH_LOCKED) && bhp->ref_sync != 0)
 		--bhp->ref_sync;
 
-	MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+	MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 	/*
 	 * On every buffer put we update the buffer generation number and check
 	 * for wraparound.
 	 */
 	if (++c_mp->lru_count == UINT32_MAX)
-		__memp_reset_lru(dbenv, dbmp->reginfo);
+		if ((t_ret =
+		    __memp_reset_lru(dbenv, dbmp->reginfo)) != 0 && ret == 0)
+			ret = t_ret;
 
-	return (0);
+	return (ret);
 }
 
 /*
  * __memp_reset_lru --
  *	Reset the cache LRU counter.
  */
-static void
+static int
 __memp_reset_lru(dbenv, infop)
 	DB_ENV *dbenv;
 	REGINFO *infop;
@@ -287,12 +301,14 @@ __memp_reset_lru(dbenv, infop)
 		if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
 			continue;
 
-		MUTEX_LOCK(dbenv, &hp->hash_mutex);
+		MUTEX_LOCK(dbenv, hp->mtx_hash);
 		for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
 		    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
 			if (bhp->priority != UINT32_MAX &&
 			    bhp->priority > MPOOL_BASE_DECREMENT)
 				bhp->priority -= MPOOL_BASE_DECREMENT;
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 	}
+
+	return (0);
 }
diff --git a/storage/bdb/mp/mp_fset.c b/storage/bdb/mp/mp_fset.c
index 0d29c41dfe9..b3898e46554 100644
--- a/storage/bdb/mp/mp_fset.c
+++ b/storage/bdb/mp/mp_fset.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_fset.c,v 11.34 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_fset.c,v 12.5 2005/10/07 20:21:33 ubell Exp $
  */
 
 #include "db_config.h"
@@ -32,7 +32,8 @@ __memp_fset_pp(dbmfp, pgaddr, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbenv = dbmfp->dbenv;
 
@@ -56,12 +57,9 @@ __memp_fset_pp(dbmfp, pgaddr, flags)
 		return (EACCES);
 	}
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_fset(dbmfp, pgaddr, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_fset(dbmfp, pgaddr, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -94,7 +92,7 @@ __memp_fset(dbmfp, pgaddr, flags)
 	hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
 	hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
 
-	MUTEX_LOCK(dbenv, &hp->hash_mutex);
+	MUTEX_LOCK(dbenv, hp->mtx_hash);
 
 	/* Set/clear the page bits. */
 	if (LF_ISSET(DB_MPOOL_CLEAN) &&
@@ -110,6 +108,6 @@ __memp_fset(dbmfp, pgaddr, flags)
 	if (LF_ISSET(DB_MPOOL_DISCARD))
 		F_SET(bhp, BH_DISCARD);
 
-	MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+	MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 	return (0);
 }
diff --git a/storage/bdb/mp/mp_method.c b/storage/bdb/mp/mp_method.c
index b149bfc13da..646524aea20 100644
--- a/storage/bdb/mp/mp_method.c
+++ b/storage/bdb/mp/mp_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_method.c,v 11.58 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_method.c,v 12.15 2005/10/12 12:45:10 margo Exp $
  */
 
 #include "db_config.h"
@@ -12,29 +12,13 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#ifdef HAVE_RPC
-#include 
-#endif
-
 #include 
 #endif
 
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
 #include "db_int.h"
 #include "dbinc/db_shash.h"
 #include "dbinc/mp.h"
 
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
-static int __memp_get_mp_max_openfd __P((DB_ENV *, int *));
-static int __memp_get_mp_max_write __P((DB_ENV *, int *, int *));
-static int __memp_get_mp_mmapsize __P((DB_ENV *, size_t *));
-
 /*
  * __memp_dbenv_create --
  *	Mpool specific creation of the DB_ENV structure.
@@ -61,40 +45,6 @@ __memp_dbenv_create(dbenv)
 	dbenv->mp_bytes =
 	    32 * ((8 * 1024) + sizeof(BH)) + 37 * sizeof(DB_MPOOL_HASH);
 	dbenv->mp_ncache = 1;
-
-#ifdef HAVE_RPC
-	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbenv->get_cachesize = __dbcl_env_get_cachesize;
-		dbenv->set_cachesize = __dbcl_env_cachesize;
-		dbenv->get_mp_max_openfd = __dbcl_get_mp_max_openfd;
-		dbenv->set_mp_max_openfd = __dbcl_set_mp_max_openfd;
-		dbenv->get_mp_max_write = __dbcl_get_mp_max_write;
-		dbenv->set_mp_max_write = __dbcl_set_mp_max_write;
-		dbenv->get_mp_mmapsize = __dbcl_get_mp_mmapsize;
-		dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize;
-		dbenv->memp_register = __dbcl_memp_register;
-		dbenv->memp_stat = __dbcl_memp_stat;
-		dbenv->memp_stat_print = NULL;
-		dbenv->memp_sync = __dbcl_memp_sync;
-		dbenv->memp_trickle = __dbcl_memp_trickle;
-	} else
-#endif
-	{
-		dbenv->get_cachesize = __memp_get_cachesize;
-		dbenv->set_cachesize = __memp_set_cachesize;
-		dbenv->get_mp_max_openfd = __memp_get_mp_max_openfd;
-		dbenv->set_mp_max_openfd = __memp_set_mp_max_openfd;
-		dbenv->get_mp_max_write = __memp_get_mp_max_write;
-		dbenv->set_mp_max_write = __memp_set_mp_max_write;
-		dbenv->get_mp_mmapsize = __memp_get_mp_mmapsize;
-		dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
-		dbenv->memp_register = __memp_register_pp;
-		dbenv->memp_stat = __memp_stat_pp;
-		dbenv->memp_stat_print = __memp_stat_print_pp;
-		dbenv->memp_sync = __memp_sync_pp;
-		dbenv->memp_trickle = __memp_trickle_pp;
-	}
-	dbenv->memp_fcreate = __memp_fcreate_pp;
 }
 
 /*
@@ -158,7 +108,7 @@ __memp_set_cachesize(dbenv, gbytes, bytes, arg_ncache)
 	 * You can only store 4GB-1 in an unsigned 32-bit value, so correct for
 	 * applications that specify 4GB cache sizes -- we know what they meant.
 	 */
-	if (gbytes / ncache == 4 && bytes == 0) {
+	if (sizeof(roff_t) == 4 && gbytes / ncache == 4 && bytes == 0) {
 		--gbytes;
 		bytes = GIGABYTE - 1;
 	} else {
@@ -166,11 +116,25 @@ __memp_set_cachesize(dbenv, gbytes, bytes, arg_ncache)
 		bytes %= GIGABYTE;
 	}
 
-	/* Avoid too-large cache sizes, they result in a region size of zero. */
-	if (gbytes / ncache > 4 || (gbytes / ncache == 4 && bytes != 0)) {
-		__db_err(dbenv, "individual cache size too large");
-		return (EINVAL);
-	}
+	/*
+	 * !!!
+	 * With 32-bit region offsets, individual cache regions must be smaller
+	 * than 4GB.  Also, cache sizes larger than 10TB would cause 32-bit
+	 * wrapping in the calculation of the number of hash buckets.  See
+	 * __memp_open for details.
+	 */
+	if (sizeof(roff_t) <= 4) {
+		if (gbytes / ncache >= 4) {
+			__db_err(dbenv,
+			    "individual cache size too large: maximum is 4GB");
+			return (EINVAL);
+		}
+	} else
+		if (gbytes / ncache > 10000) {
+			__db_err(dbenv,
+			    "individual cache size too large: maximum is 10TB");
+			return (EINVAL);
+		}
 
 	/*
 	 * If the application requested less than 500Mb, increase the cachesize
@@ -197,7 +161,10 @@ __memp_set_cachesize(dbenv, gbytes, bytes, arg_ncache)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __memp_get_mp_max_openfd __P((DB_ENV *, int *));
+ */
+int
 __memp_get_mp_max_openfd(dbenv, maxopenfdp)
 	DB_ENV *dbenv;
 	int *maxopenfdp;
@@ -211,9 +178,9 @@ __memp_get_mp_max_openfd(dbenv, maxopenfdp)
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		*maxopenfdp = mp->mp_maxopenfd;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else
 		*maxopenfdp = dbenv->mp_maxopenfd;
 	return (0);
@@ -238,15 +205,18 @@ __memp_set_mp_max_openfd(dbenv, maxopenfd)
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		mp->mp_maxopenfd = maxopenfd;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else
 		dbenv->mp_maxopenfd = maxopenfd;
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __memp_get_mp_max_write __P((DB_ENV *, int *, int *));
+ */
+int
 __memp_get_mp_max_write(dbenv, maxwritep, maxwrite_sleepp)
 	DB_ENV *dbenv;
 	int *maxwritep, *maxwrite_sleepp;
@@ -255,15 +225,15 @@ __memp_get_mp_max_write(dbenv, maxwritep, maxwrite_sleepp)
 	MPOOL *mp;
 
 	ENV_NOT_CONFIGURED(dbenv,
-	    dbenv->mp_handle, "DB_ENV->get_mp_max_openfd", DB_INIT_MPOOL);
+	    dbenv->mp_handle, "DB_ENV->get_mp_max_write", DB_INIT_MPOOL);
 
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		*maxwritep = mp->mp_maxwrite;
 		*maxwrite_sleepp = mp->mp_maxwrite_sleep;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else {
 		*maxwritep = dbenv->mp_maxwrite;
 		*maxwrite_sleepp = dbenv->mp_maxwrite_sleep;
@@ -286,15 +256,15 @@ __memp_set_mp_max_write(dbenv, maxwrite, maxwrite_sleep)
 	MPOOL *mp;
 
 	ENV_NOT_CONFIGURED(dbenv,
-	    dbenv->mp_handle, "DB_ENV->get_mp_max_openfd", DB_INIT_MPOOL);
+	    dbenv->mp_handle, "DB_ENV->get_mp_max_write", DB_INIT_MPOOL);
 
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		mp->mp_maxwrite = maxwrite;
 		mp->mp_maxwrite_sleep = maxwrite_sleep;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else {
 		dbenv->mp_maxwrite = maxwrite;
 		dbenv->mp_maxwrite_sleep = maxwrite_sleep;
@@ -302,7 +272,10 @@ __memp_set_mp_max_write(dbenv, maxwrite, maxwrite_sleep)
 	return (0);
 }
 
-static int
+/*
+ * PUBLIC: int __memp_get_mp_mmapsize __P((DB_ENV *, size_t *));
+ */
+int
 __memp_get_mp_mmapsize(dbenv, mp_mmapsizep)
 	DB_ENV *dbenv;
 	size_t *mp_mmapsizep;
@@ -316,9 +289,9 @@ __memp_get_mp_mmapsize(dbenv, mp_mmapsizep)
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		*mp_mmapsizep = mp->mp_mmapsize;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else
 		*mp_mmapsizep = dbenv->mp_mmapsize;
 	return (0);
@@ -344,9 +317,9 @@ __memp_set_mp_mmapsize(dbenv, mp_mmapsize)
 	if (MPOOL_ON(dbenv)) {
 		dbmp = dbenv->mp_handle;
 		mp = dbmp->reginfo[0].primary;
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		mp->mp_mmapsize = mp_mmapsize;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	} else
 		dbenv->mp_mmapsize = mp_mmapsize;
 	return (0);
@@ -357,32 +330,29 @@ __memp_set_mp_mmapsize(dbenv, mp_mmapsize)
  *	Remove or rename a file in the pool.
  *
  * PUBLIC: int __memp_nameop __P((DB_ENV *,
- * PUBLIC:     u_int8_t *, const char *, const char *, const char *));
+ * PUBLIC:     u_int8_t *, const char *, const char *, const char *, int));
  *
  * XXX
  * Undocumented interface: DB private.
  */
 int
-__memp_nameop(dbenv, fileid, newname, fullold, fullnew)
+__memp_nameop(dbenv, fileid, newname, fullold, fullnew, inmem)
 	DB_ENV *dbenv;
 	u_int8_t *fileid;
 	const char *newname, *fullold, *fullnew;
+	int inmem;
 {
 	DB_MPOOL *dbmp;
 	MPOOL *mp;
-	MPOOLFILE *mfp;
+	MPOOLFILE *save_mfp, *mfp;
 	roff_t newname_off;
-	int locked, ret;
+	int is_remove, locked, ret;
 	void *p;
 
-	/* We get passed either a two names, or two NULLs. */
-	DB_ASSERT(
-	    (newname == NULL && fullnew == NULL) ||
-	    (newname != NULL && fullnew != NULL));
-
-	locked = 0;
+	ret = locked = 0;
 	dbmp = NULL;
-
+	save_mfp = mfp = NULL;
+	is_remove = newname == NULL;
 	if (!MPOOL_ON(dbenv))
 		goto fsop;
 
@@ -399,7 +369,7 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew)
 	 * If this is a rename, allocate first, because we can't recursively
 	 * grab the region lock.
 	 */
-	if (newname == NULL) {
+	if (is_remove) {
 		p = NULL;
 		COMPQUIET(newname_off, INVALID_ROFF);
 	} else {
@@ -410,11 +380,14 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew)
 	}
 
 	locked = 1;
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 
 	/*
-	 * Find the file -- if mpool doesn't know about this file, that's not
-	 * an error -- we may not have it open.
+	 * Find the file -- if mpool doesn't know about this file, that may
+	 * not be an error -- if the file is not a memory-only file and it
+	 * is not open, it won't show up here.  If this is a memory file
+	 * then on a rename, we need to make sure that the new name does
+	 * not exist.
 	 */
 	for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
@@ -422,16 +395,30 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew)
 		if (mfp->deadfile || F_ISSET(mfp, MP_TEMP))
 			continue;
 
-		/* Ignore non-matching files. */
+		if (!is_remove && inmem && mfp->no_backing_file &&
+		    strcmp(newname, R_ADDR(dbmp->reginfo, mfp->path_off))
+		    == 0) {
+			ret = EEXIST;
+			goto err;
+		}
+
+		/* Try to match on fileid. */
 		if (memcmp(fileid, R_ADDR(
 		    dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0)
 			continue;
 
-		/* If newname is NULL, we're removing the file. */
-		if (newname == NULL) {
-			MUTEX_LOCK(dbenv, &mfp->mutex);
+		if (is_remove) {
+			MUTEX_LOCK(dbenv, mfp->mutex);
+			/*
+			 * In-memory dbs have an artificially incremented
+			 * ref count so that they do not ever get reclaimed
+			 * as long as they exist.  Since we are now deleting
+			 * the database, we need to dec that count.
+			 */
+			if (mfp->no_backing_file)
+				mfp->mpf_cnt--;
 			mfp->deadfile = 1;
-			MUTEX_UNLOCK(dbenv, &mfp->mutex);
+			MUTEX_UNLOCK(dbenv, mfp->mutex);
 		} else {
 			/*
 			 * Else, it's a rename.  We've allocated memory
@@ -440,32 +427,47 @@ __memp_nameop(dbenv, fileid, newname, fullold, fullnew)
 			p = R_ADDR(dbmp->reginfo, mfp->path_off);
 			mfp->path_off = newname_off;
 		}
-		break;
+		save_mfp = mfp;
+		if (!inmem || is_remove)
+			break;
 	}
 
 	/* Delete the memory we no longer need. */
 	if (p != NULL)
 		__db_shalloc_free(&dbmp->reginfo[0], p);
 
-fsop:	if (newname == NULL) {
-		/*
-		 * !!!
-		 * Replication may ask us to unlink a file that's been
-		 * renamed.  Don't complain if it doesn't exist.
-		 */
-		if ((ret = __os_unlink(dbenv, fullold)) == ENOENT)
-			ret = 0;
-	} else {
-		/* Defensive only, fullname should never be NULL. */
-		DB_ASSERT(fullnew != NULL);
-		if (fullnew == NULL)
-			return (EINVAL);
-
-		ret = __os_rename(dbenv, fullold, fullnew, 1);
+fsop:	if (save_mfp == NULL && inmem) {
+		ret = ENOENT;
+		goto err;
 	}
 
-	if (locked)
-		R_UNLOCK(dbenv, dbmp->reginfo);
+	/*
+	 * If this is a real file, then save_mfp could be NULL, because
+	 * mpool isn't turned on, and we still need to do the file ops.
+	 */
+	if (save_mfp == NULL || !save_mfp->no_backing_file) {
+		if (is_remove) {
+			/*
+			 * !!!
+			 * Replication may ask us to unlink a file that's been
+			 * renamed.  Don't complain if it doesn't exist.
+			 */
+			if ((ret = __os_unlink(dbenv, fullold)) == ENOENT)
+				ret = 0;
+		} else {
+			/*
+			 * Defensive only, fullname should never be
+			 * NULL.
+			 */
+			DB_ASSERT(fullnew != NULL);
+			if (fullnew == NULL)
+				return (EINVAL);
+			ret = __os_rename(dbenv, fullold, fullnew, 1);
+		}
+	}
+
+err:	if (locked)
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
@@ -494,7 +496,7 @@ __memp_get_refcnt(dbenv, fileid, refp)
 	dbmp = dbenv->mp_handle;
 	mp = dbmp->reginfo[0].primary;
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	/*
 	 * Find the file -- if mpool doesn't know about this file, the
 	 * reference count is 0.
@@ -511,14 +513,17 @@ __memp_get_refcnt(dbenv, fileid, refp)
 		    dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0)
 			continue;
 
+		MUTEX_LOCK(dbenv, mfp->mutex);
 		*refp = mfp->mpf_cnt;
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 		break;
 	}
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	return (0);
 }
 
+#ifdef HAVE_FTRUNCATE
 /*
  * __memp_ftruncate __
  *	Truncate the file.
@@ -532,20 +537,20 @@ __memp_ftruncate(dbmfp, pgno, flags)
 	u_int32_t flags;
 {
 	DB_ENV *dbenv;
-	DB_MPOOL *dbmp;
 	void *pagep;
 	db_pgno_t last_pgno, pg;
+	u_int32_t mbytes, bytes, pgsize;
 	int ret;
 
-	COMPQUIET(flags, 0);
 	dbenv = dbmfp->dbenv;
-	dbmp = dbenv->mp_handle;
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	last_pgno = dbmfp->mfp->last_pgno;
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	if (pgno > last_pgno) {
+		if (LF_ISSET(MP_TRUNC_RECOVER))
+			return (0);
 		__db_err(dbenv, "Truncate beyond the end of file");
 		return (EINVAL);
 	}
@@ -557,14 +562,192 @@ __memp_ftruncate(dbmfp, pgno, flags)
 			return (ret);
 	} while (pg++ < last_pgno);
 
-	if (!F_ISSET(dbmfp->mfp, MP_TEMP) &&
+	/*
+	 * If we are aborting an extend of a file, the call to __os_truncate
+	 * could extend the file if the new page(s) had not yet been written
+	 * to disk.  If we are out of disk space, avoid generating an error on
+	 * the truncate if we are actually extending the file. [#12743]
+	 */
+	if (!F_ISSET(dbmfp->mfp, MP_TEMP) && !dbmfp->mfp->no_backing_file &&
 	    (ret = __os_truncate(dbenv,
-	    dbmfp->fhp, pgno, dbmfp->mfp->stat.st_pagesize)) != 0)
-		return (ret);
+	    dbmfp->fhp, pgno, dbmfp->mfp->stat.st_pagesize)) != 0) {
+		if ((__os_ioinfo(dbenv,
+		    NULL, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0)
+			return (ret);
+		pgsize = dbmfp->mfp->stat.st_pagesize;
+		if (pgno < (mbytes * (MEGABYTE / pgsize)) + (bytes / pgsize))
+			return (ret);
+		ret = 0;
+	}
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	/*
+	 * This set could race with another thread of control extending
+	 * the file.  It's not a problem because we should have the page
+	 * locked at a higher level of the system.
+	 */
+	MPOOL_SYSTEM_LOCK(dbenv);
 	dbmfp->mfp->last_pgno = pgno - 1;
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	return (ret);
 }
+
+/*
+ * Support routines for maintaining a sorted freelist
+ * while we try to rearrange and truncate the file.
+ */
+
+/*
+ * __memp_alloc_freelist -- allocate mpool space for the freelist.
+ *
+ * PUBLIC: int __memp_alloc_freelist __P((DB_MPOOLFILE *,
+ * PUBLIC:	 u_int32_t, db_pgno_t **));
+ */
+int
+__memp_alloc_freelist(dbmfp, nelems, listp)
+	DB_MPOOLFILE *dbmfp;
+	u_int32_t nelems;
+	db_pgno_t **listp;
+{
+	DB_ENV *dbenv;
+	DB_MPOOL *dbmp;
+	MPOOLFILE *mfp;
+	void *retp;
+	int ret;
+
+	dbenv = dbmfp->dbenv;
+	dbmp = dbenv->mp_handle;
+	mfp = dbmfp->mfp;
+
+	*listp = NULL;
+
+	/*
+	 * These fields are protected because the database layer
+	 * has the metapage locked while manipulating them.
+	 */
+	mfp->free_ref++;
+	if (mfp->free_size != 0)
+		return (EBUSY);
+
+	/* Allocate at least a few slots. */
+	mfp->free_cnt = nelems;
+	if (nelems == 0)
+		nelems = 50;
+
+	if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+	    NULL, nelems * sizeof(db_pgno_t), &mfp->free_list, &retp)) != 0)
+		return (ret);
+
+	mfp->free_size = nelems * sizeof(db_pgno_t);
+
+	*listp = retp;
+
+	return (0);
+}
+
+/*
+ * __memp_free_freelist -- free the list.
+ *
+ * PUBLIC: void __memp_free_freelist __P((DB_MPOOLFILE *));
+ */
+void
+__memp_free_freelist(dbmfp)
+	DB_MPOOLFILE *dbmfp;
+{
+	DB_ENV *dbenv;
+	DB_MPOOL *dbmp;
+	MPOOLFILE *mfp;
+
+	dbenv = dbmfp->dbenv;
+	dbmp = dbenv->mp_handle;
+	mfp = dbmfp->mfp;
+
+	DB_ASSERT(mfp->free_ref > 0);
+	if (--mfp->free_ref > 0)
+		return;
+
+	DB_ASSERT(mfp->free_size != 0);
+
+	__db_shalloc_free(dbmp->reginfo, R_ADDR(dbmp->reginfo, mfp->free_list));
+
+	mfp->free_cnt = 0;
+	mfp->free_list = 0;
+	mfp->free_size = 0;
+}
+
+/*
+ * __memp_get_freelst -- return current list.
+ *
+ * PUBLIC: int __memp_get_freelist __P((
+ * PUBLIC:	DB_MPOOLFILE *, u_int32_t *, db_pgno_t **));
+ */
+int
+__memp_get_freelist(dbmfp, nelemp, listp)
+	DB_MPOOLFILE *dbmfp;
+	u_int32_t *nelemp;
+	db_pgno_t **listp;
+{
+	MPOOLFILE *mfp;
+	DB_ENV *dbenv;
+	DB_MPOOL *dbmp;
+
+	dbenv = dbmfp->dbenv;
+	dbmp = dbenv->mp_handle;
+	mfp = dbmfp->mfp;
+
+	if (mfp->free_size == 0) {
+		*nelemp = 0;
+		*listp = NULL;
+		return (0);
+	}
+
+	*nelemp = mfp->free_cnt;
+	*listp = R_ADDR(dbmp->reginfo, mfp->free_list);
+
+	return (0);
+}
+
+/*
+ * __memp_extend_freelist -- extend the list.
+ *
+ * PUBLIC: int __memp_extend_freelist __P((
+ * PUBLIC:	DB_MPOOLFILE *, u_int32_t , db_pgno_t **));
+ */
+int
+__memp_extend_freelist(dbmfp, count, listp)
+	DB_MPOOLFILE *dbmfp;
+	u_int32_t count;
+	db_pgno_t **listp;
+{
+	DB_ENV *dbenv;
+	DB_MPOOL *dbmp;
+	MPOOLFILE *mfp;
+	int ret;
+	void *retp;
+
+	dbenv = dbmfp->dbenv;
+	dbmp = dbenv->mp_handle;
+	mfp = dbmfp->mfp;
+
+	if (mfp->free_size == 0)
+		return (EINVAL);
+
+	if (count * sizeof(db_pgno_t) > mfp->free_size) {
+		mfp->free_size =
+		     (size_t)DB_ALIGN(count * sizeof(db_pgno_t), 512);
+		*listp = R_ADDR(dbmp->reginfo, mfp->free_list);
+		if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+		    NULL, mfp->free_size, &mfp->free_list, &retp)) != 0)
+			return (ret);
+
+		memcpy(retp, *listp, mfp->free_cnt * sizeof(db_pgno_t));
+
+		__db_shalloc_free(dbmp->reginfo, *listp);
+	}
+
+	mfp->free_cnt = count;
+	*listp = R_ADDR(dbmp->reginfo, mfp->free_list);
+
+	return (0);
+}
+#endif
diff --git a/storage/bdb/mp/mp_region.c b/storage/bdb/mp/mp_region.c
index 3c7ee6a4b62..d16e79b6edf 100644
--- a/storage/bdb/mp/mp_region.c
+++ b/storage/bdb/mp/mp_region.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_region.c,v 11.68 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_region.c,v 12.7 2005/08/08 14:30:03 bostic Exp $
  */
 
 #include "db_config.h"
@@ -20,10 +20,8 @@
 #include "dbinc/mp.h"
 
 static int	__memp_init __P((DB_ENV *, DB_MPOOL *, u_int, u_int32_t));
-static void	__memp_init_config __P((DB_ENV *, MPOOL *));
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-static size_t	__memp_region_maint __P((REGINFO *));
-#endif
+static int	__memp_init_config __P((DB_ENV *, MPOOL *));
+static void	__memp_region_size __P((DB_ENV *, roff_t *, u_int32_t *));
 
 /*
  * __memp_open --
@@ -43,20 +41,8 @@ __memp_open(dbenv)
 	u_int32_t htab_buckets, *regids;
 	int ret;
 
-	/* Figure out how big each cache region is. */
-	reg_size = (dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE;
-	reg_size += ((dbenv->mp_gbytes %
-	    dbenv->mp_ncache) * GIGABYTE) / dbenv->mp_ncache;
-	reg_size += dbenv->mp_bytes / dbenv->mp_ncache;
-
-	/*
-	 * Figure out how many hash buckets each region will have.  Assume we
-	 * want to keep the hash chains with under 10 pages on each chain.  We
-	 * don't know the pagesize in advance, and it may differ for different
-	 * files.  Use a pagesize of 1K for the calculation -- we walk these
-	 * chains a lot, they must be kept short.
-	 */
-	htab_buckets = __db_tablesize((u_int32_t)(reg_size / (1 * 1024)) / 10);
+	/* Calculate the region size and hash bucket count. */
+	__memp_region_size(dbenv, &reg_size, &htab_buckets);
 
 	/* Create and initialize the DB_MPOOL structure. */
 	if ((ret = __os_calloc(dbenv, 1, sizeof(*dbmp), &dbmp)) != 0)
@@ -116,14 +102,9 @@ __memp_open(dbenv)
 			if ((ret =
 			    __memp_init(dbenv, dbmp, i, htab_buckets)) != 0)
 				goto err;
-			R_UNLOCK(dbenv, &dbmp->reginfo[i]);
 
 			regids[i] = dbmp->reginfo[i].id;
 		}
-
-		__memp_init_config(dbenv, mp);
-
-		R_UNLOCK(dbenv, dbmp->reginfo);
 	} else {
 		/*
 		 * Determine how many regions there are going to be, allocate
@@ -140,21 +121,6 @@ __memp_open(dbenv)
 			dbmp->reginfo[i].id = INVALID_REGION_ID;
 		dbmp->reginfo[0] = reginfo;
 
-		__memp_init_config(dbenv, mp);
-
-		/*
-		 * We have to unlock the primary mpool region before we attempt
-		 * to join the additional mpool regions.  If we don't, we can
-		 * deadlock.  The scenario is that we hold the primary mpool
-		 * region lock.  We then try to attach to an additional mpool
-		 * region, which requires the acquisition/release of the main
-		 * region lock (to search the list of regions).  If another
-		 * thread of control already holds the main region lock and is
-		 * waiting on our primary mpool region lock, we'll deadlock.
-		 * See [#4696] for more information.
-		 */
-		R_UNLOCK(dbenv, dbmp->reginfo);
-
 		/* Join remaining regions. */
 		regids = R_ADDR(dbmp->reginfo, mp->regids);
 		for (i = 1; i < dbmp->nreg; ++i) {
@@ -165,7 +131,6 @@ __memp_open(dbenv)
 			if ((ret = __db_r_attach(
 			    dbenv, &dbmp->reginfo[i], 0)) != 0)
 				goto err;
-			R_UNLOCK(dbenv, &dbmp->reginfo[i]);
 		}
 	}
 
@@ -175,28 +140,28 @@ __memp_open(dbenv)
 		    R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary);
 
 	/* If the region is threaded, allocate a mutex to lock the handles. */
-	if (F_ISSET(dbenv, DB_ENV_THREAD) &&
-	    (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmp->mutexp,
-	    MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+	if ((ret = __mutex_alloc(
+	    dbenv, MTX_MPOOL_HANDLE, DB_MUTEX_THREAD, &dbmp->mutex)) != 0)
 		goto err;
 
 	dbenv->mp_handle = dbmp;
+
+	/* A process joining the region may reset the mpool configuration. */
+	if ((ret = __memp_init_config(dbenv, mp)) != 0)
+		return (ret);
+
 	return (0);
 
-err:	if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
-		if (F_ISSET(dbmp->reginfo, REGION_CREATE))
-			ret = __db_panic(dbenv, ret);
-
-		R_UNLOCK(dbenv, dbmp->reginfo);
-
+err:	dbenv->mp_handle = NULL;
+	if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
 		for (i = 0; i < dbmp->nreg; ++i)
 			if (dbmp->reginfo[i].id != INVALID_REGION_ID)
 				(void)__db_r_detach(
 				    dbenv, &dbmp->reginfo[i], 0);
 		__os_free(dbenv, dbmp->reginfo);
 	}
-	if (dbmp->mutexp != NULL)
-		__db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+
+	(void)__mutex_free(dbenv, &dbmp->mutex);
 	__os_free(dbenv, dbmp);
 	return (ret);
 }
@@ -215,30 +180,21 @@ __memp_init(dbenv, dbmp, reginfo_off, htab_buckets)
 	DB_MPOOL_HASH *htab;
 	MPOOL *mp;
 	REGINFO *reginfo;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	size_t maint_size;
-#endif
 	u_int32_t i;
 	int ret;
 	void *p;
 
 	reginfo = &dbmp->reginfo[reginfo_off];
-	if ((ret = __db_shalloc(reginfo,
-	    sizeof(MPOOL), MUTEX_ALIGN, &reginfo->primary)) != 0)
+	if ((ret = __db_shalloc(
+	    reginfo, sizeof(MPOOL), 0, &reginfo->primary)) != 0)
 		goto mem_err;
 	reginfo->rp->primary = R_OFFSET(reginfo, reginfo->primary);
 	mp = reginfo->primary;
 	memset(mp, 0, sizeof(*mp));
 
-#ifdef	HAVE_MUTEX_SYSTEM_RESOURCES
-	maint_size = __memp_region_maint(reginfo);
-	/* Allocate room for the maintenance info and initialize it. */
-	if ((ret = __db_shalloc(reginfo,
-	    sizeof(REGMAINT) + maint_size, 0, &p)) != 0)
-		goto mem_err;
-	__db_maintinit(reginfo, p, maint_size);
-	mp->maint_off = R_OFFSET(reginfo, p);
-#endif
+	if ((ret =
+	    __mutex_alloc(dbenv, MTX_MPOOL_REGION, 0, &mp->mtx_region)) != 0)
+		return (ret);
 
 	if (reginfo_off == 0) {
 		SH_TAILQ_INIT(&mp->mpfq);
@@ -254,12 +210,12 @@ __memp_init(dbenv, dbmp, reginfo_off, htab_buckets)
 
 	/* Allocate hash table space and initialize it. */
 	if ((ret = __db_shalloc(reginfo,
-	    htab_buckets * sizeof(DB_MPOOL_HASH), MUTEX_ALIGN, &htab)) != 0)
+	    htab_buckets * sizeof(DB_MPOOL_HASH), 0, &htab)) != 0)
 		goto mem_err;
 	mp->htab = R_OFFSET(reginfo, htab);
 	for (i = 0; i < htab_buckets; i++) {
-		if ((ret = __db_mutex_setup(dbenv,
-		    reginfo, &htab[i].hash_mutex, MUTEX_NO_RLOCK)) != 0)
+		if ((ret = __mutex_alloc(
+		    dbenv, MTX_MPOOL_HASH_BUCKET, 0, &htab[i].mtx_hash)) != 0)
 			return (ret);
 		SH_TAILQ_INIT(&htab[i].hash_bucket);
 		htab[i].hash_page_dirty = htab[i].hash_priority = 0;
@@ -278,16 +234,78 @@ mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region");
 	return (ret);
 }
 
+/*
+ * __memp_region_size --
+ *	Size the region and figure out how many hash buckets we'll have.
+ */
+static void
+__memp_region_size(dbenv, reg_sizep, htab_bucketsp)
+	DB_ENV *dbenv;
+	roff_t *reg_sizep;
+	u_int32_t *htab_bucketsp;
+{
+	roff_t reg_size;
+
+	/* Figure out how big each cache region is. */
+	reg_size = (roff_t)(dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE;
+	reg_size += ((roff_t)(dbenv->mp_gbytes %
+	    dbenv->mp_ncache) * GIGABYTE) / dbenv->mp_ncache;
+	reg_size += dbenv->mp_bytes / dbenv->mp_ncache;
+	*reg_sizep = reg_size;
+
+	/*
+	 * Figure out how many hash buckets each region will have.  Assume we
+	 * want to keep the hash chains with under 10 pages on each chain.  We
+	 * don't know the pagesize in advance, and it may differ for different
+	 * files.  Use a pagesize of 1K for the calculation -- we walk these
+	 * chains a lot, they must be kept short.
+	 *
+	 * XXX
+	 * Cache sizes larger than 10TB would cause 32-bit wrapping in the
+	 * calculation of the number of hash buckets.  This probably isn't
+	 * something we need to worry about right now, but is checked when the
+	 * cache size is set.
+	 */
+	*htab_bucketsp = __db_tablesize((u_int32_t)(reg_size / (10 * 1024)));
+}
+
+/*
+ * __memp_region_mutex_count --
+ *	Return the number of mutexes the mpool region will need.
+ *
+ * PUBLIC: u_int32_t __memp_region_mutex_count __P((DB_ENV *));
+ */
+u_int32_t
+__memp_region_mutex_count(dbenv)
+	DB_ENV *dbenv;
+{
+	roff_t reg_size;
+	u_int32_t htab_buckets;
+
+	__memp_region_size(dbenv, &reg_size, &htab_buckets);
+
+	/*
+	 * We need a couple of mutexes for the region itself, and one for each
+	 * file handle (MPOOLFILE).  More importantly, each configured cache
+	 * has one mutex per hash bucket and buffer header.  Hash buckets are
+	 * configured to have 10 pages or fewer on each chain, but we don't
+	 * want to fail if we have a large number of 512 byte pages, so double
+	 * the guess.
+	 */
+	return (dbenv->mp_ncache * htab_buckets * 21 + 50);
+}
+
 /*
  * __memp_init_config --
  *	Initialize shared configuration information.
  */
-static void
+static int
 __memp_init_config(dbenv, mp)
 	DB_ENV *dbenv;
 	MPOOL *mp;
 {
-	/* A process joining the region may reset the mpool configuration. */
+	MPOOL_SYSTEM_LOCK(dbenv);
+
 	if (dbenv->mp_mmapsize != 0)
 		mp->mp_mmapsize = dbenv->mp_mmapsize;
 	if (dbenv->mp_maxopenfd != 0)
@@ -296,6 +314,10 @@ __memp_init_config(dbenv, mp)
 		mp->mp_maxwrite = dbenv->mp_maxwrite;
 	if (dbenv->mp_maxwrite_sleep != 0)
 		mp->mp_maxwrite_sleep = dbenv->mp_maxwrite_sleep;
+
+	MPOOL_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -336,8 +358,11 @@ __memp_dbenv_refresh(dbenv)
 			    bucket < mp->htab_buckets; ++hp, ++bucket)
 				while ((bhp = SH_TAILQ_FIRST(
 				    &hp->hash_bucket, __bh)) != NULL)
-					__memp_bhfree(dbmp, hp, bhp,
-					    BH_FREE_FREEMEM | BH_FREE_UNLOCKED);
+					if ((t_ret = __memp_bhfree(
+					    dbmp, hp, bhp,
+					    BH_FREE_FREEMEM |
+					    BH_FREE_UNLOCKED)) != 0 && ret == 0)
+						ret = t_ret;
 		}
 
 	/* Discard DB_MPOOLFILEs. */
@@ -346,14 +371,16 @@ __memp_dbenv_refresh(dbenv)
 			ret = t_ret;
 
 	/* Discard DB_MPREGs. */
+	if (dbmp->pg_inout != NULL)
+		__os_free(dbenv, dbmp->pg_inout);
 	while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) {
 		LIST_REMOVE(mpreg, q);
 		__os_free(dbenv, mpreg);
 	}
 
 	/* Discard the DB_MPOOL thread mutex. */
-	if (dbmp->mutexp != NULL)
-		__db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+	if ((t_ret = __mutex_free(dbenv, &dbmp->mutex)) != 0 && ret == 0)
+		ret = t_ret;
 
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
 		/* Discard REGION IDs. */
@@ -383,62 +410,3 @@ __memp_dbenv_refresh(dbenv)
 	dbenv->mp_handle = NULL;
 	return (ret);
 }
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-/*
- * __memp_region_maint --
- *	Return the amount of space needed for region maintenance info.
- *
- */
-static size_t
-__memp_region_maint(infop)
-	REGINFO *infop;
-{
-	size_t s;
-	int numlocks;
-
-	/*
-	 * For mutex maintenance we need one mutex per possible page.
-	 * Compute the maximum number of pages this cache can have.
-	 * Also add in an mpool mutex and mutexes for all dbenv and db
-	 * handles.
-	 */
-	numlocks = ((infop->rp->size / DB_MIN_PGSIZE) + 1);
-	numlocks += DB_MAX_HANDLES;
-	s = sizeof(roff_t) * numlocks;
-	return (s);
-}
-#endif
-
-/*
- * __memp_region_destroy
- *	Destroy any region maintenance info.
- *
- * PUBLIC: void __memp_region_destroy __P((DB_ENV *, REGINFO *));
- */
-void
-__memp_region_destroy(dbenv, infop)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-{
-	/*
-	 * This routine is called in two cases: when discarding the mutexes
-	 * from a previous Berkeley DB run, during recovery, and two, when
-	 * discarding the mutexes as we shut down the database environment.
-	 * In the latter case, we also need to discard shared memory segments,
-	 * this is the last time we use them, and the last region-specific
-	 * call we make.
-	 */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	MPOOL *mp;
-
-	mp = R_ADDR(infop, infop->rp->primary);
-
-	/* Destroy mutexes. */
-	__db_shlocks_destroy(infop, R_ADDR(infop, mp->maint_off));
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, R_ADDR(infop, mp->maint_off));
-#endif
-	if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
-		__db_shalloc_free(infop, infop->primary);
-}
diff --git a/storage/bdb/mp/mp_register.c b/storage/bdb/mp/mp_register.c
index 0294fd5f0cb..b93a07cc3de 100644
--- a/storage/bdb/mp/mp_register.c
+++ b/storage/bdb/mp/mp_register.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_register.c,v 11.26 2004/07/15 15:52:54 sue Exp $
+ * $Id: mp_register.c,v 12.6 2005/10/07 20:21:33 ubell Exp $
  */
 
 #include "db_config.h"
@@ -33,18 +33,17 @@ __memp_register_pp(dbenv, ftype, pgin, pgout)
 	int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
 	int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->mp_handle, "DB_ENV->memp_register", DB_INIT_MPOOL);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_register(dbenv, ftype, pgin, pgout);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv,
+	    (__memp_register(dbenv, ftype, pgin, pgout)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -70,11 +69,29 @@ __memp_register(dbenv, ftype, pgin, pgout)
 	dbmp = dbenv->mp_handle;
 
 	/*
-	 * Chances are good that the item has already been registered, as the
-	 * DB access methods are the folks that call this routine.  If already
-	 * registered, just update the entry, although it's probably unchanged.
+	 * We keep the DB pgin/pgout functions outside of the linked list
+	 * to avoid locking/unlocking the linked list on every page I/O.
+	 *
+	 * The Berkeley DB I/O conversion functions are registered when the
+	 * environment is first created, so there's no need for locking here.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+	if (ftype == DB_FTYPE_SET) {
+		if (dbmp->pg_inout != NULL)
+			return (0);
+		if ((ret =
+		    __os_malloc(dbenv, sizeof(DB_MPREG), &dbmp->pg_inout)) != 0)
+			return (ret);
+		dbmp->pg_inout->ftype = ftype;
+		dbmp->pg_inout->pgin = pgin;
+		dbmp->pg_inout->pgout = pgout;
+		return (0);
+	}
+
+	/*
+	 * The item may already have been registered.  If already registered,
+	 * just update the entry, although it's probably unchanged.
+	 */
+	MUTEX_LOCK(dbenv, dbmp->mutex);
 	for (mpreg = LIST_FIRST(&dbmp->dbregq);
 	    mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
 		if (mpreg->ftype == ftype) {
@@ -82,21 +99,17 @@ __memp_register(dbenv, ftype, pgin, pgout)
 			mpreg->pgout = pgout;
 			break;
 		}
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
-	if (mpreg != NULL)
-		return (0);
 
-	/* New entry. */
-	if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), &mpreg)) != 0)
-		return (ret);
+	if (mpreg == NULL) {			/* New entry. */
+		if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), &mpreg)) != 0)
+			return (ret);
+		mpreg->ftype = ftype;
+		mpreg->pgin = pgin;
+		mpreg->pgout = pgout;
 
-	mpreg->ftype = ftype;
-	mpreg->pgin = pgin;
-	mpreg->pgout = pgout;
-
-	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
-	LIST_INSERT_HEAD(&dbmp->dbregq, mpreg, q);
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+		LIST_INSERT_HEAD(&dbmp->dbregq, mpreg, q);
+	}
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
 	return (0);
 }
diff --git a/storage/bdb/mp/mp_stat.c b/storage/bdb/mp/mp_stat.c
index 3896b06da67..24a37adf308 100644
--- a/storage/bdb/mp/mp_stat.c
+++ b/storage/bdb/mp/mp_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_stat.c,v 11.82 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_stat.c,v 12.10 2005/10/27 01:26:00 mjc Exp $
  */
 
 #include "db_config.h"
@@ -28,12 +28,12 @@ static void __memp_print_bh
 		__P((DB_ENV *, DB_MPOOL *, BH *, roff_t *, u_int32_t));
 static int  __memp_print_all __P((DB_ENV *, u_int32_t));
 static int  __memp_print_stats __P((DB_ENV *, u_int32_t));
-static void __memp_print_hash __P((DB_ENV *,
+static int __memp_print_hash __P((DB_ENV *,
 		DB_MPOOL *, REGINFO *, roff_t *, u_int32_t));
 static int  __memp_stat __P((DB_ENV *,
 		DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
 static void __memp_stat_wait __P((
-		REGINFO *, MPOOL *, DB_MPOOL_STAT *, u_int32_t));
+		DB_ENV *, REGINFO *, MPOOL *, DB_MPOOL_STAT *, u_int32_t));
 
 /*
  * __memp_stat_pp --
@@ -49,7 +49,8 @@ __memp_stat_pp(dbenv, gspp, fspp, flags)
 	DB_MPOOL_FSTAT ***fspp;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -59,12 +60,9 @@ __memp_stat_pp(dbenv, gspp, fspp, flags)
 	    "DB_ENV->memp_stat", flags, DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_stat(dbenv, gspp, fspp, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_stat(dbenv, gspp, fspp, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -85,7 +83,8 @@ __memp_stat(dbenv, gspp, fspp, flags)
 	MPOOL *c_mp, *mp;
 	MPOOLFILE *mfp;
 	size_t len, nlen;
-	u_int32_t pages, pagesize, i;
+	u_int32_t i, pagesize, st_bytes, st_gbytes, st_hash_buckets, st_pages;
+	u_int32_t tmp_wait, tmp_nowait;
 	int ret;
 	char *name, *tname;
 
@@ -112,12 +111,12 @@ __memp_stat(dbenv, gspp, fspp, flags)
 		sp->st_ncache = dbmp->nreg;
 		sp->st_regsize = dbmp->reginfo[0].rp->size;
 
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		sp->st_mmapsize = mp->mp_mmapsize;
 		sp->st_maxopenfd = mp->mp_maxopenfd;
 		sp->st_maxwrite = mp->mp_maxwrite;
 		sp->st_maxwrite_sleep = mp->mp_maxwrite_sleep;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 
 		/* Walk the cache list and accumulate the global information. */
 		for (i = 0; i < mp->nreg; ++i) {
@@ -148,11 +147,12 @@ __memp_stat(dbenv, gspp, fspp, flags)
 			 * st_hash_nowait	calculated by __memp_stat_wait
 			 * st_hash_wait
 			 */
-			__memp_stat_wait(&dbmp->reginfo[i], c_mp, sp, flags);
-			sp->st_region_nowait +=
-			    dbmp->reginfo[i].rp->mutex.mutex_set_nowait;
-			sp->st_region_wait +=
-			    dbmp->reginfo[i].rp->mutex.mutex_set_wait;
+			__memp_stat_wait(
+			    dbenv, &dbmp->reginfo[i], c_mp, sp, flags);
+			__mutex_set_wait_info(dbenv,
+			    c_mp->mtx_region, &tmp_wait, &tmp_nowait);
+			sp->st_region_nowait += tmp_nowait;
+			sp->st_region_wait += tmp_wait;
 			sp->st_alloc += c_mp->stat.st_alloc;
 			sp->st_alloc_buckets += c_mp->stat.st_alloc_buckets;
 			if (sp->st_alloc_max_buckets <
@@ -166,14 +166,19 @@ __memp_stat(dbenv, gspp, fspp, flags)
 				    c_mp->stat.st_alloc_max_pages;
 
 			if (LF_ISSET(DB_STAT_CLEAR)) {
-				MUTEX_CLEAR(&dbmp->reginfo[i].rp->mutex);
+				__mutex_clear(dbenv, c_mp->mtx_region);
 
-				R_LOCK(dbenv, dbmp->reginfo);
-				pages = c_mp->stat.st_pages;
+				MPOOL_SYSTEM_LOCK(dbenv);
+				st_bytes = c_mp->stat.st_bytes;
+				st_gbytes = c_mp->stat.st_gbytes;
+				st_hash_buckets = c_mp->stat.st_hash_buckets;
+				st_pages = c_mp->stat.st_pages;
 				memset(&c_mp->stat, 0, sizeof(c_mp->stat));
-				c_mp->stat.st_hash_buckets = c_mp->htab_buckets;
-				c_mp->stat.st_pages = pages;
-				R_UNLOCK(dbenv, dbmp->reginfo);
+				c_mp->stat.st_bytes = st_bytes;
+				c_mp->stat.st_gbytes = st_gbytes;
+				c_mp->stat.st_hash_buckets = st_hash_buckets;
+				c_mp->stat.st_pages = st_pages;
+				MPOOL_SYSTEM_UNLOCK(dbenv);
 			}
 		}
 
@@ -185,7 +190,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 		 * statistics.  We added the cache information above, now we
 		 * add the per-file information.
 		 */
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 		    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
 			sp->st_map += mfp->stat.st_map;
@@ -200,7 +205,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 				mfp->stat.st_pagesize = pagesize;
 			}
 		}
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	}
 
 	/* Per-file statistics. */
@@ -208,7 +213,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 		*fspp = NULL;
 
 		/* Count the MPOOLFILE structures. */
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		for (i = 0, len = 0,
 		    mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 		    mfp != NULL;
@@ -217,7 +222,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 			    sizeof(DB_MPOOL_FSTAT) +
 			    strlen(__memp_fns(dbmp, mfp)) + 1;
 		len += sizeof(DB_MPOOL_FSTAT *);	/* Trailing NULL */
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 
 		if (i == 0)
 			return (0);
@@ -244,7 +249,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 		 * Files may have been opened since we counted, don't walk
 		 * off the end of the allocated space.
 		 */
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 		    mfp != NULL && i-- > 0;
 		    ++tfsp, ++tstruct, tname += nlen,
@@ -261,7 +266,7 @@ __memp_stat(dbenv, gspp, fspp, flags)
 			tstruct->file_name = tname;
 			memcpy(tname, name, nlen);
 		}
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 
 		*tfsp = NULL;
 	}
@@ -279,7 +284,8 @@ __memp_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -291,12 +297,9 @@ __memp_stat_print_pp(dbenv, flags)
 	    "DB_ENV->memp_stat_print", flags, DB_STAT_MEMP_FLAGS)) != 0)
 		return (ret);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_stat_print(dbenv, flags);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_stat_print(dbenv, flags)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -393,7 +396,7 @@ __memp_print_stats(dbenv, flags)
 	__db_dl(dbenv, "The longest hash chain searched for a page",
 	    (u_long)gsp->st_hash_longest);
 	__db_dl(dbenv,
-	    "Total number of hash buckets examined for page location",
+	    "Total number of hash chain entries checked for page",
 	    (u_long)gsp->st_hash_examined);
 	__db_dl_pct(dbenv,
 	    "The number of hash bucket locks that required waiting",
@@ -477,17 +480,20 @@ __memp_print_all(dbenv, flags)
 	MPOOLFILE *mfp;
 	roff_t fmap[FMAP_ENTRIES + 1];
 	u_int32_t i, mfp_flags;
-	int cnt;
+	int cnt, ret;
 
 	dbmp = dbenv->mp_handle;
 	mp = dbmp->reginfo[0].primary;
+	ret = 0;
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 
 	__db_print_reginfo(dbenv, dbmp->reginfo, "Mpool");
-
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+
 	__db_msg(dbenv, "MPOOL structure:");
+	__mutex_print_debug_single(
+	    dbenv, "MPOOL region mutex", mp->mtx_region, flags);
 	STAT_LSN("Maximum checkpoint LSN", &mp->lsn);
 	STAT_ULONG("Hash table entries", mp->htab_buckets);
 	STAT_ULONG("Hash table last-checked", mp->last_checked);
@@ -496,8 +502,8 @@ __memp_print_all(dbenv, flags)
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "DB_MPOOL handle information:");
-	__db_print_mutex(
-	    dbenv, NULL, dbmp->mutexp, "DB_MPOOL handle mutex", flags);
+	__mutex_print_debug_single(
+	    dbenv, "DB_MPOOL handle mutex", dbmp->mutex, flags);
 	STAT_ULONG("Underlying cache regions", dbmp->nreg);
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
@@ -516,10 +522,10 @@ __memp_print_all(dbenv, flags)
 		STAT_ULONG("Max gbytes", dbmfp->gbytes);
 		STAT_ULONG("Max bytes", dbmfp->bytes);
 		STAT_ULONG("Cache priority", dbmfp->priority);
-		STAT_HEX("mmap address", dbmfp->addr);
+		STAT_POINTER("mmap address", dbmfp->addr);
 		STAT_ULONG("mmap length", dbmfp->len);
 		__db_prflags(dbenv, NULL, dbmfp->flags, cfn, NULL, "\tFlags");
-		__db_print_fh(dbenv, dbmfp->fhp, flags);
+		__db_print_fh(dbenv, "File handle", dbmfp->fhp, flags);
 	}
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
@@ -527,9 +533,9 @@ __memp_print_all(dbenv, flags)
 	for (cnt = 0, mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) {
 		__db_msg(dbenv, "File #%d: %s", cnt + 1, __memp_fns(dbmp, mfp));
-		__db_print_mutex(dbenv, NULL, &mfp->mutex, "Mutex", flags);
+		__mutex_print_debug_single(dbenv, "Mutex", mfp->mutex, flags);
 
-		MUTEX_LOCK(dbenv, &mfp->mutex);
+		MUTEX_LOCK(dbenv, mfp->mutex);
 		STAT_ULONG("Reference count", mfp->mpf_cnt);
 		STAT_ULONG("Block count", mfp->block_cnt);
 		STAT_ULONG("Last page number", mfp->last_pgno);
@@ -556,9 +562,9 @@ __memp_print_all(dbenv, flags)
 
 		if (cnt < FMAP_ENTRIES)
 			fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
-		MUTEX_UNLOCK(dbenv, &mfp->mutex);
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 	}
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	if (cnt < FMAP_ENTRIES)
 		fmap[cnt] = INVALID_ROFF;
@@ -569,17 +575,19 @@ __memp_print_all(dbenv, flags)
 	for (i = 0; i < mp->nreg; ++i) {
 		__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 		__db_msg(dbenv, "Cache #%d:", i + 1);
-		__memp_print_hash(dbenv, dbmp, &dbmp->reginfo[i], fmap, flags);
+		if ((ret = __memp_print_hash(
+		    dbenv, dbmp, &dbmp->reginfo[i], fmap, flags)) != 0)
+			break;
 	}
 
-	return (0);
+	return (ret);
 }
 
 /*
  * __memp_print_hash --
  *	Display hash bucket statistics for a cache.
  */
-static void
+static int
 __memp_print_hash(dbenv, dbmp, reginfo, fmap, flags)
 	DB_ENV *dbenv;
 	DB_MPOOL *dbmp;
@@ -599,26 +607,27 @@ __memp_print_hash(dbenv, dbmp, reginfo, fmap, flags)
 	/* Display the hash table list of BH's. */
 	__db_msg(dbenv,
 	    "BH hash table (%lu hash slots)", (u_long)c_mp->htab_buckets);
-	__db_msg(dbenv, "bucket #: priority, mutex");
+	__db_msg(dbenv, "bucket #: priority, [mutex]");
 	__db_msg(dbenv,
-	    "\tpageno, file, ref, LSN, mutex, address, priority, flags");
+    "\tpageno, file, ref, LSN, [mutex], address, priority, flags");
 
 	for (hp = R_ADDR(reginfo, c_mp->htab),
 	    bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
-		MUTEX_LOCK(dbenv, &hp->hash_mutex);
-		if ((bhp =
-		    SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL) {
+		MUTEX_LOCK(dbenv, hp->mtx_hash);
+		if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL) {
 			__db_msgadd(dbenv, &mb, "bucket %lu: %lu, ",
 			    (u_long)bucket, (u_long)hp->hash_priority);
-			__db_print_mutex(
-			    dbenv, &mb, &hp->hash_mutex, ":", flags);
+			__mutex_print_debug_stats(
+			    dbenv, &mb, hp->mtx_hash, flags);
 			DB_MSGBUF_FLUSH(dbenv, &mb);
 		}
 		for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
 			__memp_print_bh(dbenv, dbmp, bhp, fmap, flags);
 
-		MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+		MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 	}
+
+	return (0);
 }
 
 /*
@@ -660,8 +669,8 @@ __memp_print_bh(dbenv, dbmp, bhp, fmap, flags)
 
 	__db_msgadd(dbenv, &mb, "%2lu, %lu/%lu, ", (u_long)bhp->ref,
 	    (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset);
-	__db_print_mutex(dbenv, &mb, &bhp->mutex, ", ", flags);
-	__db_msgadd(dbenv, &mb, "%#08lx, %lu",
+	__mutex_print_debug_stats(dbenv, &mb, bhp->mtx_bh, flags);
+	__db_msgadd(dbenv, &mb, ", %#08lx, %lu",
 	    (u_long)R_OFFSET(dbmp->reginfo, bhp), (u_long)bhp->priority);
 	__db_prflags(dbenv, &mb, bhp->flags, fn, " (", ")");
 	DB_MSGBUF_FLUSH(dbenv, &mb);
@@ -672,27 +681,28 @@ __memp_print_bh(dbenv, dbmp, bhp, fmap, flags)
  *	Total hash bucket wait stats into the region.
  */
 static void
-__memp_stat_wait(reginfo, mp, mstat, flags)
+__memp_stat_wait(dbenv, reginfo, mp, mstat, flags)
+	DB_ENV *dbenv;
 	REGINFO *reginfo;
 	MPOOL *mp;
 	DB_MPOOL_STAT *mstat;
 	u_int32_t flags;
 {
 	DB_MPOOL_HASH *hp;
-	DB_MUTEX *mutexp;
-	u_int32_t i;
+	u_int32_t i, tmp_nowait, tmp_wait;
 
 	mstat->st_hash_max_wait = 0;
 	hp = R_ADDR(reginfo, mp->htab);
 	for (i = 0; i < mp->htab_buckets; i++, hp++) {
-		mutexp = &hp->hash_mutex;
-		mstat->st_hash_nowait += mutexp->mutex_set_nowait;
-		mstat->st_hash_wait += mutexp->mutex_set_wait;
-		if (mutexp->mutex_set_wait > mstat->st_hash_max_wait)
-			mstat->st_hash_max_wait = mutexp->mutex_set_wait;
+		__mutex_set_wait_info(
+		    dbenv, hp->mtx_hash, &tmp_wait, &tmp_nowait);
+		mstat->st_hash_nowait += tmp_nowait;
+		mstat->st_hash_wait += tmp_wait;
+		if (tmp_wait > mstat->st_hash_max_wait)
+			mstat->st_hash_max_wait = tmp_wait;
 
 		if (LF_ISSET(DB_STAT_CLEAR))
-			MUTEX_CLEAR(mutexp);
+			__mutex_clear(dbenv, hp->mtx_hash);
 	}
 }
 
diff --git a/storage/bdb/mp/mp_sync.c b/storage/bdb/mp/mp_sync.c
index 6aadab2c131..cfc7c4f52da 100644
--- a/storage/bdb/mp/mp_sync.c
+++ b/storage/bdb/mp/mp_sync.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_sync.c,v 11.98 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_sync.c,v 12.11 2005/10/07 20:21:33 ubell Exp $
  */
 
 #include "db_config.h"
@@ -42,7 +42,8 @@ __memp_sync_pp(dbenv, lsnp)
 	DB_ENV *dbenv;
 	DB_LSN *lsnp;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
@@ -56,12 +57,9 @@ __memp_sync_pp(dbenv, lsnp)
 		ENV_REQUIRES_CONFIG(dbenv,
 		    dbenv->lg_handle, "memp_sync", DB_INIT_LOG);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_sync(dbenv, lsnp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_sync(dbenv, lsnp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -85,24 +83,24 @@ __memp_sync(dbenv, lsnp)
 
 	/* If we've flushed to the requested LSN, return that information. */
 	if (lsnp != NULL) {
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		if (log_compare(lsnp, &mp->lsn) <= 0) {
 			*lsnp = mp->lsn;
 
-			R_UNLOCK(dbenv, dbmp->reginfo);
+			MPOOL_SYSTEM_UNLOCK(dbenv);
 			return (0);
 		}
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	}
 
 	if ((ret = __memp_sync_int(dbenv, NULL, 0, DB_SYNC_CACHE, NULL)) != 0)
 		return (ret);
 
 	if (lsnp != NULL) {
-		R_LOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_LOCK(dbenv);
 		if (log_compare(lsnp, &mp->lsn) > 0)
 			mp->lsn = *lsnp;
-		R_UNLOCK(dbenv, dbmp->reginfo);
+		MPOOL_SYSTEM_UNLOCK(dbenv);
 	}
 
 	return (0);
@@ -119,19 +117,17 @@ __memp_fsync_pp(dbmfp)
 	DB_MPOOLFILE *dbmfp;
 {
 	DB_ENV *dbenv;
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	dbenv = dbmfp->dbenv;
 
 	PANIC_CHECK(dbenv);
 	MPF_ILLEGAL_BEFORE_OPEN(dbmfp, "DB_MPOOLFILE->sync");
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_fsync(dbmfp);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_fsync(dbmfp)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
@@ -158,7 +154,7 @@ __memp_fsync(dbmfp)
 	if (F_ISSET(dbmfp, MP_READONLY))
 		return (0);
 
-	if (F_ISSET(mfp, MP_TEMP))
+	if (F_ISSET(dbmfp->mfp, MP_TEMP) || dbmfp->mfp->no_backing_file)
 		return (0);
 
 	if (mfp->file_written == 0)
@@ -216,9 +212,9 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 	BH_TRACK *bharray;
 	DB_MPOOL *dbmp;
 	DB_MPOOL_HASH *hp;
-	DB_MUTEX *mutexp;
 	MPOOL *c_mp, *mp;
 	MPOOLFILE *mfp;
+	db_mutex_t mutex;
 	roff_t last_mf_offset;
 	u_int32_t ar_cnt, ar_max, i, n_cache, remaining, wrote;
 	int filecnt, hb_lock, maxopenfd, maxwrite, maxwrite_sleep;
@@ -230,11 +226,11 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 	filecnt = pass = wrote = 0;
 
 	/* Get shared configuration information. */
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	maxopenfd = mp->mp_maxopenfd;
 	maxwrite = mp->mp_maxwrite;
 	maxwrite_sleep = mp->mp_maxwrite_sleep;
-	R_UNLOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_UNLOCK(dbenv);
 
 	/* Assume one dirty page per bucket. */
 	ar_max = mp->nreg * mp->htab_buckets;
@@ -261,7 +257,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 			if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
 				continue;
 
-			MUTEX_LOCK(dbenv, &hp->hash_mutex);
+			MUTEX_LOCK(dbenv, hp->mtx_hash);
 			for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
 			    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
 				/* Always ignore unreferenced, clean pages. */
@@ -286,14 +282,12 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 				mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
 
 				/*
-				 * Ignore temporary files -- this means you
-				 * can't even flush temporary files by handle.
-				 * (Checkpoint doesn't require temporary files
-				 * be flushed and the underlying buffer write
-				 * write routine may not be able to write it
-				 * anyway.)
+				 * Ignore in-memory files, even if they are
+				 * temp files to whom a backing file has been
+				 * allocated.
 				 */
-				if (F_ISSET(mfp, MP_TEMP))
+				if (mfp->no_backing_file ||
+				    F_ISSET(mfp, MP_TEMP))
 					continue;
 
 				/*
@@ -330,7 +324,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 					ar_max *= 2;
 				}
 			}
-			MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+			MUTEX_UNLOCK(dbenv, hp->mtx_hash);
 
 			if (ret != 0)
 				goto err;
@@ -382,8 +376,8 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 			continue;
 
 		/* Lock the hash bucket and find the buffer. */
-		mutexp = &hp->hash_mutex;
-		MUTEX_LOCK(dbenv, mutexp);
+		mutex = hp->mtx_hash;
+		MUTEX_LOCK(dbenv, mutex);
 		for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
 		    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
 			if (bhp->pgno == bharray[i].track_pgno &&
@@ -398,7 +392,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		 * no work needed.
 		 */
 		if (bhp == NULL || (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))) {
-			MUTEX_UNLOCK(dbenv, mutexp);
+			MUTEX_UNLOCK(dbenv, mutex);
 			--remaining;
 			bharray[i].track_hp = NULL;
 			continue;
@@ -416,7 +410,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		 * write it.
 		 */
 		if (F_ISSET(bhp, BH_LOCKED) || (bhp->ref != 0 && pass < 2)) {
-			MUTEX_UNLOCK(dbenv, mutexp);
+			MUTEX_UNLOCK(dbenv, mutex);
 			if (op != DB_SYNC_CACHE && op != DB_SYNC_FILE) {
 				--remaining;
 				bharray[i].track_hp = NULL;
@@ -435,7 +429,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		/* Pin the buffer into memory and lock it. */
 		++bhp->ref;
 		F_SET(bhp, BH_LOCKED);
-		MUTEX_LOCK(dbenv, &bhp->mutex);
+		MUTEX_LOCK(dbenv, bhp->mtx_bh);
 
 		/*
 		 * Unlock the hash bucket and wait for the wait-for count to
@@ -450,11 +444,11 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		 * If, when the wait-for count goes to 0, the buffer is found
 		 * to be dirty, write it.
 		 */
-		MUTEX_UNLOCK(dbenv, mutexp);
+		MUTEX_UNLOCK(dbenv, mutex);
 		for (wait_cnt = 1;
 		    bhp->ref_sync != 0 && wait_cnt < 4; ++wait_cnt)
 			__os_sleep(dbenv, 1, 0);
-		MUTEX_LOCK(dbenv, mutexp);
+		MUTEX_LOCK(dbenv, mutex);
 		hb_lock = 1;
 
 		/*
@@ -486,7 +480,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		 */
 		if (bhp->ref_sync == 0 && F_ISSET(bhp, BH_DIRTY)) {
 			hb_lock = 0;
-			MUTEX_UNLOCK(dbenv, mutexp);
+			MUTEX_UNLOCK(dbenv, mutex);
 
 			mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
 			if ((ret = __memp_bhwrite(dbmp, hp, mfp, bhp, 1)) == 0)
@@ -523,10 +517,10 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 		 */
 		if (F_ISSET(bhp, BH_LOCKED)) {
 			F_CLR(bhp, BH_LOCKED);
-			MUTEX_UNLOCK(dbenv, &bhp->mutex);
+			MUTEX_UNLOCK(dbenv, bhp->mtx_bh);
 
 			if (!hb_lock)
-				MUTEX_LOCK(dbenv, mutexp);
+				MUTEX_LOCK(dbenv, mutex);
 		}
 
 		/*
@@ -537,7 +531,7 @@ __memp_sync_int(dbenv, dbmfp, trickle_max, op, wrotep)
 
 		/* Discard our reference and unlock the bucket. */
 		--bhp->ref;
-		MUTEX_UNLOCK(dbenv, mutexp);
+		MUTEX_UNLOCK(dbenv, mutex);
 
 		if (ret != 0)
 			break;
@@ -579,46 +573,96 @@ int __memp_sync_files(dbenv, dbmp)
 {
 	DB_MPOOLFILE *dbmfp;
 	MPOOL *mp;
-	MPOOLFILE *mfp;
-	int final_ret, ret;
+	MPOOLFILE *mfp, *next_mfp;
+	int need_discard_pass, ret, t_ret;
 
-	final_ret = 0;
+	need_discard_pass = ret = 0;
 	mp = dbmp->reginfo[0].primary;
 
-	R_LOCK(dbenv, dbmp->reginfo);
+	MPOOL_SYSTEM_LOCK(dbenv);
 	for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
 	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
-		if (!mfp->file_written ||
+		if (!mfp->file_written || mfp->no_backing_file ||
 		    mfp->deadfile || F_ISSET(mfp, MP_TEMP))
 			continue;
+		/*
+		 * Pin the MPOOLFILE structure into memory, and release the
+		 * region mutex allowing us to walk the linked list.  We'll
+		 * re-acquire that mutex to move to the next entry in the list.
+		 *
+		 * This works because we only need to flush current entries,
+		 * we don't care about new entries being added, and the linked
+		 * list is never re-ordered, a single pass is sufficient.  It
+		 * requires MPOOLFILE structures removed before we get to them
+		 * be flushed to disk, but that's nothing new, they could have
+		 * been removed while checkpoint was running, too.
+		 *
+		 * Once we have the MPOOLFILE lock, re-check the MPOOLFILE is
+		 * not being discarded.  (A thread removing the MPOOLFILE
+		 * will: hold the MPOOLFILE mutex, set deadfile, drop the
+		 * MPOOLFILE mutex and then acquire the region MUTEX to walk
+		 * the linked list and remove the MPOOLFILE structure.)  Make
+		 * sure the MPOOLFILE wasn't marked dead while we waited for
+		 * the mutex.
+		 */
+		MUTEX_LOCK(dbenv, mfp->mutex);
+		if (!mfp->file_written || mfp->deadfile) {
+			MUTEX_UNLOCK(dbenv, mfp->mutex);
+			continue;
+		}
+		MPOOL_SYSTEM_UNLOCK(dbenv);
+		++mfp->mpf_cnt;
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
 
 		/*
 		 * Look for an already open, writeable handle (fsync doesn't
 		 * work on read-only Windows handles).
 		 */
-		ret = 0;
-		MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+		MUTEX_LOCK(dbenv, dbmp->mutex);
 		for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
 		    dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q)) {
 			if (dbmfp->mfp != mfp || F_ISSET(dbmfp, MP_READONLY))
 				continue;
-			ret = __os_fsync(dbenv, dbmfp->fhp);
+			/*
+			 * We don't want to hold the mutex while calling sync.
+			 * Increment the DB_MPOOLFILE handle ref count to pin
+			 * it into memory.
+			 */
+			++dbmfp->ref;
 			break;
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
-		/* If we don't find one, open one. */
-		if (dbmfp == NULL)
-			ret = __memp_mf_sync(dbmp, mfp);
-		if (ret != 0) {
-			__db_err(dbenv, "%s: unable to flush: %s",
-			    (char *)R_ADDR(dbmp->reginfo, mfp->path_off),
-			    db_strerror(ret));
-			if (final_ret == 0)
-				final_ret = ret;
-			continue;
+		/* If we don't find a handle we can use, open one. */
+		if (dbmfp == NULL) {
+			if ((t_ret = __memp_mf_sync(dbmp, mfp, 0)) != 0) {
+				__db_err(dbenv,
+				    "%s: unable to flush: %s", (char *)
+				    R_ADDR(dbmp->reginfo, mfp->path_off),
+				    db_strerror(t_ret));
+				if (ret == 0)
+					ret = t_ret;
+			}
+		} else {
+			if ((t_ret =
+			    __os_fsync(dbenv, dbmfp->fhp)) != 0 && ret == 0)
+				ret = t_ret;
+
+			if ((t_ret = __memp_fclose(dbmfp, 0)) != 0 && ret == 0)
+				ret = t_ret;
 		}
 
+		/*
+		 * Re-acquire the region lock, we need it to move to the next
+		 * MPOOLFILE.
+		 *
+		 * Re-acquire the MPOOLFILE mutex, we need it to modify the
+		 * reference count.
+		 */
+		MPOOL_SYSTEM_LOCK(dbenv);
+		MUTEX_LOCK(dbenv, mfp->mutex);
+		--mfp->mpf_cnt;
+
 		/*
 		 * If we wrote the file and there are no open handles (or there
 		 * is a single open handle, and it's the one we opened to write
@@ -634,26 +678,66 @@ int __memp_sync_files(dbenv, dbmp)
 		 * racing with us to open a MPOOLFILE.
 		 */
 		if (mfp->mpf_cnt == 0 || (mfp->mpf_cnt == 1 &&
-		    dbmfp != NULL && F_ISSET(dbmfp, MP_FLUSH)))
+		    dbmfp != NULL && F_ISSET(dbmfp, MP_FLUSH))) {
 			mfp->file_written = 0;
-	}
-	R_UNLOCK(dbenv, dbmp->reginfo);
 
-	return (final_ret);
+			/*
+			 * We may be the last reference for a MPOOLFILE, as we
+			 * weren't holding the MPOOLFILE mutex when flushing
+			 * its buffers to disk.  If we can discard it, set
+			 * a flag to schedule a clean-out pass.   (Not likely,
+			 * I mean, what are the chances that there aren't any
+			 * buffers in the pool?  Regardless, it might happen.)
+			 */
+			if (mfp->mpf_cnt == 0 && mfp->block_cnt == 0)
+				need_discard_pass = 1;
+		}
+
+		/* Unlock the MPOOLFILE, and move to the next entry. */
+		MUTEX_UNLOCK(dbenv, mfp->mutex);
+	}
+
+	/*
+	 * We exit the loop holding the region lock.
+	 *
+	 * We may need to do a last pass through the MPOOLFILE list -- if we
+	 * were the last reference to an MPOOLFILE, we need to clean it out.
+	 */
+	if (need_discard_pass)
+		for (mfp = SH_TAILQ_FIRST(
+		    &mp->mpfq, __mpoolfile); mfp != NULL; mfp = next_mfp) {
+			next_mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile);
+
+			/*
+			 * Do a fast check -- we can check for zero/non-zero
+			 * without a mutex on the MPOOLFILE.  If likely to
+			 * succeed, lock the MPOOLFILE down and look for real.
+			 */
+			if (mfp->block_cnt != 0 || mfp->mpf_cnt != 0)
+				continue;
+
+			MUTEX_LOCK(dbenv, mfp->mutex);
+			if (mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
+				(void)__memp_mf_discard(dbmp, mfp);
+			else
+				MUTEX_UNLOCK(dbenv, mfp->mutex);
+		}
+	MPOOL_SYSTEM_UNLOCK(dbenv);
+
+	return (ret);
 }
 
 /*
  * __memp_mf_sync --
- *	 Flush an MPOOLFILE.
+ *	Flush an MPOOLFILE, when no currently open handle is available.
  *
- *	Should only be used when the file is not already open in this process.
- *
- * PUBLIC: int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *));
+ * PUBLIC: int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
  */
 int
-__memp_mf_sync(dbmp, mfp)
+__memp_mf_sync(dbmp, mfp, region_locked)
 	DB_MPOOL *dbmp;
 	MPOOLFILE *mfp;
+	int region_locked;
 {
 	DB_ENV *dbenv;
 	DB_FH *fhp;
@@ -663,12 +747,14 @@ __memp_mf_sync(dbmp, mfp)
 	dbenv = dbmp->dbenv;
 
 	/*
-	 * Expects caller to be holding the region lock: we're using the path
-	 * name and __memp_nameop might try and rename the file.
+	 * We need to be holding the region lock: we're using the path name
+	 * and __memp_nameop might try and rename the file.
 	 */
+	if (!region_locked)
+		MPOOL_SYSTEM_LOCK(dbenv);
+
 	if ((ret = __db_appname(dbenv, DB_APP_DATA,
-	    R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL,
-	    &rpath)) == 0) {
+	    R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) == 0) {
 		if ((ret = __os_open(dbenv, rpath, 0, 0, &fhp)) == 0) {
 			ret = __os_fsync(dbenv, fhp);
 			if ((t_ret =
@@ -678,6 +764,9 @@ __memp_mf_sync(dbmp, mfp)
 		__os_free(dbenv, rpath);
 	}
 
+	if (!region_locked)
+		MPOOL_SYSTEM_UNLOCK(dbenv);
+
 	return (ret);
 }
 
@@ -708,33 +797,35 @@ __memp_close_flush_files(dbenv, dbmp, dosync)
 	 * MP_FLUSH flag.  Here we walk through our file descriptor list,
 	 * and, if a file was opened by __memp_bhwrite(), we close it.
 	 */
-retry:	MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+retry:	MUTEX_LOCK(dbenv, dbmp->mutex);
 	for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
 	    dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
 		if (F_ISSET(dbmfp, MP_FLUSH)) {
 			F_CLR(dbmfp, MP_FLUSH);
-			MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+			MUTEX_UNLOCK(dbenv, dbmp->mutex);
 			if (dosync) {
-				if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0)
-					return (ret);
 				/*
-				 * If the file is clean and we have the only
-				 * open handle on the file, clear the dirty
-				 * flag so we don't re-open and sync it again.
+				 * If we have the only open handle on the file,
+				 * clear the dirty flag so we don't re-open and
+				 * sync it again when discarding the MPOOLFILE
+				 * structure.  Clear the flag before the sync
+				 * so we can't race with a thread writing the file.
 				 */
 				mfp = dbmfp->mfp;
 				if (mfp->mpf_cnt == 1) {
-					R_LOCK(dbenv, dbmp->reginfo);
+					MUTEX_LOCK(dbenv, mfp->mutex);
 					if (mfp->mpf_cnt == 1)
 						mfp->file_written = 0;
-					R_UNLOCK(dbenv, dbmp->reginfo);
+					MUTEX_UNLOCK(dbenv, mfp->mutex);
 				}
+				if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0)
+					return (ret);
 			}
 			if ((ret = __memp_fclose(dbmfp, 0)) != 0)
 				return (ret);
 			goto retry;
 		}
-	MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbmp->mutex);
 
 	return (0);
 }
diff --git a/storage/bdb/mp/mp_trickle.c b/storage/bdb/mp/mp_trickle.c
index fc346890494..4c6a2d0c82e 100644
--- a/storage/bdb/mp/mp_trickle.c
+++ b/storage/bdb/mp/mp_trickle.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mp_trickle.c,v 11.35 2004/10/15 16:59:43 bostic Exp $
+ * $Id: mp_trickle.c,v 12.4 2005/10/07 20:21:33 ubell Exp $
  */
 
 #include "db_config.h"
@@ -33,18 +33,16 @@ __memp_trickle_pp(dbenv, pct, nwrotep)
 	DB_ENV *dbenv;
 	int pct, *nwrotep;
 {
-	int rep_check, ret;
+	DB_THREAD_INFO *ip;
+	int ret;
 
 	PANIC_CHECK(dbenv);
 	ENV_REQUIRES_CONFIG(dbenv,
 	    dbenv->mp_handle, "memp_trickle", DB_INIT_MPOOL);
 
-	rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
-	if (rep_check)
-		__env_rep_enter(dbenv);
-	ret = __memp_trickle(dbenv, pct, nwrotep);
-	if (rep_check)
-		__env_db_rep_exit(dbenv);
+	ENV_ENTER(dbenv, ip);
+	REPLICATION_WRAP(dbenv, (__memp_trickle(dbenv, pct, nwrotep)), ret);
+	ENV_LEAVE(dbenv, ip);
 	return (ret);
 }
 
diff --git a/storage/bdb/mutex/README b/storage/bdb/mutex/README
index 323c34f1e74..23527586a72 100644
--- a/storage/bdb/mutex/README
+++ b/storage/bdb/mutex/README
@@ -1,35 +1,37 @@
-# $Id: README,v 11.2 1999/11/21 18:12:48 bostic Exp $
+# $Id: README,v 12.1 2005/07/20 16:51:55 bostic Exp $
 
 Note: this only applies to locking using test-and-set and fcntl calls,
 pthreads were added after this was written.
 
-Resource locking routines: lock based on a db_mutex_t.  All this gunk
+Resource locking routines: lock based on a DB_MUTEX.  All this gunk
 (including trying to make assembly code portable), is necessary because
 System V semaphores require system calls for uncontested locks and we
 don't want to make two system calls per resource lock.
 
-First, this is how it works.  The db_mutex_t structure contains a resource
+First, this is how it works.  The DB_MUTEX structure contains a resource
 test-and-set lock (tsl), a file offset, a pid for debugging and statistics
 information.
 
-If HAVE_MUTEX_THREADS is defined (i.e. we know how to do test-and-sets
-for this compiler/architecture combination), we try and lock the resource
-tsl __os_spin() times.  If we can't acquire the lock that way, we use a
-system call to sleep for 1ms, 2ms, 4ms, etc.  (The time is bounded at 1
-second, just in case.)  Using the timer backoff means that there are two
-assumptions: that locks are held for brief periods (never over system
-calls or I/O) and that locks are not hotly contested.
+If HAVE_MUTEX_FCNTL is NOT defined (that is, we know how to do
+test-and-sets for this compiler/architecture combination), we try and
+lock the resource tsl some number of times (based on the number of
+processors).  If we can't acquire the mutex that way, we use a system
+call to sleep for 1ms, 2ms, 4ms, etc.  (The time is bounded at 10ms for
+mutexes backing logical locks and 25ms for data structures, just in
+case.)  Using the timer backoff means that there are two assumptions:
+that mutexes are held for brief periods (never over system calls or I/O)
+and mutexes are not hotly contested.
 
-If HAVE_MUTEX_THREADS is not defined, i.e. we can't do test-and-sets, we
-use a file descriptor to do byte locking on a file at a specified offset.
-In this case, ALL of the locking is done in the kernel.  Because file
-descriptors are allocated per process, we have to provide the file
-descriptor as part of the lock call.  We still have to do timer backoff
-because we need to be able to block ourselves, i.e. the lock manager
-causes processes to wait by having the process acquire a mutex and then
-attempting to re-acquire the mutex.  There's no way to use kernel locking
-to block yourself, i.e. if you hold a lock and attempt to re-acquire it,
-the attempt will succeed.
+If HAVE_MUTEX_FCNTL is defined, we use a file descriptor to do byte
+locking on a file at a specified offset.  In this case, ALL of the
+locking is done in the kernel.  Because file descriptors are allocated
+per process, we have to provide the file descriptor as part of the lock
+call.  We still have to do timer backoff because we need to be able to
+block ourselves, that is, the lock manager causes processes to wait by
+having the process acquire a mutex and then attempting to re-acquire the
+mutex.  There's no way to use kernel locking to block yourself, that is,
+if you hold a lock and attempt to re-acquire it, the attempt will
+succeed.
 
 Next, let's talk about why it doesn't work the way a reasonable person
 would think it should work.
@@ -42,7 +44,7 @@ would wake any waiting processes up after releasing the lock.  This would
 actually require both another tsl (call it the mutex tsl) and
 synchronization between the call that blocks in the kernel and the actual
 resource tsl.  The mutex tsl would be used to protect accesses to the
-db_mutex_t itself.  Locking the mutex tsl would be done by a busy loop,
+DB_MUTEX itself.  Locking the mutex tsl would be done by a busy loop,
 which is safe because processes would never block holding that tsl (all
 they would do is try to obtain the resource tsl and set/check the wait
 count).  The problem in this model is that the blocking call into the
@@ -55,7 +57,7 @@ methods are sufficient to solve the problem.
 
 The problem with fcntl locking is that only the process that obtained the
 lock can release it.  Remember, we want the normal state of the kernel
-semaphore to be locked.  So, if the creator of the db_mutex_t were to
+semaphore to be locked.  So, if the creator of the DB_MUTEX were to
 initialize the lock to "locked", then a second process locks the resource
 tsl, and then a third process needs to block, waiting for the resource
 tsl, when the second process wants to wake up the third process, it can't
@@ -69,11 +71,11 @@ or using a different blocking offset depending on which process is
 holding the lock, but it gets complicated fairly quickly.  I'm open to
 suggestions, but I'm not holding my breath.
 
-Regardless, we use this form of locking when HAVE_SPINLOCKS is not
-defined, (i.e. we're locking in the kernel) because it doesn't have the
-limitations found in System V semaphores, and because the normal state of
-the kernel object in that case is unlocked, so the process releasing the
-lock is also the holder of the lock.
+Regardless, we use this form of locking when we don't have any other
+choice, because it doesn't have the limitations found in System V
+semaphores, and because the normal state of the kernel object in that
+case is unlocked, so the process releasing the lock is also the holder
+of the lock.
 
 The System V semaphore design has a number of other limitations that make
 it inappropriate for this task.  Namely:
diff --git a/storage/bdb/mutex/mut_alloc.c b/storage/bdb/mutex/mut_alloc.c
new file mode 100644
index 00000000000..ad91d3d66b3
--- /dev/null
+++ b/storage/bdb/mutex/mut_alloc.c
@@ -0,0 +1,229 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: mut_alloc.c,v 12.6 2005/08/08 14:57:54 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+
+#include 
+#endif
+
+#include "db_int.h"
+#include "dbinc/mutex_int.h"
+
+static int __mutex_free_int __P((DB_ENV *, int, db_mutex_t *));
+
+/*
+ * __mutex_alloc --
+ *	Allocate a mutex from the mutex region.
+ *
+ * PUBLIC: int __mutex_alloc __P((DB_ENV *, int, u_int32_t, db_mutex_t *));
+ */
+int
+__mutex_alloc(dbenv, alloc_id, flags, indxp)
+	DB_ENV *dbenv;
+	int alloc_id;
+	u_int32_t flags;
+	db_mutex_t *indxp;
+{
+	int ret;
+
+	/* The caller may depend on us to initialize. */
+	*indxp = MUTEX_INVALID;
+
+	/*
+	 * If this is not an application lock, and we've turned off locking,
+	 * or the DB_ENV handle isn't thread-safe, and this is a thread lock
+	 * or the environment isn't multi-process by definition, there's no
+	 * need to mutex at all.
+	 */
+	if (alloc_id != MTX_APPLICATION &&
+	    (F_ISSET(dbenv, DB_ENV_NOLOCKING) ||
+	    (!F_ISSET(dbenv, DB_ENV_THREAD) &&
+	    (LF_ISSET(DB_MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)))))
+		return (0);
+
+	/*
+	 * If we have a region in which to allocate the mutexes, lock it and
+	 * do the allocation.
+	 */
+	if (MUTEX_ON(dbenv))
+		return (__mutex_alloc_int(dbenv, 1, alloc_id, flags, indxp));
+
+	/*
+	 * We have to allocate some number of mutexes before we have a region
+	 * in which to allocate them.  We handle this by saving up the list of
+	 * flags and allocating them as soon as we have a handle.
+	 *
+	 * The list of mutexes to alloc is maintained in pairs: first the
+	 * alloc_id argument, second the flags passed in by the caller.
+	 */
+	if (dbenv->mutex_iq == NULL) {
+		dbenv->mutex_iq_max = 50;
+		if ((ret = __os_calloc(dbenv, dbenv->mutex_iq_max,
+		    sizeof(dbenv->mutex_iq[0]), &dbenv->mutex_iq)) != 0)
+			return (ret);
+	} else if (dbenv->mutex_iq_next == dbenv->mutex_iq_max - 1) {
+		dbenv->mutex_iq_max *= 2;
+		if ((ret = __os_realloc(dbenv,
+		    dbenv->mutex_iq_max * sizeof(dbenv->mutex_iq[0]),
+		    &dbenv->mutex_iq)) != 0)
+			return (ret);
+	}
+	*indxp = dbenv->mutex_iq_next + 1;	/* Correct for MUTEX_INVALID. */
+	dbenv->mutex_iq[dbenv->mutex_iq_next].alloc_id = alloc_id;
+	dbenv->mutex_iq[dbenv->mutex_iq_next].flags = flags;
+	++dbenv->mutex_iq_next;
+
+	return (0);
+}
+
+/*
+ * __mutex_alloc_int --
+ *	Internal routine to allocate a mutex.
+ *
+ * PUBLIC: int __mutex_alloc_int
+ * PUBLIC:	__P((DB_ENV *, int, int, u_int32_t, db_mutex_t *));
+ */
+int
+__mutex_alloc_int(dbenv, locksys, alloc_id, flags, indxp)
+	DB_ENV *dbenv;
+	int locksys, alloc_id;
+	u_int32_t flags;
+	db_mutex_t *indxp;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	int ret;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	ret = 0;
+
+	/*
+	 * If we're not initializing the mutex region, then lock the region to
+	 * allocate new mutexes.  Drop the lock before initializing the mutex,
+	 * mutex initialization may require a system call.
+	 */
+	if (locksys)
+		MUTEX_SYSTEM_LOCK(dbenv);
+
+	if (mtxregion->mutex_next == MUTEX_INVALID) {
+		__db_err(dbenv,
+		    "unable to allocate memory for mutex; resize mutex region");
+		if (locksys)
+			MUTEX_SYSTEM_UNLOCK(dbenv);
+		return (ENOMEM);
+	}
+
+	*indxp = mtxregion->mutex_next;
+	mutexp = MUTEXP_SET(*indxp);
+	mtxregion->mutex_next = mutexp->mutex_next_link;
+
+	--mtxregion->stat.st_mutex_free;
+	++mtxregion->stat.st_mutex_inuse;
+	if (mtxregion->stat.st_mutex_inuse > mtxregion->stat.st_mutex_inuse_max)
+		mtxregion->stat.st_mutex_inuse_max =
+		    mtxregion->stat.st_mutex_inuse;
+	if (locksys)
+		MUTEX_SYSTEM_UNLOCK(dbenv);
+
+	/* Initialize the mutex. */
+	memset(mutexp, 0, sizeof(*mutexp));
+
+	F_SET(mutexp, DB_MUTEX_ALLOCATED);
+	if (LF_ISSET(DB_MUTEX_LOGICAL_LOCK))
+		F_SET(mutexp, DB_MUTEX_LOGICAL_LOCK);
+
+#ifdef DIAGNOSTIC
+	mutexp->alloc_id = alloc_id;
+#else
+	COMPQUIET(alloc_id, 0);
+#endif
+
+	if ((ret = __mutex_init(dbenv, *indxp, flags)) != 0)
+		(void)__mutex_free_int(dbenv, locksys, indxp);
+
+	return (ret);
+}
+
+/*
+ * __mutex_free --
+ *	Free a mutex.
+ *
+ * PUBLIC: int __mutex_free __P((DB_ENV *, db_mutex_t *));
+ */
+int
+__mutex_free(dbenv, indxp)
+	DB_ENV *dbenv;
+	db_mutex_t *indxp;
+{
+	/*
+	 * There is no explicit ordering in how the regions are cleaned up
+	 * up and/or discarded when an environment is destroyed (either a
+	 * private environment is closed or a public environment is removed).
+	 * The way we deal with mutexes is to clean up all remaining mutexes
+	 * when we close the mutex environment (because we have to be able to
+	 * do that anyway, after a crash), which means we don't have to deal
+	 * with region cleanup ordering on normal environment destruction.
+	 * All that said, what it really means is we can get here without a
+	 * mpool region.  It's OK, the mutex has been, or will be, destroyed.
+	 *
+	 * If the mutex has never been configured, we're done.
+	 */
+	if (!MUTEX_ON(dbenv) || *indxp == MUTEX_INVALID)
+		return (0);
+
+	return (__mutex_free_int(dbenv, 1, indxp));
+}
+
+/*
+ * __mutex_free_int --
+ *	Internal routine to free a mutex.
+ */
+static int
+__mutex_free_int(dbenv, locksys, indxp)
+	DB_ENV *dbenv;
+	int locksys;
+	db_mutex_t *indxp;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	db_mutex_t mutex;
+	int ret;
+
+	mutex = *indxp;
+	*indxp = MUTEX_INVALID;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+	DB_ASSERT(F_ISSET(mutexp, DB_MUTEX_ALLOCATED));
+	F_CLR(mutexp, DB_MUTEX_ALLOCATED);
+
+	ret = __mutex_destroy(dbenv, mutex);
+
+	if (locksys)
+		MUTEX_SYSTEM_LOCK(dbenv);
+
+	/* Link the mutex on the head of the free list. */
+	mutexp->mutex_next_link = mtxregion->mutex_next;
+	mtxregion->mutex_next = mutex;
+	++mtxregion->stat.st_mutex_free;
+	--mtxregion->stat.st_mutex_inuse;
+
+	if (locksys)
+		MUTEX_SYSTEM_UNLOCK(dbenv);
+
+	return (ret);
+}
diff --git a/storage/bdb/mutex/mut_fcntl.c b/storage/bdb/mutex/mut_fcntl.c
index 03521bd77b9..e3efe9269dc 100644
--- a/storage/bdb/mutex/mut_fcntl.c
+++ b/storage/bdb/mutex/mut_fcntl.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mut_fcntl.c,v 11.26 2004/01/28 03:36:18 bostic Exp $
+ * $Id: mut_fcntl.c,v 12.13 2005/11/01 14:42:17 bostic Exp $
  */
 
 #include "db_config.h"
@@ -15,50 +15,26 @@
 #include 
 #include 
 #include 
-#include 				/* SEEK_SET on SunOS. */
 #endif
 
 #include "db_int.h"
+#include "dbinc/mutex_int.h"
 
 /*
  * __db_fcntl_mutex_init --
- *	Initialize a DB mutex structure.
+ *	Initialize a fcntl mutex.
  *
- * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t));
  */
 int
-__db_fcntl_mutex_init(dbenv, mutexp, offset)
+__db_fcntl_mutex_init(dbenv, mutex, flags)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
-	u_int32_t offset;
+	db_mutex_t mutex;
+	u_int32_t flags;
 {
-	u_int32_t save;
-
-	/*
-	 * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
-	 * mutex allocation code (__db_mutex_alloc/free).  Preserve only that
-	 * flag.  This is safe because even if this flag was never explicitly
-	 * set, but happened to be set in memory, it will never be checked or
-	 * acted upon.
-	 */
-	save = F_ISSET(mutexp, MUTEX_MPOOL);
-	memset(mutexp, 0, sizeof(*mutexp));
-	F_SET(mutexp, save);
-
-	/*
-	 * This is where we decide to ignore locks we don't need to set -- if
-	 * the application is private, we don't need any locks.
-	 */
-	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
-		F_SET(mutexp, MUTEX_IGNORE);
-		return (0);
-	}
-
-	mutexp->off = offset;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	mutexp->reg_off = INVALID_ROFF;
-#endif
-	F_SET(mutexp, MUTEX_INITED);
+	COMPQUIET(dbenv, NULL);
+	COMPQUIET(mutex, MUTEX_INVALID);
+	COMPQUIET(flags, 0);
 
 	return (0);
 }
@@ -67,31 +43,44 @@ __db_fcntl_mutex_init(dbenv, mutexp, offset)
  * __db_fcntl_mutex_lock
  *	Lock on a mutex, blocking if necessary.
  *
- * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_fcntl_mutex_lock(dbenv, mutexp)
+__db_fcntl_mutex_lock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	struct flock k_lock;
-	int locked, ms, waited;
+	int locked, ms, ret;
 
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+#ifdef HAVE_STATISTICS
+	if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
+		++mutexp->mutex_set_wait;
+	else
+		++mutexp->mutex_set_nowait;
+#endif
+
 	/* Initialize the lock. */
 	k_lock.l_whence = SEEK_SET;
-	k_lock.l_start = mutexp->off;
+	k_lock.l_start = mutex;
 	k_lock.l_len = 1;
 
-	for (locked = waited = 0;;) {
+	for (locked = 0;;) {
 		/*
 		 * Wait for the lock to become available; wait 1ms initially,
 		 * up to 1 second.
 		 */
-		for (ms = 1; mutexp->pid != 0;) {
-			waited = 1;
+		for (ms = 1; F_ISSET(mutexp, DB_MUTEX_LOCKED);) {
 			__os_yield(NULL, ms * USEC_PER_MS);
 			if ((ms <<= 1) > MS_PER_SEC)
 				ms = MS_PER_SEC;
@@ -100,18 +89,21 @@ __db_fcntl_mutex_lock(dbenv, mutexp)
 		/* Acquire an exclusive kernel lock. */
 		k_lock.l_type = F_WRLCK;
 		if (fcntl(dbenv->lockfhp->fd, F_SETLKW, &k_lock))
-			return (__os_get_errno());
+			goto err;
 
 		/* If the resource is still available, it's ours. */
-		if (mutexp->pid == 0) {
+		if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
 			locked = 1;
-			__os_id(&mutexp->pid);
+
+			F_SET(mutexp, DB_MUTEX_LOCKED);
+			dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+			CHECK_MTX_THREAD(dbenv, mutexp);
 		}
 
 		/* Release the kernel lock. */
 		k_lock.l_type = F_UNLCK;
 		if (fcntl(dbenv->lockfhp->fd, F_SETLK, &k_lock))
-			return (__os_get_errno());
+			goto err;
 
 		/*
 		 * If we got the resource lock we're done.
@@ -126,57 +118,74 @@ __db_fcntl_mutex_lock(dbenv, mutexp)
 			break;
 	}
 
-	if (waited)
-		++mutexp->mutex_set_wait;
-	else
-		++mutexp->mutex_set_nowait;
+#ifdef DIAGNOSTIC
+	/*
+	 * We want to switch threads as often as possible.  Yield every time
+	 * we get a mutex to ensure contention.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+		__os_yield(NULL, 1);
+#endif
 	return (0);
+
+err:	ret = __os_get_errno();
+	__db_err(dbenv, "fcntl lock failed: %s", db_strerror(ret));
+	return (__db_panic(dbenv, ret));
 }
 
 /*
  * __db_fcntl_mutex_unlock --
- *	Release a lock.
+ *	Release a mutex.
  *
- * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_fcntl_mutex_unlock(dbenv, mutexp)
+__db_fcntl_mutex_unlock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
 #ifdef DIAGNOSTIC
-#define	MSG		"mutex_unlock: ERROR: released lock that was unlocked\n"
-#ifndef	STDERR_FILENO
-#define	STDERR_FILENO	2
-#endif
-	if (mutexp->pid == 0)
-		write(STDERR_FILENO, MSG, sizeof(MSG) - 1);
+	if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+		__db_err(dbenv, "fcntl unlock failed: lock already unlocked");
+		return (__db_panic(dbenv, EACCES));
+	}
 #endif
 
 	/*
 	 * Release the resource.  We don't have to acquire any locks because
-	 * processes trying to acquire the lock are checking for a pid set to
-	 * 0/non-0, not to any specific value.
+	 * processes trying to acquire the lock are waiting for the flag to
+	 * go to 0.  Once that happens the waiters will serialize acquiring 
+	 * an exclusive kernel lock before locking the mutex.
 	 */
-	mutexp->pid = 0;
+	F_CLR(mutexp, DB_MUTEX_LOCKED);
 
 	return (0);
 }
 
 /*
  * __db_fcntl_mutex_destroy --
- *	Destroy a DB_MUTEX.
+ *	Destroy a mutex.
  *
- * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
+ * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_fcntl_mutex_destroy(mutexp)
-	DB_MUTEX *mutexp;
+__db_fcntl_mutex_destroy(dbenv, mutex)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
 {
-	COMPQUIET(mutexp, NULL);
+	COMPQUIET(dbenv, NULL);
+	COMPQUIET(mutex, MUTEX_INVALID);
 
 	return (0);
 }
diff --git a/storage/bdb/mutex/mut_method.c b/storage/bdb/mutex/mut_method.c
new file mode 100644
index 00000000000..a9db642a0df
--- /dev/null
+++ b/storage/bdb/mutex/mut_method.c
@@ -0,0 +1,277 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2004
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: mut_method.c,v 12.5 2005/10/07 20:21:34 ubell Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+
+#include 
+#endif
+
+#include "db_int.h"
+#include "dbinc/mutex_int.h"
+
+/*
+ * __mutex_alloc_pp --
+ *	Allocate a mutex, application method.
+ *
+ * PUBLIC: int __mutex_alloc_pp __P((DB_ENV *, u_int32_t, db_mutex_t *));
+ */
+int
+__mutex_alloc_pp(dbenv, flags, indxp)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+	db_mutex_t *indxp;
+{
+	DB_THREAD_INFO *ip;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+
+	if (flags != 0 && flags != DB_MUTEX_SELF_BLOCK)
+		return (__db_ferr(dbenv, "DB_ENV->mutex_alloc", 0));
+
+	ENV_ENTER(dbenv, ip);
+	ret = __mutex_alloc(dbenv, MTX_APPLICATION, flags, indxp);
+	ENV_LEAVE(dbenv, ip);
+
+	return (ret);
+}
+
+/*
+ * __mutex_free_pp --
+ *	Destroy a mutex, application method.
+ *
+ * PUBLIC: int __mutex_free_pp __P((DB_ENV *, db_mutex_t));
+ */
+int
+__mutex_free_pp(dbenv, indx)
+	DB_ENV *dbenv;
+	db_mutex_t indx;
+{
+	DB_THREAD_INFO *ip;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+
+	if (indx == MUTEX_INVALID)
+		return (EINVAL);
+
+	/*
+	 * Internally Berkeley DB passes around the db_mutex_t address on
+	 * free, because we want to make absolutely sure the slot gets
+	 * overwritten with MUTEX_INVALID.  We don't export MUTEX_INVALID,
+	 * so we don't export that part of the API, either.
+	 */
+	ENV_ENTER(dbenv, ip);
+	ret = __mutex_free(dbenv, &indx);
+	ENV_LEAVE(dbenv, ip);
+
+	return (ret);
+}
+
+/*
+ * __mutex_lock --
+ *	Lock a mutex, application method.
+ *
+ * PUBLIC: int __mutex_lock_pp __P((DB_ENV *, db_mutex_t));
+ */
+int
+__mutex_lock_pp(dbenv, indx)
+	DB_ENV *dbenv;
+	db_mutex_t indx;
+{
+	PANIC_CHECK(dbenv);
+
+	if (indx == MUTEX_INVALID)
+		return (EINVAL);
+
+	return (__mutex_lock(dbenv, indx));
+}
+
+/*
+ * __mutex_unlock --
+ *	Unlock a mutex, application method.
+ *
+ * PUBLIC: int __mutex_unlock_pp __P((DB_ENV *, db_mutex_t));
+ */
+int
+__mutex_unlock_pp(dbenv, indx)
+	DB_ENV *dbenv;
+	db_mutex_t indx;
+{
+	PANIC_CHECK(dbenv);
+
+	if (indx == MUTEX_INVALID)
+		return (EINVAL);
+
+	return (__mutex_unlock(dbenv, indx));
+}
+
+/*
+ * __mutex_get_align --
+ *	DB_ENV->mutex_get_align.
+ *
+ * PUBLIC: int __mutex_get_align __P((DB_ENV *, u_int32_t *));
+ */
+int
+__mutex_get_align(dbenv, alignp)
+	DB_ENV *dbenv;
+	u_int32_t *alignp;
+{
+	if (MUTEX_ON(dbenv))
+		*alignp = ((DB_MUTEXREGION *)((DB_MUTEXMGR *)
+		    dbenv->mutex_handle)->reginfo.primary)->stat.st_mutex_align;
+	else
+		*alignp = dbenv->mutex_align;
+	return (0);
+}
+
+/*
+ * __mutex_set_align --
+ *	DB_ENV->mutex_set_align.
+ *
+ * PUBLIC: int __mutex_set_align __P((DB_ENV *, u_int32_t));
+ */
+int
+__mutex_set_align(dbenv, align)
+	DB_ENV *dbenv;
+	u_int32_t align;
+{
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_align");
+
+	if (align == 0 || !POWER_OF_TWO(align)) {
+		__db_err(dbenv,
+    "DB_ENV->mutex_set_align: alignment value must be a non-zero power-of-two");
+		return (EINVAL);
+	}
+
+	dbenv->mutex_align = align;
+	return (0);
+}
+
+/*
+ * __mutex_get_increment --
+ *	DB_ENV->mutex_get_increment.
+ *
+ * PUBLIC: int __mutex_get_increment __P((DB_ENV *, u_int32_t *));
+ */
+int
+__mutex_get_increment(dbenv, incrementp)
+	DB_ENV *dbenv;
+	u_int32_t *incrementp;
+{
+	/*
+	 * We don't maintain the increment in the region (it just makes
+	 * no sense).  Return whatever we have configured on this handle,
+	 * nobody is ever going to notice.
+	 */
+	*incrementp = dbenv->mutex_inc;
+	return (0);
+}
+
+/*
+ * __mutex_set_increment --
+ *	DB_ENV->mutex_set_increment.
+ *
+ * PUBLIC: int __mutex_set_increment __P((DB_ENV *, u_int32_t));
+ */
+int
+__mutex_set_increment(dbenv, increment)
+	DB_ENV *dbenv;
+	u_int32_t increment;
+{
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_increment");
+
+	dbenv->mutex_cnt = 0;
+	dbenv->mutex_inc = increment;
+	return (0);
+}
+
+/*
+ * __mutex_get_max --
+ *	DB_ENV->mutex_get_max.
+ *
+ * PUBLIC: int __mutex_get_max __P((DB_ENV *, u_int32_t *));
+ */
+int
+__mutex_get_max(dbenv, maxp)
+	DB_ENV *dbenv;
+	u_int32_t *maxp;
+{
+	if (MUTEX_ON(dbenv))
+		*maxp = ((DB_MUTEXREGION *)((DB_MUTEXMGR *)
+		    dbenv->mutex_handle)->reginfo.primary)->stat.st_mutex_cnt;
+	else
+		*maxp = dbenv->mutex_cnt;
+	return (0);
+}
+
+/*
+ * __mutex_set_max --
+ *	DB_ENV->mutex_set_max.
+ *
+ * PUBLIC: int __mutex_set_max __P((DB_ENV *, u_int32_t));
+ */
+int
+__mutex_set_max(dbenv, max)
+	DB_ENV *dbenv;
+	u_int32_t max;
+{
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_max");
+
+	dbenv->mutex_cnt = max;
+	dbenv->mutex_inc = 0;
+	return (0);
+}
+
+/*
+ * __mutex_get_tas_spins --
+ *	DB_ENV->mutex_get_tas_spins.
+ *
+ * PUBLIC: int __mutex_get_tas_spins __P((DB_ENV *, u_int32_t *));
+ */
+int
+__mutex_get_tas_spins(dbenv, tas_spinsp)
+	DB_ENV *dbenv;
+	u_int32_t *tas_spinsp;
+{
+	if (MUTEX_ON(dbenv))
+		*tas_spinsp = ((DB_MUTEXREGION *)((DB_MUTEXMGR *)dbenv->
+		    mutex_handle)->reginfo.primary)->stat.st_mutex_tas_spins;
+	else
+		*tas_spinsp = dbenv->mutex_tas_spins;
+	return (0);
+}
+
+/*
+ * __mutex_set_tas_spins --
+ *	DB_ENV->mutex_set_tas_spins.
+ *
+ * PUBLIC: int __mutex_set_tas_spins __P((DB_ENV *, u_int32_t));
+ */
+int
+__mutex_set_tas_spins(dbenv, tas_spins)
+	DB_ENV *dbenv;
+	u_int32_t tas_spins;
+{
+	/*
+	 * There's a theoretical race here, but I'm not interested in locking
+	 * the test-and-set spin count.  The worst possibility is a thread
+	 * reads out a bad spin count and spins until it gets the lock, but
+	 * that's awfully unlikely.
+	 */
+	if (MUTEX_ON(dbenv))
+		((DB_MUTEXREGION *)((DB_MUTEXMGR *)dbenv->mutex_handle)
+		    ->reginfo.primary)->stat.st_mutex_tas_spins = tas_spins;
+	else
+		dbenv->mutex_tas_spins = tas_spins;
+	return (0);
+}
diff --git a/storage/bdb/mutex/mut_pthread.c b/storage/bdb/mutex/mut_pthread.c
index 6507eba7330..e32e4852412 100644
--- a/storage/bdb/mutex/mut_pthread.c
+++ b/storage/bdb/mutex/mut_pthread.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mut_pthread.c,v 11.62 2004/09/22 16:27:05 bostic Exp $
+ * $Id: mut_pthread.c,v 12.12 2005/11/01 00:44:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,20 +13,10 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
-
-#ifdef DIAGNOSTIC
-#undef	MSG1
-#define	MSG1		"mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
-#undef	MSG2
-#define	MSG2		"mutex_unlock: ERROR: lock already unlocked\n"
-#ifndef	STDERR_FILENO
-#define	STDERR_FILENO	2
-#endif
-#endif
+#include "dbinc/mutex_int.h"
 
 #ifdef HAVE_MUTEX_SOLARIS_LWP
 #define	pthread_cond_destroy(x)		0
@@ -36,12 +26,6 @@
 #define	pthread_mutex_lock		_lwp_mutex_lock
 #define	pthread_mutex_trylock		_lwp_mutex_trylock
 #define	pthread_mutex_unlock		_lwp_mutex_unlock
-/*
- * !!!
- * _lwp_self returns the LWP process ID which isn't a unique per-thread
- * identifier.  Use pthread_self instead, it appears to work even if we
- * are not a pthreads application.
- */
 #endif
 #ifdef HAVE_MUTEX_UI_THREADS
 #define	pthread_cond_destroy(x)		cond_destroy
@@ -51,89 +35,78 @@
 #define	pthread_mutex_lock		mutex_lock
 #define	pthread_mutex_trylock		mutex_trylock
 #define	pthread_mutex_unlock		mutex_unlock
-#define	pthread_self			thr_self
 #endif
 
 #define	PTHREAD_UNLOCK_ATTEMPTS	5
 
+/*
+ * IBM's MVS pthread mutex implementation returns -1 and sets errno rather than
+ * returning errno itself.  As -1 is not a valid errno value, assume functions
+ * returning -1 have set errno.  If they haven't, return a random error value.
+ */
+#define	RET_SET(f, ret) do {						\
+	if (((ret) = (f)) == -1 && ((ret) = errno) == 0)		\
+		(ret) = EAGAIN;						\
+} while (0)
+
 /*
  * __db_pthread_mutex_init --
- *	Initialize a DB_MUTEX.
+ *	Initialize a pthread mutex.
  *
- * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t));
  */
 int
-__db_pthread_mutex_init(dbenv, mutexp, flags)
+__db_pthread_mutex_init(dbenv, mutex, flags)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 	u_int32_t flags;
 {
-	u_int32_t save;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	int ret;
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 	ret = 0;
 
-	/*
-	 * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
-	 * mutex allocation code (__db_mutex_alloc/free).  Preserve only that
-	 * flag.  This is safe because even if this flag was never explicitly
-	 * set, but happened to be set in memory, it will never be checked or
-	 * acted upon.
-	 */
-	save = F_ISSET(mutexp, MUTEX_MPOOL);
-	memset(mutexp, 0, sizeof(*mutexp));
-	F_SET(mutexp, save);
-
-	/*
-	 * If this is a thread lock or the process has told us that there are
-	 * no other processes in the environment, use thread-only locks, they
-	 * are faster in some cases.
-	 *
-	 * This is where we decide to ignore locks we don't need to set -- if
-	 * the application isn't threaded, there aren't any threads to block.
-	 */
-	if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
-		if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
-			F_SET(mutexp, MUTEX_IGNORE);
-			return (0);
-		}
-	}
-
 #ifdef HAVE_MUTEX_PTHREADS
 	{
 	pthread_condattr_t condattr, *condattrp = NULL;
 	pthread_mutexattr_t mutexattr, *mutexattrp = NULL;
 
-	if (!LF_ISSET(MUTEX_THREAD)) {
-		ret = pthread_mutexattr_init(&mutexattr);
+	if (!LF_ISSET(DB_MUTEX_THREAD)) {
+		RET_SET((pthread_mutexattr_init(&mutexattr)), ret);
 #ifndef HAVE_MUTEX_THREAD_ONLY
 		if (ret == 0)
-			ret = pthread_mutexattr_setpshared(
-			    &mutexattr, PTHREAD_PROCESS_SHARED);
+			RET_SET((pthread_mutexattr_setpshared(
+			    &mutexattr, PTHREAD_PROCESS_SHARED)), ret);
 #endif
 		mutexattrp = &mutexattr;
 	}
 
 	if (ret == 0)
-		ret = pthread_mutex_init(&mutexp->mutex, mutexattrp);
+		RET_SET((pthread_mutex_init(&mutexp->mutex, mutexattrp)), ret);
 	if (mutexattrp != NULL)
-		pthread_mutexattr_destroy(mutexattrp);
-	if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
-		if (!LF_ISSET(MUTEX_THREAD)) {
-			ret = pthread_condattr_init(&condattr);
-#ifndef HAVE_MUTEX_THREAD_ONLY
+		(void)pthread_mutexattr_destroy(mutexattrp);
+	if (ret == 0 && LF_ISSET(DB_MUTEX_SELF_BLOCK)) {
+		if (!LF_ISSET(DB_MUTEX_THREAD)) {
+			RET_SET((pthread_condattr_init(&condattr)), ret);
 			if (ret == 0) {
 				condattrp = &condattr;
-				ret = pthread_condattr_setpshared(
-				    &condattr, PTHREAD_PROCESS_SHARED);
-			}
+#ifndef HAVE_MUTEX_THREAD_ONLY
+				RET_SET((pthread_condattr_setpshared(
+				    &condattr, PTHREAD_PROCESS_SHARED)), ret);
 #endif
+			}
 		}
 
 		if (ret == 0)
-			ret = pthread_cond_init(&mutexp->cond, condattrp);
+			RET_SET(
+			    (pthread_cond_init(&mutexp->cond, condattrp)), ret);
 
-		F_SET(mutexp, MUTEX_SELF_BLOCK);
+		F_SET(mutexp, DB_MUTEX_SELF_BLOCK);
 		if (condattrp != NULL)
 			(void)pthread_condattr_destroy(condattrp);
 	}
@@ -149,7 +122,7 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
 	 * initialization values doesn't have surrounding braces.  There's not
 	 * much we can do.
 	 */
-	if (LF_ISSET(MUTEX_THREAD)) {
+	if (LF_ISSET(DB_MUTEX_THREAD)) {
 		static lwp_mutex_t mi = DEFAULTMUTEX;
 
 		mutexp->mutex = mi;
@@ -158,8 +131,8 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
 
 		mutexp->mutex = mi;
 	}
-	if (LF_ISSET(MUTEX_SELF_BLOCK)) {
-		if (LF_ISSET(MUTEX_THREAD)) {
+	if (LF_ISSET(DB_MUTEX_SELF_BLOCK)) {
+		if (LF_ISSET(DB_MUTEX_THREAD)) {
 			static lwp_cond_t ci = DEFAULTCV;
 
 			mutexp->cond = ci;
@@ -168,63 +141,75 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
 
 			mutexp->cond = ci;
 		}
-		F_SET(mutexp, MUTEX_SELF_BLOCK);
+		F_SET(mutexp, DB_MUTEX_SELF_BLOCK);
 	}
 #endif
 #ifdef HAVE_MUTEX_UI_THREADS
 	{
 	int type;
 
-	type = LF_ISSET(MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
+	type = LF_ISSET(DB_MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
 
 	ret = mutex_init(&mutexp->mutex, type, NULL);
-	if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+	if (ret == 0 && LF_ISSET(DB_MUTEX_SELF_BLOCK)) {
 		ret = cond_init(&mutexp->cond, type, NULL);
 
-		F_SET(mutexp, MUTEX_SELF_BLOCK);
+		F_SET(mutexp, DB_MUTEX_SELF_BLOCK);
 	}}
 #endif
 
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	mutexp->reg_off = INVALID_ROFF;
-#endif
-	if (ret == 0)
-		F_SET(mutexp, MUTEX_INITED);
-	else
+	if (ret != 0) {
 		__db_err(dbenv,
 		    "unable to initialize mutex: %s", strerror(ret));
-
+	}
 	return (ret);
 }
 
 /*
  * __db_pthread_mutex_lock
- *	Lock on a mutex, logically blocking if necessary.
+ *	Lock on a mutex, blocking if necessary.
  *
- * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_pthread_mutex_lock(dbenv, mutexp)
+__db_pthread_mutex_lock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
-	u_int32_t nspins;
-	int i, ret, waited;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	int i, ret;
 
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
-	/* Attempt to acquire the resource for N spins. */
-	for (nspins = dbenv->tas_spins; nspins > 0; --nspins)
-		if (pthread_mutex_trylock(&mutexp->mutex) == 0)
-			break;
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 
-	if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+#ifdef HAVE_STATISTICS
+	/*
+	 * We want to know which mutexes are contentious, but don't want to
+	 * do an interlocked test here -- that's slower when the underlying
+	 * system has adaptive mutexes and can perform optimizations like
+	 * spinning only if the thread holding the mutex is actually running
+	 * on a CPU.  Make a guess, using a normal load instruction.
+	 */
+	if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
+		++mutexp->mutex_set_wait;
+	else
+		++mutexp->mutex_set_nowait;
+#endif
+
+	RET_SET((pthread_mutex_lock(&mutexp->mutex)), ret);
+	if (ret != 0)
 		goto err;
 
-	if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
-		for (waited = 0; mutexp->locked != 0; waited = 1) {
-			ret = pthread_cond_wait(&mutexp->cond, &mutexp->mutex);
+	if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) {
+		while (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+			RET_SET((pthread_cond_wait(
+			    &mutexp->cond, &mutexp->mutex)), ret);
 			/*
 			 * !!!
 			 * Solaris bug workaround:
@@ -241,20 +226,14 @@ __db_pthread_mutex_lock(dbenv, mutexp)
 #endif
 			    ret != ETIMEDOUT) {
 				(void)pthread_mutex_unlock(&mutexp->mutex);
-				return (ret);
+				goto err;
 			}
 		}
 
-		if (waited)
-			++mutexp->mutex_set_wait;
-		else
-			++mutexp->mutex_set_nowait;
+		F_SET(mutexp, DB_MUTEX_LOCKED);
+		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+		CHECK_MTX_THREAD(dbenv, mutexp);
 
-#ifdef DIAGNOSTIC
-		mutexp->locked = (u_int32_t)pthread_self();
-#else
-		mutexp->locked = 1;
-#endif
 		/*
 		 * According to HP-UX engineers contacted by Netscape,
 		 * pthread_mutex_unlock() will occasionally return EFAULT
@@ -266,100 +245,129 @@ __db_pthread_mutex_lock(dbenv, mutexp)
 		 */
 		i = PTHREAD_UNLOCK_ATTEMPTS;
 		do {
-			ret = pthread_mutex_unlock(&mutexp->mutex);
+			RET_SET((pthread_mutex_unlock(&mutexp->mutex)), ret);
 		} while (ret == EFAULT && --i > 0);
 		if (ret != 0)
 			goto err;
 	} else {
-		if (nspins == dbenv->tas_spins)
-			++mutexp->mutex_set_nowait;
-		else if (nspins > 0) {
-			++mutexp->mutex_set_spin;
-			mutexp->mutex_set_spins += dbenv->tas_spins - nspins;
-		} else
-			++mutexp->mutex_set_wait;
 #ifdef DIAGNOSTIC
-		if (mutexp->locked) {
-			char msgbuf[128];
-			(void)snprintf(msgbuf,
-			    sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
-			(void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
+		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+			char buf[DB_THREADID_STRLEN];
+			(void)dbenv->thread_id_string(dbenv,
+			    mutexp->pid, mutexp->tid, buf);
+			__db_err(dbenv,
+		    "pthread lock failed: lock currently in use: pid/tid: %s",
+			    buf);
+			ret = EINVAL;
+			goto err;
 		}
-		mutexp->locked = (u_int32_t)pthread_self();
-#else
-		mutexp->locked = 1;
 #endif
+		F_SET(mutexp, DB_MUTEX_LOCKED);
+		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+		CHECK_MTX_THREAD(dbenv, mutexp);
 	}
+
+#ifdef DIAGNOSTIC
+	/*
+	 * We want to switch threads as often as possible.  Yield every time
+	 * we get a mutex to ensure contention.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+		__os_yield(NULL, 1);
+#endif
 	return (0);
 
-err:	__db_err(dbenv, "unable to lock mutex: %s", strerror(ret));
-	return (ret);
+err:	__db_err(dbenv, "pthread lock failed: %s", db_strerror(ret));
+	return (__db_panic(dbenv, ret));
 }
 
 /*
  * __db_pthread_mutex_unlock --
- *	Release a lock.
+ *	Release a mutex.
  *
- * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_pthread_mutex_unlock(dbenv, mutexp)
+__db_pthread_mutex_unlock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	int i, ret;
 
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
-#ifdef DIAGNOSTIC
-	if (!mutexp->locked)
-		(void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
-#endif
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 
-	if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
-		if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+#ifdef DIAGNOSTIC
+	if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+		__db_err(dbenv, "pthread unlock failed: lock already unlocked");
+		return (__db_panic(dbenv, EACCES));
+	}
+#endif
+	if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) {
+		RET_SET((pthread_mutex_lock(&mutexp->mutex)), ret);
+		if (ret != 0)
 			goto err;
 
-		mutexp->locked = 0;
-
-		if ((ret = pthread_cond_signal(&mutexp->cond)) != 0)
-			return (ret);
+		F_CLR(mutexp, DB_MUTEX_LOCKED);
 
+		RET_SET((pthread_cond_signal(&mutexp->cond)), ret);
+		if (ret != 0)
+			goto err;
 	} else
-		mutexp->locked = 0;
+		F_CLR(mutexp, DB_MUTEX_LOCKED);
 
 	/* See comment above;  workaround for [#2471]. */
 	i = PTHREAD_UNLOCK_ATTEMPTS;
 	do {
-		ret = pthread_mutex_unlock(&mutexp->mutex);
+		RET_SET((pthread_mutex_unlock(&mutexp->mutex)), ret);
 	} while (ret == EFAULT && --i > 0);
-	return (ret);
 
-err:	__db_err(dbenv, "unable to unlock mutex: %s", strerror(ret));
+err:	if (ret != 0) {
+		__db_err(dbenv, "pthread unlock failed: %s", db_strerror(ret));
+		return (__db_panic(dbenv, ret));
+	}
 	return (ret);
 }
 
 /*
  * __db_pthread_mutex_destroy --
- *	Destroy a DB_MUTEX.
+ *	Destroy a mutex.
  *
- * PUBLIC: int __db_pthread_mutex_destroy __P((DB_MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_destroy __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_pthread_mutex_destroy(mutexp)
-	DB_MUTEX *mutexp;
+__db_pthread_mutex_destroy(dbenv, mutex)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
 {
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	int ret, t_ret;
 
-	if (F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
 	ret = 0;
-	if (F_ISSET(mutexp, MUTEX_SELF_BLOCK) &&
-	    (ret = pthread_cond_destroy(&mutexp->cond)) != 0)
-		__db_err(NULL, "unable to destroy cond: %s", strerror(ret));
-	if ((t_ret = pthread_mutex_destroy(&mutexp->mutex)) != 0) {
+	if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) {
+		RET_SET((pthread_cond_destroy(&mutexp->cond)), ret);
+		if (ret != 0)
+			__db_err(NULL,
+			    "unable to destroy cond: %s", strerror(ret));
+	}
+	RET_SET((pthread_mutex_destroy(&mutexp->mutex)), t_ret);
+	if (t_ret != 0) {
 		__db_err(NULL, "unable to destroy mutex: %s", strerror(t_ret));
 		if (ret == 0)
 			ret = t_ret;
diff --git a/storage/bdb/mutex/mut_region.c b/storage/bdb/mutex/mut_region.c
new file mode 100644
index 00000000000..5d7c7333118
--- /dev/null
+++ b/storage/bdb/mutex/mut_region.c
@@ -0,0 +1,362 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2004
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: mut_region.c,v 12.9 2005/10/27 15:16:13 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/log.h"
+#include "dbinc/lock.h"
+#include "dbinc/mp.h"
+#include "dbinc/mutex_int.h"
+
+static int __mutex_region_init __P((DB_ENV *, DB_MUTEXMGR *));
+static size_t __mutex_region_size __P((DB_ENV *));
+
+/*
+ * __mutex_open --
+ *	Open a mutex region.
+ *
+ * PUBLIC: int __mutex_open __P((DB_ENV *));
+ */
+int
+__mutex_open(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	db_mutex_t mutex;
+	u_int i;
+	int ret;
+
+	/*
+	 * Initialize the DB_ENV handle information if not already initialized.
+	 *
+	 * Align mutexes on the byte boundaries specified by the application.
+	 */
+	if (dbenv->mutex_align == 0)
+		dbenv->mutex_align = MUTEX_ALIGN;
+	if (dbenv->mutex_tas_spins == 0)
+		dbenv->mutex_tas_spins = __os_spin(dbenv);
+
+	/*
+	 * If the user didn't set an absolute value on the number of mutexes
+	 * we'll need, figure it out.  We're conservative in our allocation,
+	 * we need mutexes for DB handles, group-commit queues and other things
+	 * applications allocate at run-time.  The application may have kicked
+	 * up our count to allocate its own mutexes, add that in.
+	 */
+	if (dbenv->mutex_cnt == 0)
+		dbenv->mutex_cnt =
+		    __lock_region_mutex_count(dbenv) +
+		    __log_region_mutex_count(dbenv) +
+		    __memp_region_mutex_count(dbenv) +
+		    dbenv->mutex_inc + 500;
+
+	/* Create/initialize the mutex manager structure. */
+	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MUTEXMGR), &mtxmgr)) != 0)
+		return (ret);
+
+	/* Join/create the txn region. */
+	mtxmgr->reginfo.dbenv = dbenv;
+	mtxmgr->reginfo.type = REGION_TYPE_MUTEX;
+	mtxmgr->reginfo.id = INVALID_REGION_ID;
+	mtxmgr->reginfo.flags = REGION_JOIN_OK;
+	if (F_ISSET(dbenv, DB_ENV_CREATE))
+		F_SET(&mtxmgr->reginfo, REGION_CREATE_OK);
+	if ((ret = __db_r_attach(dbenv,
+	    &mtxmgr->reginfo, __mutex_region_size(dbenv))) != 0)
+		goto err;
+
+	/* If we created the region, initialize it. */
+	if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE))
+		if ((ret = __mutex_region_init(dbenv, mtxmgr)) != 0)
+			goto err;
+
+	/* Set the local addresses. */
+	mtxregion = mtxmgr->reginfo.primary =
+	    R_ADDR(&mtxmgr->reginfo, mtxmgr->reginfo.rp->primary);
+	mtxmgr->mutex_array = R_ADDR(&mtxmgr->reginfo, mtxregion->mutex_offset);
+
+	dbenv->mutex_handle = mtxmgr;
+
+	/* Allocate initial queue of mutexes. */
+	if (dbenv->mutex_iq != NULL) {
+		DB_ASSERT(F_ISSET(&mtxmgr->reginfo, REGION_CREATE));
+		for (i = 0; i < dbenv->mutex_iq_next; ++i) {
+			if ((ret = __mutex_alloc_int(
+			    dbenv, 0, dbenv->mutex_iq[i].alloc_id,
+			    dbenv->mutex_iq[i].flags, &mutex)) != 0)
+				goto err;
+			/*
+			 * Confirm we allocated the right index, correcting
+			 * for avoiding slot 0 (MUTEX_INVALID).
+			 */
+			DB_ASSERT(mutex == i + 1);
+		}
+		__os_free(dbenv, dbenv->mutex_iq);
+		dbenv->mutex_iq = NULL;
+
+		/*
+		 * This is the first place we can test mutexes and we need to
+		 * know if they're working.  (They CAN fail, for example on
+		 * SunOS, when using fcntl(2) for locking and using an
+		 * in-memory filesystem as the database environment directory.
+		 * But you knew that, I'm sure -- it probably wasn't worth
+		 * mentioning.)
+		 */
+		mutex = MUTEX_INVALID;
+		if ((ret =
+		    __mutex_alloc(dbenv, MTX_MUTEX_TEST, 0, &mutex) != 0) ||
+		    (ret = __mutex_lock(dbenv, mutex)) != 0 ||
+		    (ret = __mutex_unlock(dbenv, mutex)) != 0 ||
+		    (ret = __mutex_free(dbenv, &mutex)) != 0) {
+			__db_err(dbenv,
+		    "Unable to acquire/release a mutex; check configuration");
+			goto err;
+		}
+	}
+
+	/*
+	 * Initialize thread tracking.  We want to do this as early
+	 * as possible in case we die.  This sits in the mutex region
+	 * so do it now.
+	 */
+	if ((ret = __env_thread_init(dbenv,
+	    F_ISSET(&mtxmgr->reginfo, REGION_CREATE))) != 0)
+		goto err;
+
+	return (0);
+
+err:	dbenv->mutex_handle = NULL;
+	if (mtxmgr->reginfo.addr != NULL)
+		(void)__db_r_detach(dbenv, &mtxmgr->reginfo, 0);
+
+	__os_free(dbenv, mtxmgr);
+	return (ret);
+}
+
+/*
+ * __mutex_region_init --
+ *	Initialize a mutex region in shared memory.
+ */
+static int
+__mutex_region_init(dbenv, mtxmgr)
+	DB_ENV *dbenv;
+	DB_MUTEXMGR *mtxmgr;
+{
+	DB_MUTEXREGION *mtxregion;
+	DB_MUTEX *mutexp;
+	db_mutex_t i;
+	int ret;
+	void *mutex_array;
+
+	COMPQUIET(mutexp, NULL);
+
+	if ((ret = __db_shalloc(&mtxmgr->reginfo,
+	    sizeof(DB_MUTEXREGION), 0, &mtxmgr->reginfo.primary)) != 0) {
+		__db_err(dbenv,
+		    "Unable to allocate memory for the mutex region");
+		return (ret);
+	}
+	mtxmgr->reginfo.rp->primary =
+	    R_OFFSET(&mtxmgr->reginfo, mtxmgr->reginfo.primary);
+	mtxregion = mtxmgr->reginfo.primary;
+	memset(mtxregion, 0, sizeof(*mtxregion));
+
+	if ((ret = __mutex_alloc(
+	    dbenv, MTX_MUTEX_REGION, 0, &mtxregion->mtx_region)) != 0)
+		return (ret);
+
+	mtxregion->mutex_size =
+	    (size_t)DB_ALIGN(sizeof(DB_MUTEX), dbenv->mutex_align);
+
+	mtxregion->stat.st_mutex_align = dbenv->mutex_align;
+	mtxregion->stat.st_mutex_cnt = dbenv->mutex_cnt;
+	mtxregion->stat.st_mutex_tas_spins = dbenv->mutex_tas_spins;
+
+	/*
+	 * Get a chunk of memory to be used for the mutexes themselves.  Each
+	 * piece of the memory must be properly aligned.
+	 *
+	 * The OOB mutex (MUTEX_INVALID) is 0.  To make this work, we ignore
+	 * the first allocated slot when we build the free list.  We have to
+	 * correct the count by 1 here, though, otherwise our counter will be
+	 * off by 1.
+	 */
+	if ((ret = __db_shalloc(&mtxmgr->reginfo,
+	    (mtxregion->stat.st_mutex_cnt + 1) * mtxregion->mutex_size,
+	    mtxregion->stat.st_mutex_align, &mutex_array)) != 0) {
+		__db_err(dbenv,
+		    "Unable to allocate memory for mutexes from the region");
+		return (ret);
+	}
+
+	mtxregion->mutex_offset = R_OFFSET(&mtxmgr->reginfo, mutex_array);
+	mtxmgr->mutex_array = mutex_array;
+
+	/*
+	 * Put the mutexes on a free list and clear the allocated flag.
+	 *
+	 * The OOB mutex (MUTEX_INVALID) is 0, skip it.
+	 *
+	 * The comparison is <, not <=, because we're looking ahead one
+	 * in each link.
+	 */
+	for (i = 1; i < mtxregion->stat.st_mutex_cnt; ++i) {
+		mutexp = MUTEXP_SET(i);
+		mutexp->flags = 0;
+		mutexp->mutex_next_link = i + 1;
+	}
+	mutexp = MUTEXP_SET(i);
+	mutexp->flags = 0;
+	mutexp->mutex_next_link = MUTEX_INVALID;
+	mtxregion->mutex_next = 1;
+	mtxregion->stat.st_mutex_free = mtxregion->stat.st_mutex_cnt;
+	mtxregion->stat.st_mutex_inuse = mtxregion->stat.st_mutex_inuse_max = 0;
+
+	return (0);
+}
+
+/*
+ * __mutex_dbenv_refresh --
+ *	Clean up after the mutex region on a close or failed open.
+ *
+ * PUBLIC: int __mutex_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__mutex_dbenv_refresh(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	REGINFO *reginfo;
+	int ret;
+
+	mtxmgr = dbenv->mutex_handle;
+	reginfo = &mtxmgr->reginfo;
+	mtxregion = mtxmgr->reginfo.primary;
+
+	/*
+	 * If a private region, return the memory to the heap.  Not needed for
+	 * filesystem-backed or system shared memory regions, that memory isn't
+	 * owned by any particular process.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+		/*
+		 * If destroying the mutex region, return any system resources
+		 * to the system.
+		 */
+		__mutex_resource_return(dbenv, reginfo);
+#endif
+		/* Discard the mutex array. */
+		__db_shalloc_free(
+		    reginfo, R_ADDR(reginfo, mtxregion->mutex_offset));
+	}
+
+	/* Detach from the region. */
+	ret = __db_r_detach(dbenv, reginfo, 0);
+
+	__os_free(dbenv, mtxmgr);
+
+	dbenv->mutex_handle = NULL;
+
+	return (ret);
+}
+
+/*
+ * __mutex_region_size --
+ *	 Return the amount of space needed for the mutex region.
+ */
+static size_t
+__mutex_region_size(dbenv)
+	DB_ENV *dbenv;
+{
+	size_t s;
+
+	s = sizeof(DB_MUTEXMGR) + 1024;
+	s += dbenv->mutex_cnt *
+	    __db_shalloc_size(sizeof(DB_MUTEX), dbenv->mutex_align);
+	/*
+	 * Allocate space for thread info blocks.  Max is only advisory,
+	 * so we allocate 25% more.
+	 */
+	s += (dbenv->thr_max + dbenv->thr_max/4) *
+	    __db_shalloc_size(sizeof(DB_THREAD_INFO), sizeof(roff_t));
+	s += dbenv->thr_nbucket *
+	    __db_shalloc_size(sizeof(DB_HASHTAB), sizeof(roff_t));
+	return (s);
+}
+
+#ifdef	HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __mutex_resource_return
+ *	Return any system-allocated mutex resources to the system.
+ *
+ * PUBLIC: void __mutex_resource_return __P((DB_ENV *, REGINFO *));
+ */
+void
+__mutex_resource_return(dbenv, infop)
+	DB_ENV *dbenv;
+	REGINFO *infop;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr, mtxmgr_st;
+	DB_MUTEXREGION *mtxregion;
+	db_mutex_t i;
+	void *orig_handle;
+
+	/*
+	 * This routine is called in two cases: when discarding the regions
+	 * from a previous Berkeley DB run, during recovery, and two, when
+	 * discarding regions as we shut down the database environment.
+	 *
+	 * Walk the list of mutexes and destroy any live ones.
+	 *
+	 * This is just like joining a region -- the REGINFO we're handed
+	 * is the same as the one returned by __db_r_attach(), all we have
+	 * to do is fill in the links.
+	 *
+	 * !!!
+	 * The region may be corrupted, of course.  We're safe because the
+	 * only things we look at are things that are initialized when the
+	 * region is created, and never modified after that.
+	 */
+	memset(&mtxmgr_st, 0, sizeof(mtxmgr_st));
+	mtxmgr = &mtxmgr_st;
+	mtxmgr->reginfo = *infop;
+	mtxregion = mtxmgr->reginfo.primary =
+	    R_ADDR(&mtxmgr->reginfo, mtxmgr->reginfo.rp->primary);
+	mtxmgr->mutex_array = R_ADDR(&mtxmgr->reginfo, mtxregion->mutex_offset);
+
+	/*
+	 * This is a little strange, but the mutex_handle is what all of the
+	 * underlying mutex routines will use to determine if they should do
+	 * any work and to find their information.  Save/restore the handle
+	 * around the work loop.
+	 *
+	 * The OOB mutex (MUTEX_INVALID) is 0, skip it.
+	 */
+	orig_handle = dbenv->mutex_handle;
+	dbenv->mutex_handle = mtxmgr;
+	for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) {
+		mutexp = MUTEXP_SET(i);
+		if (F_ISSET(mutexp, DB_MUTEX_ALLOCATED))
+			(void)__mutex_destroy(dbenv, i);
+	}
+	dbenv->mutex_handle = orig_handle;
+}
+#endif
diff --git a/storage/bdb/mutex/mut_stat.c b/storage/bdb/mutex/mut_stat.c
new file mode 100644
index 00000000000..cffff239c76
--- /dev/null
+++ b/storage/bdb/mutex/mut_stat.c
@@ -0,0 +1,454 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2004
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: mut_stat.c,v 12.10 2005/11/01 00:44:28 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/mutex_int.h"
+
+#ifdef HAVE_STATISTICS
+static int __mutex_print_all __P((DB_ENV *, u_int32_t));
+static const char *__mutex_print_id __P((int));
+static int __mutex_print_stats __P((DB_ENV *, u_int32_t));
+static void __mutex_print_summary __P((DB_ENV *));
+
+/*
+ * __mutex_stat --
+ *	DB_ENV->mutex_stat.
+ *
+ * PUBLIC: int __mutex_stat __P((DB_ENV *, DB_MUTEX_STAT **, u_int32_t));
+ */
+int
+__mutex_stat(dbenv, statp, flags)
+	DB_ENV *dbenv;
+	DB_MUTEX_STAT **statp;
+	u_int32_t flags;
+{
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	DB_MUTEX_STAT *stats;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+
+	if ((ret = __db_fchk(dbenv,
+	    "DB_ENV->mutex_stat", flags, DB_STAT_CLEAR)) != 0)
+		return (ret);
+
+	*statp = NULL;
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+
+	if ((ret = __os_umalloc(dbenv, sizeof(DB_MUTEX_STAT), &stats)) != 0)
+		return (ret);
+
+	MUTEX_SYSTEM_LOCK(dbenv);
+
+	/*
+	 * Most fields are maintained in the underlying region structure.
+	 * Region size and region mutex are not.
+	 */
+	*stats = mtxregion->stat;
+	stats->st_regsize = mtxmgr->reginfo.rp->size;
+	__mutex_set_wait_info(dbenv, mtxregion->mtx_region,
+	    &stats->st_region_wait, &stats->st_region_nowait);
+	if (LF_ISSET(DB_STAT_CLEAR))
+		__mutex_clear(dbenv, mtxregion->mtx_region);
+
+	MUTEX_SYSTEM_UNLOCK(dbenv);
+
+	*statp = stats;
+	return (0);
+}
+
+/*
+ * __mutex_stat_print
+ *	DB_ENV->mutex_stat_print method.
+ *
+ * PUBLIC: int __mutex_stat_print __P((DB_ENV *, u_int32_t));
+ */
+int
+__mutex_stat_print(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	u_int32_t orig_flags;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+
+	if ((ret = __db_fchk(dbenv, "DB_ENV->mutex_stat_print",
+	    flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0)
+		return (ret);
+
+	orig_flags = flags;
+	LF_CLR(DB_STAT_CLEAR);
+	if (flags == 0 || LF_ISSET(DB_STAT_ALL)) {
+		ret = __mutex_print_stats(dbenv, orig_flags);
+		__mutex_print_summary(dbenv);
+		if (flags == 0 || ret != 0)
+			return (ret);
+	}
+
+	if (LF_ISSET(DB_STAT_ALL))
+		ret = __mutex_print_all(dbenv, orig_flags);
+
+	return (0);
+}
+
+static void
+__mutex_print_summary(dbenv)
+	DB_ENV *dbenv;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	db_mutex_t i;
+	u_int32_t counts[MTX_MAX_ENTRY + 2];
+	int alloc_id;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	memset(counts, 0, sizeof(counts));
+
+	for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) {
+		mutexp = MUTEXP_SET(i);
+
+		if (!F_ISSET(mutexp, DB_MUTEX_ALLOCATED))
+			counts[0]++;
+		else if (mutexp->alloc_id > MTX_MAX_ENTRY)
+			counts[MTX_MAX_ENTRY + 1]++;
+		else
+			counts[mutexp->alloc_id]++;
+	}
+	__db_msg(dbenv, "Mutex counts");
+	__db_msg(dbenv, "%d\tUnallocated", counts[0]);
+	for (alloc_id = 1; alloc_id <= MTX_TXN_REGION + 1; alloc_id++)
+		if (counts[alloc_id] != 0)
+			__db_msg(dbenv, "%lu\t%s",
+			    (u_long)counts[alloc_id],
+			    __mutex_print_id(alloc_id));
+
+}
+
+/*
+ * __mutex_print_stats --
+ *	Display default mutex region statistics.
+ */
+static int
+__mutex_print_stats(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_MUTEX_STAT *sp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	REGINFO *infop;
+	THREAD_INFO *thread;
+	int ret;
+
+	if ((ret = __mutex_stat(dbenv, &sp, LF_ISSET(DB_STAT_CLEAR))) != 0)
+		return (ret);
+
+	if (LF_ISSET(DB_STAT_ALL))
+		__db_msg(dbenv, "Default mutex region information:");
+
+	__db_dlbytes(dbenv, "Mutex region size",
+	    (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+	__db_dl_pct(dbenv,
+	    "The number of region locks that required waiting",
+	    (u_long)sp->st_region_wait, DB_PCT(sp->st_region_wait,
+	    sp->st_region_wait + sp->st_region_nowait), NULL);
+	STAT_ULONG("Mutex alignment", sp->st_mutex_align);
+	STAT_ULONG("Mutex test-and-set spins", sp->st_mutex_tas_spins);
+	STAT_ULONG("Mutex total count", sp->st_mutex_cnt);
+	STAT_ULONG("Mutex free count", sp->st_mutex_free);
+	STAT_ULONG("Mutex in-use count", sp->st_mutex_inuse);
+	STAT_ULONG("Mutex maximum in-use count", sp->st_mutex_inuse_max);
+
+	__os_ufree(dbenv, sp);
+
+	/*
+	 * Dump out the info we have on thread tracking, we do it here only
+	 * because we share the region.
+	 */
+	if (dbenv->thr_hashtab != NULL) {
+		mtxmgr = dbenv->mutex_handle;
+		mtxregion = mtxmgr->reginfo.primary;
+		infop = &mtxmgr->reginfo;
+		thread = R_ADDR(infop, mtxregion->thread_off);
+		STAT_ULONG("Thread blocks allocated", thread->thr_count);
+		STAT_ULONG("Thread allocation threshold", thread->thr_max);
+		STAT_ULONG("Thread hash buckets", thread->thr_nbucket);
+	}
+
+	return (0);
+}
+
+/*
+ * __mutex_print_all --
+ *	Display debugging mutex region statistics.
+ */
+static int
+__mutex_print_all(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	static const FN fn[] = {
+		{ DB_MUTEX_ALLOCATED,		"alloc" },
+		{ DB_MUTEX_LOGICAL_LOCK,	"logical" },
+		{ DB_MUTEX_SELF_BLOCK,		"self-block" },
+		{ DB_MUTEX_THREAD,		"thread" },
+		{ 0,			NULL }
+	};
+	DB_MSGBUF mb, *mbp;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	db_mutex_t i;
+
+	DB_MSGBUF_INIT(&mb);
+	mbp = &mb;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+
+	__db_print_reginfo(dbenv, &mtxmgr->reginfo, "Mutex");
+	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+
+	__db_msg(dbenv, "DB_MUTEXREGION structure:");
+	__mutex_print_debug_single(dbenv,
+	    "DB_MUTEXREGION region mutex", mtxregion->mtx_region, flags);
+	STAT_ULONG("Size of the aligned mutex", mtxregion->mutex_size);
+	STAT_ULONG("Next free mutex", mtxregion->mutex_next);
+
+	/*
+	 * The OOB mutex (MUTEX_INVALID) is 0, skip it.
+	 *
+	 * We're not holding the mutex region lock, so we're racing threads of
+	 * control allocating mutexes.  That's OK, it just means we display or
+	 * clear statistics while mutexes are moving.
+	 */
+	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
+	__db_msg(dbenv, "mutex\twait/nowait, pct wait, holder, flags");
+	for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) {
+		mutexp = MUTEXP_SET(i);
+
+		if (!F_ISSET(mutexp, DB_MUTEX_ALLOCATED))
+			continue;
+
+		__db_msgadd(dbenv, mbp, "%5lu\t", (u_long)i);
+
+		__mutex_print_debug_stats(dbenv, mbp, i, flags);
+
+		if (mutexp->alloc_id != 0)
+			__db_msgadd(dbenv,
+			    mbp, ", %s", __mutex_print_id(mutexp->alloc_id));
+
+		__db_prflags(dbenv, mbp, mutexp->flags, fn, " (", ")");
+
+		DB_MSGBUF_FLUSH(dbenv, mbp);
+	}
+
+	return (0);
+}
+
+/*
+ * __mutex_print_debug_single --
+ *	Print mutex internal debugging statistics for a single mutex on a
+ *	single output line.
+ *
+ * PUBLIC: void __mutex_print_debug_single
+ * PUBLIC:          __P((DB_ENV *, const char *, db_mutex_t, u_int32_t));
+ */
+void
+__mutex_print_debug_single(dbenv, tag, mutex, flags)
+	DB_ENV *dbenv;
+	const char *tag;
+	db_mutex_t mutex;
+	u_int32_t flags;
+{
+	DB_MSGBUF mb, *mbp;
+
+	DB_MSGBUF_INIT(&mb);
+	mbp = &mb;
+
+	__db_msgadd(dbenv, mbp, "%lu\t%s ", (u_long)mutex, tag);
+	__mutex_print_debug_stats(dbenv, mbp, mutex, flags);
+	DB_MSGBUF_FLUSH(dbenv, mbp);
+}
+
+/*
+ * __mutex_print_debug_stats --
+ *	Print mutex internal debugging statistics, that is, the statistics
+ *	in the [] square brackets.
+ *
+ * PUBLIC: void __mutex_print_debug_stats
+ * PUBLIC:          __P((DB_ENV *, DB_MSGBUF *, db_mutex_t, u_int32_t));
+ */
+void
+__mutex_print_debug_stats(dbenv, mbp, mutex, flags)
+	DB_ENV *dbenv;
+	DB_MSGBUF *mbp;
+	db_mutex_t mutex;
+	u_int32_t flags;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	u_long value;
+	char buf[DB_THREADID_STRLEN];
+
+	if (mutex == MUTEX_INVALID) {
+		__db_msgadd(dbenv, mbp, "[!Set]");
+		return;
+	}
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+	__db_msgadd(dbenv, mbp, "[");
+	if ((value = mutexp->mutex_set_wait) < 10000000)
+		__db_msgadd(dbenv, mbp, "%lu", value);
+	else
+		__db_msgadd(dbenv, mbp, "%luM", value / 1000000);
+	if ((value = mutexp->mutex_set_nowait) < 10000000)
+		__db_msgadd(dbenv, mbp, "/%lu", value);
+	else
+		__db_msgadd(dbenv, mbp, "/%luM", value / 1000000);
+
+	__db_msgadd(dbenv, mbp, " %d%%",
+	    DB_PCT(mutexp->mutex_set_wait,
+	    mutexp->mutex_set_wait + mutexp->mutex_set_nowait));
+
+	if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
+		__db_msgadd(dbenv, mbp, " %s]",
+		    dbenv->thread_id_string(dbenv,
+		    mutexp->pid, mutexp->tid, buf));
+	else
+		__db_msgadd(dbenv, mbp, " !Own]");
+
+	if (LF_ISSET(DB_STAT_CLEAR))
+		__mutex_clear(dbenv, mutex);
+}
+
+static const char *
+__mutex_print_id(alloc_id)
+	int alloc_id;
+{
+	switch (alloc_id) {
+	case MTX_APPLICATION:		return ("application allocated");
+	case MTX_DB_HANDLE:		return ("db handle");
+	case MTX_ENV_DBLIST:		return ("env dblist");
+	case MTX_ENV_REGION:		return ("env region");
+	case MTX_LOCK_REGION:		return ("lock region");
+	case MTX_LOGICAL_LOCK:		return ("logical lock");
+	case MTX_LOG_FILENAME:		return ("log filename");
+	case MTX_LOG_FLUSH:		return ("log flush");
+	case MTX_LOG_HANDLE:		return ("log handle");
+	case MTX_LOG_REGION:		return ("log region");
+	case MTX_MPOOLFILE_HANDLE:	return ("mpoolfile handle");
+	case MTX_MPOOL_BUFFER:		return ("mpool buffer");
+	case MTX_MPOOL_FH:		return ("mpool filehandle");
+	case MTX_MPOOL_HANDLE:		return ("mpool handle");
+	case MTX_MPOOL_HASH_BUCKET:	return ("mpool hash bucket");
+	case MTX_MPOOL_REGION:		return ("mpool region");
+	case MTX_REP_DATABASE:		return ("replication database");
+	case MTX_REP_REGION:		return ("replication region");
+	case MTX_SEQUENCE:		return ("sequence");
+	case MTX_TWISTER:		return ("twister");
+	case MTX_TXN_ACTIVE:		return ("txn active list");
+	case MTX_TXN_COMMIT:		return ("txn commit");
+	case MTX_TXN_REGION:		return ("txn region");
+	default:			return ("unknown mutex type");
+	}
+	/* NOTREACHED */
+}
+
+/*
+ * __mutex_set_wait_info --
+ *	Return mutex statistics.
+ *
+ * PUBLIC: void __mutex_set_wait_info
+ * PUBLIC:	__P((DB_ENV *, db_mutex_t, u_int32_t *, u_int32_t *));
+ */
+void
+__mutex_set_wait_info(dbenv, mutex, waitp, nowaitp)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
+	u_int32_t *waitp, *nowaitp;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+	*waitp = mutexp->mutex_set_wait;
+	*nowaitp = mutexp->mutex_set_nowait;
+}
+
+/*
+ * __mutex_clear --
+ *	Clear mutex statistics.
+ *
+ * PUBLIC: void __mutex_clear __P((DB_ENV *, db_mutex_t));
+ */
+void
+__mutex_clear(dbenv, mutex)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
+{
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+	mutexp->mutex_set_wait = mutexp->mutex_set_nowait = 0;
+}
+
+#else /* !HAVE_STATISTICS */
+
+int
+__mutex_stat(dbenv, statp, flags)
+	DB_ENV *dbenv;
+	DB_MUTEX_STAT **statp;
+	u_int32_t flags;
+{
+	COMPQUIET(statp, NULL);
+	COMPQUIET(flags, 0);
+
+	return (__db_stat_not_built(dbenv));
+}
+
+int
+__mutex_stat_print(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	COMPQUIET(flags, 0);
+
+	return (__db_stat_not_built(dbenv));
+}
+#endif
diff --git a/storage/bdb/mutex/mut_tas.c b/storage/bdb/mutex/mut_tas.c
index 08d7ed876c6..205637c980d 100644
--- a/storage/bdb/mutex/mut_tas.c
+++ b/storage/bdb/mutex/mut_tas.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mut_tas.c,v 11.44 2004/09/15 19:14:49 bostic Exp $
+ * $Id: mut_tas.c,v 12.14 2005/11/01 14:42:17 bostic Exp $
  */
 
 #include "db_config.h"
@@ -16,117 +16,106 @@
 #include 
 #endif
 
+#include "db_int.h"
+
 /*
  * This is where we load in the actual test-and-set mutex code.
  */
 #define	LOAD_ACTUAL_MUTEX_CODE
-#include "db_int.h"
+#include "dbinc/mutex_int.h"
 
 /*
  * __db_tas_mutex_init --
- *	Initialize a DB_MUTEX.
+ *	Initialize a test-and-set mutex.
  *
- * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t));
  */
 int
-__db_tas_mutex_init(dbenv, mutexp, flags)
+__db_tas_mutex_init(dbenv, mutex, flags)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 	u_int32_t flags;
 {
-	u_int32_t save;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+	int ret;
+
+	COMPQUIET(flags, 0);
+
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 
 	/* Check alignment. */
-	if ((uintptr_t)mutexp & (MUTEX_ALIGN - 1)) {
-		__db_err(dbenv,
-		    "__db_tas_mutex_init: mutex not appropriately aligned");
+	if (((uintptr_t)mutexp & (dbenv->mutex_align - 1)) != 0) {
+		__db_err(dbenv, "TAS: mutex not appropriately aligned");
 		return (EINVAL);
 	}
 
-	/*
-	 * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
-	 * mutex allocation code (__db_mutex_alloc/free).  Preserve only that
-	 * flag.  This is safe because even if this flag was never explicitly
-	 * set, but happened to be set in memory, it will never be checked or
-	 * acted upon.
-	 */
-	save = F_ISSET(mutexp, MUTEX_MPOOL);
-	memset(mutexp, 0, sizeof(*mutexp));
-	F_SET(mutexp, save);
-
-	/*
-	 * If this is a thread lock or the process has told us that there are
-	 * no other processes in the environment, use thread-only locks, they
-	 * are faster in some cases.
-	 *
-	 * This is where we decide to ignore locks we don't need to set -- if
-	 * the application isn't threaded, there aren't any threads to block.
-	 */
-	if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
-		if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
-			F_SET(mutexp, MUTEX_IGNORE);
-			return (0);
-		}
+	if (MUTEX_INIT(&mutexp->tas)) {
+		ret = __os_get_errno();
+		__db_err(dbenv,
+		    "TAS: mutex initialize: %s", db_strerror(ret));
+		return (ret);
 	}
-
-	if (LF_ISSET(MUTEX_LOGICAL_LOCK))
-		F_SET(mutexp, MUTEX_LOGICAL_LOCK);
-
-	/* Initialize the lock. */
-	if (MUTEX_INIT(&mutexp->tas))
-		return (__os_get_errno());
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-	mutexp->reg_off = INVALID_ROFF;
-#endif
-	F_SET(mutexp, MUTEX_INITED);
-
 	return (0);
 }
 
 /*
  * __db_tas_mutex_lock
- *	Lock on a mutex, logically blocking if necessary.
+ *	Lock on a mutex, blocking if necessary.
  *
- * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_tas_mutex_lock(dbenv, mutexp)
+__db_tas_mutex_lock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	u_int32_t nspins;
 	u_long ms, max_ms;
 
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
+#ifdef HAVE_STATISTICS
+	if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
+		++mutexp->mutex_set_wait;
+	else
+		++mutexp->mutex_set_nowait;
+#endif
+
 	/*
 	 * Wait 1ms initially, up to 10ms for mutexes backing logical database
 	 * locks, and up to 25 ms for mutual exclusion data structure mutexes.
 	 * SR: #7675
 	 */
 	ms = 1;
-	max_ms = F_ISSET(mutexp, MUTEX_LOGICAL_LOCK) ? 10 : 25;
+	max_ms = F_ISSET(mutexp, DB_MUTEX_LOGICAL_LOCK) ? 10 : 25;
 
 loop:	/* Attempt to acquire the resource for N spins. */
-	for (nspins = dbenv->tas_spins; nspins > 0; --nspins) {
+	for (nspins =
+	    mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
 #ifdef HAVE_MUTEX_HPPA_MSEM_INIT
 relock:
 #endif
 #ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
 		tsl_t zero = 0;
 #endif
-		if (
-#ifdef MUTEX_SET_TEST
 		/*
-		 * If using test-and-set mutexes, and we know the "set" value,
-		 * we can avoid interlocked instructions since they're unlikely
-		 * to succeed.
+		 * Avoid interlocked instructions until they're likely to
+		 * succeed.
 		 */
-		mutexp->tas ||
-#endif
-		!MUTEX_SET(&mutexp->tas)) {
+		if (F_ISSET(mutexp, DB_MUTEX_LOCKED) ||
+		    !MUTEX_SET(&mutexp->tas)) {
 			/*
 			 * Some systems (notably those with newer Intel CPUs)
 			 * need a small pause here. [#6975]
@@ -141,36 +130,47 @@ relock:
 		/*
 		 * HP semaphores are unlocked automatically when a holding
 		 * process exits.  If the mutex appears to be locked
-		 * (mutexp->locked != 0) but we got here, assume this has
-		 * happened.  Stick our own pid into mutexp->locked and
+		 * (F_ISSET(DB_MUTEX_LOCKED)) but we got here, assume this
+		 * has happened.  Set the pid and tid into the mutex and
 		 * lock again.  (The default state of the mutexes used to
 		 * block in __lock_get_internal is locked, so exiting with
 		 * a locked mutex is reasonable behavior for a process that
 		 * happened to initialize or use one of them.)
 		 */
-		if (mutexp->locked != 0) {
-			__os_id(&mutexp->locked);
+		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+			F_SET(mutexp, DB_MUTEX_LOCKED);
+			dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+			CHECK_MTX_THREAD(dbenv, mutexp);
 			goto relock;
 		}
 		/*
-		 * If we make it here, locked == 0, the diagnostic won't fire,
-		 * and we were really unlocked by someone calling the
-		 * DB mutex unlock function.
+		 * If we make it here, the mutex isn't locked, the diagnostic
+		 * won't fire, and we were really unlocked by someone calling
+		 * the DB mutex unlock function.
 		 */
 #endif
 #ifdef DIAGNOSTIC
-		if (mutexp->locked != 0)
+		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+			char buf[DB_THREADID_STRLEN];
 			__db_err(dbenv,
-		"__db_tas_mutex_lock: ERROR: lock currently in use: ID: %lu",
-			    (u_long)mutexp->locked);
+			      "TAS lock failed: lock currently in use: ID: %s",
+			      dbenv->thread_id_string(dbenv,
+			      mutexp->pid, mutexp->tid, buf));
+			return (__db_panic(dbenv, EACCES));
+		}
 #endif
-#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
-		__os_id(&mutexp->locked);
+		F_SET(mutexp, DB_MUTEX_LOCKED);
+		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+		CHECK_MTX_THREAD(dbenv, mutexp);
+
+#ifdef DIAGNOSTIC
+		/*
+		 * We want to switch threads as often as possible.  Yield
+		 * every time we get a mutex to ensure contention.
+		 */
+		if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+			__os_yield(NULL, 1);
 #endif
-		if (ms == 1)
-			++mutexp->mutex_set_nowait;
-		else
-			++mutexp->mutex_set_wait;
 		return (0);
 	}
 
@@ -181,31 +181,46 @@ relock:
 	if ((ms <<= 1) > max_ms)
 		ms = max_ms;
 
+	/*
+	 * We're spinning.  The environment might be hung, and somebody else
+	 * has already recovered it.  The first thing recovery does is panic
+	 * the environment.  Check to see if we're never going to get this
+	 * mutex.
+	 */
+	PANIC_CHECK(dbenv);
+
 	goto loop;
 }
 
 /*
  * __db_tas_mutex_unlock --
- *	Release a lock.
+ *	Release a mutex.
  *
- * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_tas_mutex_unlock(dbenv, mutexp)
+__db_tas_mutex_unlock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
 #ifdef DIAGNOSTIC
-	if (!mutexp->locked)
-		__db_err(dbenv,
-		    "__db_tas_mutex_unlock: ERROR: lock already unlocked");
-#endif
-#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
-	mutexp->locked = 0;
+	if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+		__db_err(dbenv, "TAS unlock failed: lock already unlocked");
+		return (__db_panic(dbenv, EACCES));
+	}
 #endif
+	F_CLR(mutexp, DB_MUTEX_LOCKED);
 
 	MUTEX_UNSET(&mutexp->tas);
 
@@ -214,17 +229,26 @@ __db_tas_mutex_unlock(dbenv, mutexp)
 
 /*
  * __db_tas_mutex_destroy --
- *	Destroy a DB_MUTEX.
+ *	Destroy a mutex.
  *
- * PUBLIC: int __db_tas_mutex_destroy __P((DB_MUTEX *));
+ * PUBLIC: int __db_tas_mutex_destroy __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_tas_mutex_destroy(mutexp)
-	DB_MUTEX *mutexp;
+__db_tas_mutex_destroy(dbenv, mutex)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
 {
-	if (F_ISSET(mutexp, MUTEX_IGNORE))
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
+
+	if (!MUTEX_ON(dbenv))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
 	MUTEX_DESTROY(&mutexp->tas);
 
 	return (0);
diff --git a/storage/bdb/mutex/mut_win32.c b/storage/bdb/mutex/mut_win32.c
index b510003211e..5e291a550d5 100644
--- a/storage/bdb/mutex/mut_win32.c
+++ b/storage/bdb/mutex/mut_win32.c
@@ -1,10 +1,10 @@
 /*
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2002-2004
+ * Copyright (c) 2002-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: mut_win32.c,v 1.18 2004/07/06 21:06:39 mjc Exp $
+ * $Id: mut_win32.c,v 12.15 2005/11/01 11:49:31 mjc Exp $
  */
 
 #include "db_config.h"
@@ -15,99 +15,129 @@
 #include 
 #endif
 
+#include "db_int.h"
+
 /*
  * This is where we load in the actual test-and-set mutex code.
  */
 #define	LOAD_ACTUAL_MUTEX_CODE
-#include "db_int.h"
+#include "dbinc/mutex_int.h"
 
 /* We don't want to run this code even in "ordinary" diagnostic mode. */
 #undef MUTEX_DIAG
 
+/*
+ * Common code to get an event handle.  This is executed whenever a mutex
+ * blocks, or when unlocking a mutex that a thread is waiting on.  We can't
+ * keep these handles around, since the mutex structure is in shared memory,
+ * and each process gets its own handle value.
+ *
+ * We pass security attributes so that the created event is accessible by all
+ * users, in case a Windows service is sharing an environment with a local
+ * process run as a different user.
+ */
 static _TCHAR hex_digits[] = _T("0123456789abcdef");
+static SECURITY_DESCRIPTOR null_sd;
+static SECURITY_ATTRIBUTES all_sa;
+static int security_initialized = 0;
 
-#define	GET_HANDLE(mutexp, event) do {					\
-	_TCHAR idbuf[] = _T("db.m00000000");				\
-	_TCHAR *p = idbuf + 12;						\
-	u_int32_t id;							\
-									\
-	for (id = (mutexp)->id; id != 0; id >>= 4)			\
-		*--p = hex_digits[id & 0xf];				\
-	event = CreateEvent(NULL, FALSE, FALSE, idbuf);			\
-	if (event == NULL)						\
-		return (__os_get_errno());				\
-} while (0)
+static __inline int get_handle(dbenv, mutexp, eventp)
+	DB_ENV *dbenv;
+	DB_MUTEX *mutexp;
+	HANDLE *eventp;
+{
+	_TCHAR idbuf[] = _T("db.m00000000");
+	_TCHAR *p = idbuf + 12;
+	int ret = 0;
+	u_int32_t id;
+
+	for (id = (mutexp)->id; id != 0; id >>= 4)
+		*--p = hex_digits[id & 0xf];
+
+	if (!security_initialized) {
+		InitializeSecurityDescriptor(&null_sd,
+		    SECURITY_DESCRIPTOR_REVISION);
+		SetSecurityDescriptorDacl(&null_sd, TRUE, 0, FALSE);
+		all_sa.nLength = sizeof(SECURITY_ATTRIBUTES);
+		all_sa.bInheritHandle = FALSE;
+		all_sa.lpSecurityDescriptor = &null_sd;
+		security_initialized = 1;
+	}
+
+	if ((*eventp = CreateEvent(&all_sa, FALSE, FALSE, idbuf)) == NULL) {
+		ret = __os_get_errno();
+		__db_err(dbenv, "Win32 create event failed: %s",
+		    db_strerror(ret));
+	}
+
+	return (ret);
+}
 
 /*
  * __db_win32_mutex_init --
- *	Initialize a DB_MUTEX.
+ *	Initialize a Win32 mutex.
  *
- * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t));
  */
 int
-__db_win32_mutex_init(dbenv, mutexp, flags)
+__db_win32_mutex_init(dbenv, mutex, flags)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 	u_int32_t flags;
 {
-	u_int32_t save;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 
-	/*
-	 * The only setting/checking of the MUTEX_MPOOL flag is in the mutex
-	 * mutex allocation code (__db_mutex_alloc/free).  Preserve only that
-	 * flag.  This is safe because even if this flag was never explicitly
-	 * set, but happened to be set in memory, it will never be checked or
-	 * acted upon.
-	 */
-	save = F_ISSET(mutexp, MUTEX_MPOOL);
-	memset(mutexp, 0, sizeof(*mutexp));
-	F_SET(mutexp, save);
-
-	/*
-	 * If this is a thread lock or the process has told us that there are
-	 * no other processes in the environment, and the application isn't
-	 * threaded, there aren't any threads to block.
-	 */
-	if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
-		if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
-			F_SET(mutexp, MUTEX_IGNORE);
-			return (0);
-		}
-	}
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 
 	mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
-	F_SET(mutexp, MUTEX_INITED);
+
 	return (0);
 }
 
 /*
  * __db_win32_mutex_lock
- *	Lock on a mutex, logically blocking if necessary.
+ *	Lock on a mutex, blocking if necessary.
  *
- * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_win32_mutex_lock(dbenv, mutexp)
+__db_win32_mutex_lock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	HANDLE event;
 	u_int32_t nspins;
-	int ret, ms;
+	int ms, ret;
 #ifdef MUTEX_DIAG
 	LARGE_INTEGER now;
 #endif
 
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
+
 	event = NULL;
 	ms = 50;
 	ret = 0;
 
 loop:	/* Attempt to acquire the resource for N spins. */
-	for (nspins = dbenv->tas_spins; nspins > 0; --nspins) {
-		if (!MUTEX_SET(&mutexp->tas)) {
+	for (nspins =
+	    mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
+		/*
+		 * We can avoid the (expensive) interlocked instructions if
+		 * the mutex is already "set".
+		 */
+		if (mutexp->tas || !MUTEX_SET(&mutexp->tas)) {
 			/*
 			 * Some systems (notably those with newer Intel CPUs)
 			 * need a small pause here. [#6975]
@@ -119,17 +149,26 @@ loop:	/* Attempt to acquire the resource for N spins. */
 		}
 
 #ifdef DIAGNOSTIC
-		if (mutexp->locked)
+		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+			char buf[DB_THREADID_STRLEN];
 			__db_err(dbenv,
-			    "__db_win32_mutex_lock: mutex double-locked!");
-
-		__os_id(&mutexp->locked);
+			    "Win32 lock failed: mutex already locked by %s",
+			     dbenv->thread_id_string(dbenv,
+			     mutexp->pid, mutexp->tid, buf));
+			return (__db_panic(dbenv, EACCES));
+		}
 #endif
+		F_SET(mutexp, DB_MUTEX_LOCKED);
+		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
+		CHECK_MTX_THREAD(dbenv, mutexp);
 
+#ifdef HAVE_STATISTICS
 		if (event == NULL)
 			++mutexp->mutex_set_nowait;
-		else {
+		else
 			++mutexp->mutex_set_wait;
+#endif
+		if (event != NULL) {
 			CloseHandle(event);
 			InterlockedDecrement(&mutexp->nwaiters);
 #ifdef MUTEX_DIAG
@@ -142,6 +181,15 @@ loop:	/* Attempt to acquire the resource for N spins. */
 #endif
 		}
 
+#ifdef DIAGNOSTIC
+		/*
+		 * We want to switch threads as often as possible.  Yield
+		 * every time we get a mutex to ensure contention.
+		 */
+		if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+			__os_yield(NULL, 1);
+#endif
+
 		return (0);
 	}
 
@@ -158,79 +206,92 @@ loop:	/* Attempt to acquire the resource for N spins. */
 		    now.QuadPart, mutexp, mutexp->id);
 #endif
 		InterlockedIncrement(&mutexp->nwaiters);
-		GET_HANDLE(mutexp, event);
+		if ((ret = get_handle(dbenv, mutexp, &event)) != 0)
+			goto err;
+	}
+	if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) {
+		ret = __os_get_errno();
+		goto err;
 	}
-	if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED)
-		return (__os_get_errno());
 	if ((ms <<= 1) > MS_PER_SEC)
 		ms = MS_PER_SEC;
 
+	PANIC_CHECK(dbenv);
 	goto loop;
+
+err:	__db_err(dbenv, "Win32 lock failed: %s", db_strerror(ret));
+	return (__db_panic(dbenv, ret));
 }
 
 /*
  * __db_win32_mutex_unlock --
- *	Release a lock.
+ *	Release a mutex.
  *
- * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_win32_mutex_unlock(dbenv, mutexp)
+__db_win32_mutex_unlock(dbenv, mutex)
 	DB_ENV *dbenv;
-	DB_MUTEX *mutexp;
+	db_mutex_t mutex;
 {
-	int ret;
+	DB_MUTEX *mutexp;
+	DB_MUTEXMGR *mtxmgr;
+	DB_MUTEXREGION *mtxregion;
 	HANDLE event;
+	int ret;
 #ifdef MUTEX_DIAG
-		LARGE_INTEGER now;
+	LARGE_INTEGER now;
 #endif
-
-	if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+	if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
 		return (0);
 
-#ifdef DIAGNOSTIC
-	if (!mutexp->tas || !mutexp->locked)
-		__db_err(dbenv,
-		    "__db_win32_mutex_unlock: ERROR: lock already unlocked");
+	mtxmgr = dbenv->mutex_handle;
+	mtxregion = mtxmgr->reginfo.primary;
+	mutexp = MUTEXP_SET(mutex);
 
-	mutexp->locked = 0;
+#ifdef DIAGNOSTIC
+	if (!mutexp->tas || !F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
+		__db_err(dbenv, "Win32 unlock failed: lock already unlocked");
+		return (__db_panic(dbenv, EACCES));
+	}
 #endif
+	F_CLR(mutexp, DB_MUTEX_LOCKED);
 	MUTEX_UNSET(&mutexp->tas);
 
-	ret = 0;
-
 	if (mutexp->nwaiters > 0) {
-		GET_HANDLE(mutexp, event);
+		if ((ret = get_handle(dbenv, mutexp, &event)) != 0)
+			goto err;
 
 #ifdef MUTEX_DIAG
 		QueryPerformanceCounter(&now);
 		printf("[%I64d]: Signalling mutex %p, id %d\n",
 		    now.QuadPart, mutexp, mutexp->id);
 #endif
-		if (!PulseEvent(event))
+		if (!PulseEvent(event)) {
 			ret = __os_get_errno();
+			CloseHandle(event);
+			goto err;
+		}
 
 		CloseHandle(event);
 	}
 
-#ifdef DIAGNOSTIC
-	if (ret != 0)
-		__db_err(dbenv,
-		    "__db_win32_mutex_unlock: ERROR: unlock failed");
-#endif
+	return (0);
 
-	return (ret);
+err:	__db_err(dbenv, "Win32 unlock failed: %s", db_strerror(ret));
+	return (__db_panic(dbenv, ret));
 }
 
 /*
  * __db_win32_mutex_destroy --
- *	Destroy a DB_MUTEX - noop with this implementation.
+ *	Destroy a mutex.
  *
- * PUBLIC: int __db_win32_mutex_destroy __P((DB_MUTEX *));
+ * PUBLIC: int __db_win32_mutex_destroy __P((DB_ENV *, db_mutex_t));
  */
 int
-__db_win32_mutex_destroy(mutexp)
-	DB_MUTEX *mutexp;
+__db_win32_mutex_destroy(dbenv, mutex)
+	DB_ENV *dbenv;
+	db_mutex_t mutex;
 {
 	return (0);
 }
diff --git a/storage/bdb/mutex/tm.c b/storage/bdb/mutex/tm.c
index ebfb2f3290f..ce685c8b7b7 100644
--- a/storage/bdb/mutex/tm.c
+++ b/storage/bdb/mutex/tm.c
@@ -1,50 +1,85 @@
 /*
  * Standalone mutex tester for Berkeley DB mutexes.
+ *
+ * $Id: tm.c,v 12.10 2005/10/21 17:53:04 bostic Exp $
  */
 #include "db_config.h"
 
+#ifndef NO_SYSTEM_INCLUDES
 #include 
-#include 
-#include 
 #include 
 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
-#include 
+#include 
 
 #if defined(MUTEX_THREAD_TEST)
 #include 
 #endif
+#endif
 
 #include "db_int.h"
 
-#ifndef	HAVE_QNX
-#define	shm_open	open
-#define	shm_unlink	remove
+#ifdef DB_WIN32
+extern int getopt(int, char * const *, const char *);
+
+typedef HANDLE os_pid_t;
+typedef HANDLE os_thread_t;
+
+#define	os_thread_create(thrp, attr, func, arg)				\
+    (((*(thrp) = CreateThread(NULL, 0,					\
+	(LPTHREAD_START_ROUTINE)(func), (arg), 0, NULL)) == NULL) ? -1 : 0)
+#define	os_thread_join(thr, statusp)					\
+    ((WaitForSingleObject((thr), INFINITE) == WAIT_OBJECT_0) &&		\
+    GetExitCodeThread((thr), (LPDWORD)(statusp)) ? 0 : -1)
+#define	os_thread_self() GetCurrentThreadId()
+
+#else /* !DB_WIN32 */
+
+typedef pid_t os_pid_t;
+
+#ifdef MUTEX_THREAD_TEST
+typedef pthread_t os_thread_t;
 #endif
 
-void  exec_proc(u_long, char *, char *);
-void  map_file(u_int8_t **, u_int8_t **, u_int8_t **, int *);
-void  tm_file_init(void);
-void  run_locker(u_long);
-void *run_lthread(void *);
-void  run_wakeup(u_long);
-void *run_wthread(void *);
-void  tm_mutex_destroy(void);
-void  tm_mutex_init(void);
-void  tm_mutex_stats(void);
-void  unmap_file(u_int8_t *, int);
-int   usage(void);
+#define	os_thread_create(thrp, attr, func, arg)				\
+    pthread_create((thrp), (attr), (func), (arg))
+#define	os_thread_join(thr, statusp) pthread_join((thr), (statusp))
+#define	os_thread_self() pthread_self()
+#endif
 
-#define	MT_FILE		"mutex.file"
-#define	MT_FILE_QUIT	"mutex.file.quit"
+#define	OS_BAD_PID (os_pid_t)-1
 
-DB_ENV	 dbenv;					/* Fake out DB. */
+#define	TESTDIR		"TESTDIR"		/* Working area */
+#define	MT_FILE		"TESTDIR/mutex.file"
+#define	MT_FILE_QUIT	"TESTDIR/mutex.file.quit"
+
+/*
+ * The backing file layout:
+ *	TM[1]			per-thread mutex array lock
+ *	TM[nthreads]		per-thread mutex array
+ *	TM[maxlocks]		per-lock mutex array
+ */
+typedef struct {
+	db_mutex_t mutex;			/* Mutex. */
+	u_long	   id;				/* Holder's ID. */
+	u_int	   wakeme;			/* Request to awake. */
+} TM;
+
+DB_ENV	*dbenv;					/* Backing environment */
 size_t	 len;					/* Backing file size. */
-int	 align;					/* Mutex alignment in file. */
+
+u_int8_t *gm_addr;				/* Global mutex */
+u_int8_t *lm_addr;				/* Locker mutexes */
+u_int8_t *tm_addr;				/* Thread mutexes */
+
+#ifdef MUTEX_THREAD_TEST
+os_thread_t *kidsp;				/* Locker threads */
+os_thread_t  wakep;				/* Wakeup thread */
+#endif
 
 int	 maxlocks = 20;				/* -l: Backing locks. */
 int	 nlocks = 10000;			/* -n: Locks per processes. */
@@ -52,12 +87,24 @@ int	 nprocs = 20;				/* -p: Processes. */
 int	 nthreads = 1;				/* -t: Threads. */
 int	 verbose;				/* -v: Verbosity. */
 
-typedef struct {
-	DB_MUTEX mutex;				/* Mutex. */
-	u_long	 id;				/* Holder's ID. */
-#define	MUTEX_WAKEME	0x01			/* Request to awake. */
-	u_int	 flags;
-} TM;
+int	 locker_start(u_long);
+int	 locker_wait(void);
+void	 map_file(u_int8_t **, u_int8_t **, u_int8_t **, DB_FH **);
+os_pid_t os_spawn(const char *, char *const[]);
+int	 os_wait(os_pid_t *, int);
+void	*run_lthread(void *);
+void	*run_wthread(void *);
+os_pid_t spawn_proc(u_long, char *, char *);
+void	 tm_env_close(void);
+int	 tm_env_init(void);
+void	 tm_file_init(void);
+void	 tm_mutex_destroy(void);
+void	 tm_mutex_init(void);
+void	 tm_mutex_stats(void);
+void	 unmap_file(u_int8_t *, DB_FH *);
+int	 usage(void);
+int	 wakeup_start(u_long);
+int	 wakeup_wait(void);
 
 int
 main(argc, argv)
@@ -67,12 +114,11 @@ main(argc, argv)
 	enum {LOCKER, WAKEUP, PARENT} rtype;
 	extern int optind;
 	extern char *optarg;
-	pid_t pid;
+	os_pid_t wakeup_pid, *pids;
 	u_long id;
-	int ch, fd, eval, i, status;
-	char *p, *tmpath;
-
-	__os_spin(&dbenv);		/* Fake out DB. */
+	DB_FH *fhp, *map_fhp;
+	int ch, err, i;
+	char *p, *tmpath, cmd[1024];
 
 	rtype = PARENT;
 	id = 0;
@@ -122,173 +168,211 @@ main(argc, argv)
 	argv += optind;
 
 	/*
-	 * The file layout:
-	 *	TM[1]			per-thread mutex array lock
-	 *	TM[nthreads]		per-thread mutex array
-	 *	TM[maxlocks]		per-lock mutex array
+	 * If we're not running a multi-process test, we should be running
+	 * a multi-thread test.
 	 */
-	align = DB_ALIGN(sizeof(TM), MUTEX_ALIGN);
-	len = align * (1 + nthreads * nprocs + maxlocks);
-
-	switch (rtype) {
-	case PARENT:
-		break;
-	case LOCKER:
-		run_locker(id);
-		return (EXIT_SUCCESS);
-	case WAKEUP:
-		run_wakeup(id);
-		return (EXIT_SUCCESS);
+	if (nprocs == 1 && nthreads == 1) {
+		fprintf(stderr,
+	    "tm: running in a single process requires multiple threads\n");
+		return (EXIT_FAILURE);
 	}
 
+	len = sizeof(TM) * (1 + nthreads * nprocs + maxlocks);
+
+	/*
+	 * In the multi-process test, the parent spawns processes that exec
+	 * the original binary, ending up here.  Each process joins the DB
+	 * environment separately and then calls the supporting function.
+	 */
+	if (rtype == LOCKER || rtype == WAKEUP) {
+		__os_sleep(dbenv, 3, 0);	/* Let everyone catch up. */
+						/* Initialize random numbers. */
+		srand((u_int)time(NULL) % getpid());
+
+		if (tm_env_init() != 0)		/* Join the environment. */
+			exit(EXIT_FAILURE);
+						/* Join the backing file. */
+		map_file(&gm_addr, &tm_addr, &lm_addr, &map_fhp);
+		if (verbose)
+			printf(
+	    "Backing file: global (%#lx), threads (%#lx), locks (%#lx)\n",
+			    (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr);
+
+		if ((rtype == LOCKER ?
+		    locker_start(id) : wakeup_start(id)) != 0)
+			exit(EXIT_FAILURE);
+		if ((rtype == LOCKER ? locker_wait() : wakeup_wait()) != 0)
+			exit(EXIT_FAILURE);
+
+		unmap_file(gm_addr, map_fhp);	/* Detach from backing file. */
+
+		tm_env_close();			/* Detach from environment. */
+
+		exit(EXIT_SUCCESS);
+	}
+
+	/*
+	 * The following code is only executed by the original parent process.
+	 *
+	 * Clean up from any previous runs.
+	 */
+	snprintf(cmd, sizeof(cmd), "rm -rf %s", TESTDIR);
+	(void)system(cmd);
+	snprintf(cmd, sizeof(cmd), "mkdir %s", TESTDIR);
+	(void)system(cmd);
+
 	printf(
     "tm: %d processes, %d threads/process, %d lock requests from %d locks\n",
 	    nprocs, nthreads, nlocks, maxlocks);
-	printf(
-    "tm: mutex alignment %lu, structure alignment %d, backing file %lu bytes\n",
-	    (u_long)MUTEX_ALIGN, align, (u_long)len);
+	printf("tm: backing file %lu bytes\n", (u_long)len);
+
+	if (tm_env_init() != 0)		/* Create the environment. */
+		exit(EXIT_FAILURE);
 
 	tm_file_init();			/* Initialize backing file. */
-	tm_mutex_init();		/* Initialize file's mutexes. */
 
-	for (i = 0; i < nprocs; ++i) {
-		switch (fork()) {
-		case -1:
-			perror("fork");
-			return (EXIT_FAILURE);
-		case 0:
-			exec_proc(id, tmpath, "locker");
-			break;
-		default:
-			break;
-		}
-		id += nthreads;
-	}
-
-	(void)remove(MT_FILE_QUIT);
-
-	switch (fork()) {
-	case -1:
-		perror("fork");
-		return (EXIT_FAILURE);
-	case 0:
-		exec_proc(id, tmpath, "wakeup");
-		break;
-	default:
-		break;
-	}
-	++id;
-
-	/* Wait for locking threads. */
-	for (i = 0, eval = EXIT_SUCCESS; i < nprocs; ++i)
-		if ((pid = wait(&status)) != (pid_t)-1) {
-			fprintf(stderr,
-		    "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status));
-			if (WEXITSTATUS(status) != 0)
-				eval = EXIT_FAILURE;
-		}
-
-	/* Signal wakeup thread to exit. */
-	if ((fd = open(MT_FILE_QUIT, O_WRONLY | O_CREAT, 0664)) == -1) {
-		fprintf(stderr, "tm: %s\n", strerror(errno));
-		status = EXIT_FAILURE;
-	}
-	(void)close(fd);
-
-	/* Wait for wakeup thread. */
-	if ((pid = wait(&status)) != (pid_t)-1) {
-		fprintf(stderr,
-	    "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status));
-		if (WEXITSTATUS(status) != 0)
-			eval = EXIT_FAILURE;
-	}
-
-	(void)remove(MT_FILE_QUIT);
-
-	tm_mutex_stats();			/* Display run statistics. */
-	tm_mutex_destroy();			/* Destroy region. */
-
-	printf("tm: exit status: %s\n",
-	    eval == EXIT_SUCCESS ? "success" : "failed!");
-	return (eval);
-}
-
-void
-exec_proc(id, tmpath, typearg)
-	u_long id;
-	char *tmpath, *typearg;
-{
-	char *argv[10], **ap, b_l[10], b_n[10], b_p[10], b_t[10], b_T[10];
-
-	ap = &argv[0];
-	*ap++ = "tm";
-	sprintf(b_l, "-l%d", maxlocks);
-	*ap++ = b_l;
-	sprintf(b_n, "-n%d", nlocks);
-	*ap++ = b_p;
-	sprintf(b_p, "-p%d", nprocs);
-	*ap++ = b_n;
-	sprintf(b_t, "-t%d", nthreads);
-	*ap++ = b_t;
-	sprintf(b_T, "-T%s=%lu", typearg, id);
-	*ap++ = b_T;
+					/* Map in the backing file. */
+	map_file(&gm_addr, &tm_addr, &lm_addr, &map_fhp);
 	if (verbose)
-		*ap++ = "-v";
+		printf(
+	    "backing file: global (%#lx), threads (%#lx), locks (%#lx)\n",
+		    (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr);
 
-	*ap = NULL;
-	execvp(tmpath, argv);
+	tm_mutex_init();		/* Initialize mutexes. */
 
-	fprintf(stderr, "%s: %s\n", tmpath, strerror(errno));
-	exit(EXIT_FAILURE);
+	if (nprocs > 1) {		/* Run the multi-process test. */
+		/* Allocate array of locker process IDs. */
+		if ((pids = calloc(nprocs, sizeof(os_pid_t))) == NULL) {
+			fprintf(stderr, "tm: %s\n", strerror(errno));
+			goto fail;
+		}
+
+		/* Spawn locker processes and threads. */
+		for (i = 0; i < nprocs; ++i) {
+			if ((pids[i] =
+			    spawn_proc(id, tmpath, "locker")) == OS_BAD_PID) {
+				fprintf(stderr,
+				    "tm: failed to spawn a locker\n");
+				goto fail;
+			}
+			id += nthreads;
+		}
+
+		/* Spawn wakeup process/thread. */
+		if ((wakeup_pid =
+		    spawn_proc(id, tmpath, "wakeup")) == OS_BAD_PID) {
+			fprintf(stderr, "tm: failed to spawn waker\n");
+			goto fail;
+		}
+		++id;
+
+		/* Wait for all lockers to exit. */
+		if ((err = os_wait(pids, nprocs)) != 0) {
+			fprintf(stderr, "locker wait failed with %d\n", err);
+			goto fail;
+		}
+
+		/* Signal wakeup process to exit. */
+		if ((err = __os_open(
+		    dbenv, MT_FILE_QUIT, DB_OSO_CREATE, 0664, &fhp)) != 0) {
+			fprintf(stderr, "tm: open %s\n", db_strerror(err));
+			goto fail;
+		}
+		(void)__os_closehandle(dbenv, fhp);
+
+		/* Wait for wakeup process/thread. */
+		if ((err = os_wait(&wakeup_pid, 1)) != 0) {
+			fprintf(stderr,
+			    "%lu: exited %d\n", (u_long)wakeup_pid, err);
+			goto fail;
+		}
+	} else {			/* Run the single-process test. */
+		/* Spawn locker threads. */
+		if (locker_start(0) != 0)
+			goto fail;
+
+		/* Spawn wakeup thread. */
+		if (wakeup_start(nthreads) != 0)
+			goto fail;
+
+		/* Wait for all lockers to exit. */
+		if (locker_wait() != 0)
+			goto fail;
+
+		/* Signal wakeup process to exit. */
+		if ((err = __os_open(
+		    dbenv, MT_FILE_QUIT, DB_OSO_CREATE, 0664, &fhp)) != 0) {
+			fprintf(stderr, "tm: open %s\n", db_strerror(err));
+			goto fail;
+		}
+		(void)__os_closehandle(dbenv, fhp);
+
+		/* Wait for wakeup thread. */
+		if (wakeup_wait() != 0)
+			goto fail;
+	}
+
+	tm_mutex_stats();		/* Display run statistics. */
+	tm_mutex_destroy();		/* Destroy mutexes. */
+
+	unmap_file(gm_addr, map_fhp);	/* Detach from backing file. */
+
+	tm_env_close();			/* Detach from environment. */
+
+	printf("tm: test succeeded\n");
+	return (EXIT_SUCCESS);
+
+fail:	printf("tm: FAILED!\n");
+	return (EXIT_FAILURE);
 }
 
-void
-run_locker(id)
+int
+locker_start(id)
 	u_long id;
 {
 #if defined(MUTEX_THREAD_TEST)
-	pthread_t *kidsp;
-	int i;
-	void *retp;
-#endif
-	int status;
+	int err, i;
 
-	__os_sleep(&dbenv, 3, 0);		/* Let everyone catch up. */
-
-	srand((u_int)time(NULL) % getpid());	/* Initialize random numbers. */
-
-#if defined(MUTEX_THREAD_TEST)
 	/*
 	 * Spawn off threads.  We have nthreads all locking and going to
 	 * sleep, and one other thread cycling through and waking them up.
 	 */
 	if ((kidsp =
-	    (pthread_t *)calloc(sizeof(pthread_t), nthreads)) == NULL) {
+	    (os_thread_t *)calloc(sizeof(os_thread_t), nthreads)) == NULL) {
 		fprintf(stderr, "tm: %s\n", strerror(errno));
-		exit(EXIT_FAILURE);
+		return (1);
 	}
 	for (i = 0; i < nthreads; i++)
-		if ((errno = pthread_create(
+		if ((err = os_thread_create(
 		    &kidsp[i], NULL, run_lthread, (void *)(id + i))) != 0) {
 			fprintf(stderr, "tm: failed spawning thread: %s\n",
-			    strerror(errno));
-			exit(EXIT_FAILURE);
+			    db_strerror(err));
+			return (1);
 		}
+	return (0);
+#else
+	return (run_lthread((void *)id) == NULL ? 0 : 1);
+#endif
+}
+
+int
+locker_wait()
+{
+#if defined(MUTEX_THREAD_TEST)
+	int i;
+	void *retp;
 
 	/* Wait for the threads to exit. */
-	status = EXIT_SUCCESS;
 	for (i = 0; i < nthreads; i++) {
-		pthread_join(kidsp[i], &retp);
+		os_thread_join(kidsp[i], &retp);
 		if (retp != NULL) {
 			fprintf(stderr, "tm: thread exited with error\n");
-			status = EXIT_FAILURE;
+			return (1);
 		}
 	}
 	free(kidsp);
-#else
-	status = (int)run_lthread((void *)id);
 #endif
-	exit(status);
+	return (0);
 }
 
 void *
@@ -297,49 +381,38 @@ run_lthread(arg)
 {
 	TM *gp, *mp, *tp;
 	u_long id, tid;
-	int fd, i, lock, nl, remap;
-	u_int8_t *gm_addr, *lm_addr, *tm_addr;
+	int err, i, lock, nl;
 
-	id = (int)arg;
+	id = (uintptr_t)arg;
 #if defined(MUTEX_THREAD_TEST)
-	tid = (u_long)pthread_self();
+	tid = (u_long)os_thread_self();
 #else
 	tid = 0;
 #endif
 	printf("Locker: ID %03lu (PID: %lu; TID: %lx)\n",
 	    id, (u_long)getpid(), tid);
 
-	nl = nlocks;
-	for (gm_addr = NULL, gp = tp = NULL, remap = 0;;) {
-		/* Map in the file as necessary. */
-		if (gm_addr == NULL) {
-			map_file(&gm_addr, &tm_addr, &lm_addr, &fd);
-			gp = (TM *)gm_addr;
-			tp = (TM *)(tm_addr + id * align);
-			if (verbose)
-				printf(
-				    "%03lu: map threads @ %#lx; locks @ %#lx\n",
-				    id, (u_long)tm_addr, (u_long)lm_addr);
-			remap = (rand() % 100) + 35;
-		}
+	gp = (TM *)gm_addr;
+	tp = (TM *)(tm_addr + id * sizeof(TM));
 
+	for (nl = nlocks; nl > 0;) {
 		/* Select and acquire a data lock. */
 		lock = rand() % maxlocks;
-		mp = (TM *)(lm_addr + lock * align);
+		mp = (TM *)(lm_addr + lock * sizeof(TM));
 		if (verbose)
-			printf("%03lu: lock %d @ %#lx\n",
-			    id, lock, (u_long)&mp->mutex);
+			printf("%03lu: lock %d (mtx: %lu)\n",
+			    id, lock, (u_long)mp->mutex);
 
-		if (__db_mutex_lock(&dbenv, &mp->mutex)) {
+		if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) {
 			fprintf(stderr, "%03lu: never got lock %d: %s\n",
-			    id, lock, strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    id, lock, db_strerror(err));
+			return ((void *)1);
 		}
 		if (mp->id != 0) {
 			fprintf(stderr,
 			    "RACE! (%03lu granted lock %d held by %03lu)\n",
 			    id, lock, mp->id);
-			return ((void *)EXIT_FAILURE);
+			return ((void *)1);
 		}
 		mp->id = id;
 
@@ -348,12 +421,12 @@ run_lthread(arg)
 		 * we still hold the mutex.
 		 */
 		for (i = 0; i < 3; ++i) {
-			__os_sleep(&dbenv, 0, rand() % 3);
+			__os_sleep(dbenv, 0, rand() % 3);
 			if (mp->id != id) {
 				fprintf(stderr,
 				    "RACE! (%03lu stole lock %d from %03lu)\n",
 				    mp->id, lock, id);
-				return ((void *)EXIT_FAILURE);
+				return ((void *)1);
 			}
 		}
 
@@ -367,112 +440,106 @@ run_lthread(arg)
 		 *
 		 * The wakeup thread will wake us up.
 		 */
-		if (__db_mutex_lock(&dbenv, &gp->mutex)) {
+		if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) {
 			fprintf(stderr,
-			    "%03lu: global lock: %s\n", id, strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "%03lu: global lock: %s\n", id, db_strerror(err));
+			return ((void *)1);
 		}
 		if (tp->id != 0 && tp->id != id) {
 			fprintf(stderr,
 		    "%03lu: per-thread mutex isn't mine, owned by %03lu\n",
 			    id, tp->id);
-			return ((void *)EXIT_FAILURE);
+			return ((void *)1);
 		}
 		tp->id = id;
 		if (verbose)
-			printf("%03lu: self-blocking\n", id);
-		if (F_ISSET(tp, MUTEX_WAKEME)) {
+			printf("%03lu: self-blocking (mtx: %lu)\n",
+			    id, (u_long)tp->mutex);
+		if (tp->wakeme) {
 			fprintf(stderr,
 			    "%03lu: wakeup flag incorrectly set\n", id);
-			return ((void *)EXIT_FAILURE);
+			return ((void *)1);
 		}
-		F_SET(tp, MUTEX_WAKEME);
-		if (__db_mutex_unlock(&dbenv, &gp->mutex)) {
+		tp->wakeme = 1;
+		if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) {
 			fprintf(stderr,
-			    "%03lu: global unlock: %s\n", id, strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "%03lu: global unlock: %s\n", id, db_strerror(err));
+			return ((void *)1);
 		}
-		if (__db_mutex_lock(&dbenv, &tp->mutex)) {
+		if ((err = dbenv->mutex_lock(dbenv, tp->mutex)) != 0) {
 			fprintf(stderr, "%03lu: per-thread lock: %s\n",
-			    id, strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    id, db_strerror(err));
+			return ((void *)1);
 		}
 		/* Time passes... */
-		if (F_ISSET(tp, MUTEX_WAKEME)) {
+		if (tp->wakeme) {
 			fprintf(stderr, "%03lu: wakeup flag not cleared\n", id);
-			return ((void *)EXIT_FAILURE);
+			return ((void *)1);
 		}
 
 		if (verbose)
-			printf("%03lu: release %d @ %#lx\n",
-			    id, lock, (u_long)&mp->mutex);
+			printf("%03lu: release %d (mtx: %lu)\n",
+			    id, lock, (u_long)mp->mutex);
 
 		/* Release the data lock. */
 		mp->id = 0;
-		if (__db_mutex_unlock(&dbenv, &mp->mutex)) {
+		if ((err = dbenv->mutex_unlock(dbenv, mp->mutex)) != 0) {
 			fprintf(stderr,
-			    "%03lu: lock release: %s\n", id, strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "%03lu: lock release: %s\n", id, db_strerror(err));
+			return ((void *)1);
 		}
 
-		if (--nl % 100 == 0)
+		if (--nl % 100 == 0) {
 			fprintf(stderr, "%03lu: %d\n", id, nl);
-
-		if (nl == 0 || --remap == 0) {
-			if (verbose)
-				printf("%03lu: re-mapping\n", id);
-			unmap_file(gm_addr, fd);
-			gm_addr = NULL;
-
-			if (nl == 0)
-				break;
-
-			__os_sleep(&dbenv, 0, rand() % 500);
+			/*
+			 * Windows buffers stderr and the output looks wrong
+			 * without this.
+			 */
+			fflush(stderr);
 		}
 	}
 
 	return (NULL);
 }
 
-void
-run_wakeup(id)
+int
+wakeup_start(id)
 	u_long id;
 {
 #if defined(MUTEX_THREAD_TEST)
-	pthread_t wakep;
-	int status;
-	void *retp;
-#endif
-	__os_sleep(&dbenv, 3, 0);		/* Let everyone catch up. */
+	int err;
 
-	srand((u_int)time(NULL) % getpid());	/* Initialize random numbers. */
-
-#if defined(MUTEX_THREAD_TEST)
 	/*
 	 * Spawn off wakeup thread.
 	 */
-	if ((errno = pthread_create(
+	if ((err = os_thread_create(
 	    &wakep, NULL, run_wthread, (void *)id)) != 0) {
 		fprintf(stderr, "tm: failed spawning wakeup thread: %s\n",
-		    strerror(errno));
-		exit(EXIT_FAILURE);
+		    db_strerror(err));
+		return (1);
 	}
+	return (0);
+#else
+	return (run_wthread((void *)id) == NULL ? 0 : 1);
+#endif
+}
+
+int
+wakeup_wait()
+{
+#if defined(MUTEX_THREAD_TEST)
+	void *retp;
 
 	/*
-	 * run_locker will create a file when the wakeup thread is no
-	 * longer needed.
+	 * A file is created when the wakeup thread is no longer needed.
 	 */
-	status = 0;
-	pthread_join(wakep, &retp);
+	os_thread_join(wakep, &retp);
 	if (retp != NULL) {
 		fprintf(stderr, "tm: wakeup thread exited with error\n");
-		status = EXIT_FAILURE;
+		return (1);
 	}
-
-	exit(status);
-#else
-	exit((int)run_wthread((void *)id));
 #endif
+	return (0);
 }
 
 /*
@@ -483,31 +550,25 @@ void *
 run_wthread(arg)
 	void *arg;
 {
-	struct stat sb;
 	TM *gp, *tp;
 	u_long id, tid;
-	int fd, check_id;
-	u_int8_t *gm_addr, *tm_addr;
+	int check_id, err;
 
-	id = (int)arg;
+	id = (uintptr_t)arg;
 #if defined(MUTEX_THREAD_TEST)
-	tid = (u_long)pthread_self();
+	tid = (u_long)os_thread_self();
 #else
 	tid = 0;
 #endif
 	printf("Wakeup: ID %03lu (PID: %lu; TID: %lx)\n",
 	    id, (u_long)getpid(), tid);
 
-	arg = NULL;
-	map_file(&gm_addr, &tm_addr, NULL, &fd);
-	if (verbose)
-		printf("%03lu: map threads @ %#lx\n", id, (u_long)tm_addr);
 	gp = (TM *)gm_addr;
 
 	/* Loop, waking up sleepers and periodically sleeping ourselves. */
 	for (check_id = 0;; ++check_id) {
 		/* Check to see if the locking threads have finished. */
-		if (stat(MT_FILE_QUIT, &sb) == 0)
+		if (__os_exists(MT_FILE_QUIT, NULL) == 0)
 			break;
 
 		/* Check for ID wraparound. */
@@ -515,39 +576,96 @@ run_wthread(arg)
 			check_id = 0;
 
 		/* Check for a thread that needs a wakeup. */
-		tp = (TM *)(tm_addr + check_id * align);
-		if (!F_ISSET(tp, MUTEX_WAKEME))
+		tp = (TM *)(tm_addr + check_id * sizeof(TM));
+		if (!tp->wakeme)
 			continue;
 
-		if (verbose)
-			printf("%03lu: wakeup thread %03lu @ %#lx\n",
-			    id, tp->id, (u_long)&tp->mutex);
+		if (verbose) {
+			printf("%03lu: wakeup thread %03lu (mtx: %lu)\n",
+			    id, tp->id, (u_long)tp->mutex);
+			fflush(stdout);
+		}
 
 		/* Acquire the global lock. */
-		if (__db_mutex_lock(&dbenv, &gp->mutex)) {
+		if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) {
 			fprintf(stderr,
-			    "wakeup: global lock: %s\n", strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "wakeup: global lock: %s\n", db_strerror(err));
+			return ((void *)1);
 		}
 
-		F_CLR(tp, MUTEX_WAKEME);
-		if (__db_mutex_unlock(&dbenv, &tp->mutex)) {
+		tp->wakeme = 0;
+		if ((err = dbenv->mutex_unlock(dbenv, tp->mutex)) != 0) {
 			fprintf(stderr,
-			    "wakeup: unlock: %s\n", strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "wakeup: unlock: %s\n", db_strerror(err));
+			return ((void *)1);
 		}
 
-		if (__db_mutex_unlock(&dbenv, &gp->mutex)) {
+		if ((err = dbenv->mutex_unlock(dbenv, gp->mutex))) {
 			fprintf(stderr,
-			    "wakeup: global unlock: %s\n", strerror(errno));
-			return ((void *)EXIT_FAILURE);
+			    "wakeup: global unlock: %s\n", db_strerror(err));
+			return ((void *)1);
 		}
 
-		__os_sleep(&dbenv, 0, rand() % 3);
+		__os_sleep(dbenv, 0, rand() % 3);
 	}
 	return (NULL);
 }
 
+/*
+ * tm_env_init --
+ *	Create the backing database environment.
+ */
+int
+tm_env_init()
+{
+	u_int32_t flags;
+	int ret;
+	char *home;
+
+	/*
+	 * Create an environment object and initialize it for error
+	 * reporting.
+	 */
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr, "tm: %s\n", db_strerror(ret));
+		return (1);
+	}
+	dbenv->set_errfile(dbenv, stderr);
+	dbenv->set_errpfx(dbenv, "tm");
+
+	/* Allocate enough mutexes. */
+	if ((ret = dbenv->mutex_set_increment(dbenv,
+	    1 + nthreads * nprocs + maxlocks)) != 0) {
+		dbenv->err(dbenv, ret, "dbenv->mutex_set_increment");
+		return (1);
+	}
+
+	flags = DB_CREATE;
+	if (nprocs == 1) {
+		home = NULL;
+		flags |= DB_PRIVATE;
+	} else
+		home = TESTDIR;
+	if (nthreads != 1)
+		flags |= DB_THREAD;
+	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+		dbenv->err(dbenv, ret, "environment open: %s", home);
+		return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * tm_env_close --
+ *	Close the backing database environment.
+ */
+void
+tm_env_close()
+{
+	(void)dbenv->close(dbenv, 0);
+}
+
 /*
  * tm_file_init --
  *	Initialize the backing file.
@@ -555,29 +673,32 @@ run_wthread(arg)
 void
 tm_file_init()
 {
-	int fd;
+	DB_FH *fhp;
+	int err;
+	size_t nwrite;
 
 	/* Initialize the backing file. */
 	if (verbose)
 		printf("Create the backing file.\n");
 
-	(void)shm_unlink(MT_FILE);
+	(void)unlink(MT_FILE);
 
-	if ((fd = shm_open(
-	    MT_FILE, O_CREAT | O_RDWR | O_TRUNC,
-	    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) == -1) {
+	if ((err = __os_open(dbenv, MT_FILE,
+	    DB_OSO_CREATE | DB_OSO_TRUNC, 0666, &fhp)) == -1) {
 		(void)fprintf(stderr,
-		    "%s: open: %s\n", MT_FILE, strerror(errno));
+		    "%s: open: %s\n", MT_FILE, db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 
-	if (lseek(fd,
-	    (off_t)len, SEEK_SET) != (off_t)len || write(fd, &fd, 1) != 1) {
+	if ((err = __os_seek(dbenv, fhp,
+	    0, 0, len, 0, DB_OS_SEEK_SET)) != 0 ||
+	    (err = __os_write(dbenv, fhp, &err, 1, &nwrite)) != 0 ||
+	    nwrite != 1) {
 		(void)fprintf(stderr,
-		    "%s: seek/write: %s\n", MT_FILE, strerror(errno));
+		    "%s: seek/write: %s\n", MT_FILE, db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
-	(void)close(fd);
+	(void)__os_closehandle(dbenv, fhp);
 }
 
 /*
@@ -588,59 +709,59 @@ void
 tm_mutex_init()
 {
 	TM *mp;
-	int fd, i;
-	u_int8_t *gm_addr, *lm_addr, *tm_addr;
-
-	map_file(&gm_addr, &tm_addr, &lm_addr, &fd);
-	if (verbose)
-		printf("init: map threads @ %#lx; locks @ %#lx\n",
-		    (u_long)tm_addr, (u_long)lm_addr);
+	int err, i;
 
 	if (verbose)
-		printf("Initialize the global mutex:\n");
+		printf("Allocate the global mutex: ");
 	mp = (TM *)gm_addr;
-	if (__db_mutex_init_int(&dbenv, &mp->mutex, 0, 0)) {
+	if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) {
 		fprintf(stderr,
-		    "__db_mutex_init (global): %s\n", strerror(errno));
+		    "DB_ENV->mutex_alloc (global): %s\n", db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 	if (verbose)
-		printf("\t@ %#lx\n", (u_long)&mp->mutex);
+		printf("%lu\n", (u_long)mp->mutex);
 
 	if (verbose)
 		printf(
-		    "Initialize %d per-thread mutexes:\n", nthreads * nprocs);
+		    "Allocate %d per-thread, self-blocking mutexes: ",
+		    nthreads * nprocs);
 	for (i = 0; i < nthreads * nprocs; ++i) {
-		mp = (TM *)(tm_addr + i * align);
-		if (__db_mutex_init_int(
-		    &dbenv, &mp->mutex, 0, MUTEX_SELF_BLOCK)) {
-			fprintf(stderr, "__db_mutex_init (per-thread %d): %s\n",
-			    i, strerror(errno));
+		mp = (TM *)(tm_addr + i * sizeof(TM));
+		if ((err = dbenv->mutex_alloc(
+		    dbenv, DB_MUTEX_SELF_BLOCK, &mp->mutex)) != 0) {
+			fprintf(stderr,
+			    "DB_ENV->mutex_alloc (per-thread %d): %s\n",
+			    i, db_strerror(err));
 			exit(EXIT_FAILURE);
 		}
-		if (__db_mutex_lock(&dbenv, &mp->mutex)) {
-			fprintf(stderr, "__db_mutex_lock (per-thread %d): %s\n",
-			    i, strerror(errno));
+		if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) {
+			fprintf(stderr,
+			    "DB_ENV->mutex_lock (per-thread %d): %s\n",
+			    i, db_strerror(err));
 			exit(EXIT_FAILURE);
 		}
 		if (verbose)
-			printf("\t@ %#lx\n", (u_long)&mp->mutex);
+			printf("%lu ", (u_long)mp->mutex);
 	}
+	if (verbose)
+		printf("\n");
 
 	if (verbose)
-		printf("Initialize %d per-lock mutexes:\n", maxlocks);
+		printf("Allocate %d per-lock mutexes: ", maxlocks);
 	for (i = 0; i < maxlocks; ++i) {
-		mp = (TM *)(lm_addr + i * align);
-		if (__db_mutex_init_int(&dbenv, &mp->mutex, 0, 0)) {
-			fprintf(stderr, "__db_mutex_init (per-lock: %d): %s\n",
-			    i, strerror(errno));
+		mp = (TM *)(lm_addr + i * sizeof(TM));
+		if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) {
+			fprintf(stderr,
+			    "DB_ENV->mutex_alloc (per-lock: %d): %s\n",
+			    i, db_strerror(err));
 			exit(EXIT_FAILURE);
 		}
 		if (verbose)
-			printf("\t@ %#lx\n", (u_long)&mp->mutex);
+			printf("%lu ", (u_long)mp->mutex);
 	}
-
-	unmap_file(gm_addr, fd);
+	if (verbose)
+		printf("\n");
 }
 
 /*
@@ -651,28 +772,25 @@ void
 tm_mutex_destroy()
 {
 	TM *gp, *mp;
-	int fd, i;
-	u_int8_t *gm_addr, *lm_addr, *tm_addr;
-
-	map_file(&gm_addr, &tm_addr, &lm_addr, &fd);
+	int err, i;
 
 	if (verbose)
 		printf("Destroy the global mutex.\n");
 	gp = (TM *)gm_addr;
-	if (__db_mutex_destroy(&gp->mutex)) {
+	if ((err = dbenv->mutex_free(dbenv, gp->mutex)) != 0) {
 		fprintf(stderr,
-		    "__db_mutex_destroy (global): %s\n", strerror(errno));
+		    "DB_ENV->mutex_free (global): %s\n", db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 
 	if (verbose)
 		printf("Destroy the per-thread mutexes.\n");
 	for (i = 0; i < nthreads * nprocs; ++i) {
-		mp = (TM *)(tm_addr + i * align);
-		if (__db_mutex_destroy(&mp->mutex)) {
+		mp = (TM *)(tm_addr + i * sizeof(TM));
+		if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) {
 			fprintf(stderr,
-			    "__db_mutex_destroy (per-thread %d): %s\n",
-			    i, strerror(errno));
+			    "DB_ENV->mutex_free (per-thread %d): %s\n",
+			    i, db_strerror(err));
 			exit(EXIT_FAILURE);
 		}
 	}
@@ -680,18 +798,16 @@ tm_mutex_destroy()
 	if (verbose)
 		printf("Destroy the per-lock mutexes.\n");
 	for (i = 0; i < maxlocks; ++i) {
-		mp = (TM *)(tm_addr + i * align);
-		if (__db_mutex_destroy(&mp->mutex)) {
+		mp = (TM *)(lm_addr + i * sizeof(TM));
+		if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) {
 			fprintf(stderr,
-			    "__db_mutex_destroy (per-lock: %d): %s\n",
-			    i, strerror(errno));
+			    "DB_ENV->mutex_free (per-lock: %d): %s\n",
+			    i, db_strerror(err));
 			exit(EXIT_FAILURE);
 		}
 	}
 
-	unmap_file(gm_addr, fd);
-
-	(void)shm_unlink(MT_FILE);
+	(void)unlink(MT_FILE);
 }
 
 /*
@@ -701,21 +817,19 @@ tm_mutex_destroy()
 void
 tm_mutex_stats()
 {
+#ifdef HAVE_STATISTICS
 	TM *mp;
-	int fd, i;
-	u_int8_t *gm_addr, *lm_addr;
-
-	map_file(&gm_addr, NULL, &lm_addr, &fd);
+	int i;
+	u_int32_t set_wait, set_nowait;
 
 	printf("Per-lock mutex statistics.\n");
 	for (i = 0; i < maxlocks; ++i) {
-		mp = (TM *)(lm_addr + i * align);
+		mp = (TM *)(lm_addr + i * sizeof(TM));
+		__mutex_set_wait_info(dbenv, mp->mutex, &set_wait, &set_nowait);
 		printf("mutex %2d: wait: %lu; no wait %lu\n", i,
-		    (u_long)mp->mutex.mutex_set_wait,
-		    (u_long)mp->mutex.mutex_set_nowait);
+		    (u_long)set_wait, (u_long)set_nowait);
 	}
-
-	unmap_file(gm_addr, fd);
+#endif
 }
 
 /*
@@ -723,12 +837,13 @@ tm_mutex_stats()
  *	Map in the backing file.
  */
 void
-map_file(gm_addrp, tm_addrp, lm_addrp, fdp)
+map_file(gm_addrp, tm_addrp, lm_addrp, fhpp)
 	u_int8_t **gm_addrp, **tm_addrp, **lm_addrp;
-	int *fdp;
+	DB_FH **fhpp;
 {
 	void *addr;
-	int fd;
+	DB_FH *fhp;
+	int err;
 
 #ifndef MAP_FAILED
 #define	MAP_FAILED	(void *)-1
@@ -736,29 +851,24 @@ map_file(gm_addrp, tm_addrp, lm_addrp, fdp)
 #ifndef MAP_FILE
 #define	MAP_FILE	0
 #endif
-	if ((fd = shm_open(MT_FILE, O_RDWR, 0)) == -1) {
-		fprintf(stderr, "%s: open %s\n", MT_FILE, strerror(errno));
+	if ((err = __os_open(dbenv, MT_FILE, 0, 0, &fhp)) != 0) {
+		fprintf(stderr, "%s: open %s\n", MT_FILE, db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 
-	addr = mmap(NULL, len,
-	    PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, (off_t)0);
-	if (addr == MAP_FAILED) {
-		fprintf(stderr, "%s: mmap: %s\n", MT_FILE, strerror(errno));
+	if ((err = __os_mapfile(dbenv, MT_FILE, fhp, len, 0, &addr)) != 0) {
+		fprintf(stderr, "%s: mmap: %s\n", MT_FILE, db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 
-	if (gm_addrp != NULL)
-		*gm_addrp = (u_int8_t *)addr;
-	addr = (u_int8_t *)addr + align;
-	if (tm_addrp != NULL)
-		*tm_addrp = (u_int8_t *)addr;
-	addr = (u_int8_t *)addr + align * (nthreads * nprocs);
-	if (lm_addrp != NULL)
-		*lm_addrp = (u_int8_t *)addr;
+	*gm_addrp = (u_int8_t *)addr;
+	addr = (u_int8_t *)addr + sizeof(TM);
+	*tm_addrp = (u_int8_t *)addr;
+	addr = (u_int8_t *)addr + sizeof(TM) * (nthreads * nprocs);
+	*lm_addrp = (u_int8_t *)addr;
 
-	if (fdp != NULL)
-		*fdp = fd;
+	if (fhpp != NULL)
+		*fhpp = fhp;
 }
 
 /*
@@ -766,16 +876,18 @@ map_file(gm_addrp, tm_addrp, lm_addrp, fdp)
  *	Discard backing file map.
  */
 void
-unmap_file(addr, fd)
+unmap_file(addr, fhp)
 	u_int8_t *addr;
-	int fd;
+	DB_FH *fhp;
 {
-	if (munmap(addr, len) != 0) {
-		fprintf(stderr, "munmap: %s\n", strerror(errno));
+	int err;
+
+	if ((err = __os_unmapfile(dbenv, addr, len)) != 0) {
+		fprintf(stderr, "munmap: %s\n", db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
-	if (close(fd) != 0) {
-		fprintf(stderr, "close: %s\n", strerror(errno));
+	if ((err = __os_closehandle(dbenv, fhp)) != 0) {
+		fprintf(stderr, "close: %s\n", db_strerror(err));
 		exit(EXIT_FAILURE);
 	}
 }
@@ -792,3 +904,117 @@ usage()
 	    "[-n locks] [-p procs] [-T locker=ID|wakeup=ID] [-t threads]");
 	return (EXIT_FAILURE);
 }
+
+/*
+ * os_wait --
+ *	Wait for an array of N procs.
+ */
+int
+os_wait(procs, nprocs)
+	os_pid_t *procs;
+	int nprocs;
+{
+	int i, status;
+#if defined(DB_WIN32)
+	DWORD ret;
+#endif
+
+	status = 0;
+
+#if defined(DB_WIN32)
+	do {
+		ret = WaitForMultipleObjects(nprocs, procs, FALSE, INFINITE);
+		i = ret - WAIT_OBJECT_0;
+		if (i < 0 || i >= nprocs)
+			return (__os_get_errno());
+
+		if ((GetExitCodeProcess(procs[i], &ret) == 0) || (ret != 0))
+			return (ret);
+
+		/* remove the process handle from the list */
+		while (++i < nprocs)
+			procs[i - 1] = procs[i];
+	} while (--nprocs);
+#elif !defined(HAVE_VXWORKS)
+	do {
+		if ((i = wait(&status)) == -1)
+			return (__os_get_errno());
+
+		if (WIFEXITED(status) == 0 || WEXITSTATUS(status) != 0) {
+			for (i = 0; i < nprocs; i++)
+				kill(procs[i], SIGKILL);
+			return (WEXITSTATUS(status));
+		}
+	} while (--nprocs);
+#endif
+
+	return (0);
+}
+
+os_pid_t
+spawn_proc(id, tmpath, typearg)
+	u_long id;
+	char *tmpath, *typearg;
+{
+	char lbuf[16], nbuf[16], pbuf[16], tbuf[16], Tbuf[256];
+	char *const vbuf = verbose ?  "-v" : NULL;
+	char *args[] = { NULL /* tmpath */,
+	    "-l", NULL /* lbuf */, "-n", NULL /* nbuf */,
+	    "-p", NULL /* pbuf */, "-t", NULL /* tbuf */,
+	    "-T", NULL /* Tbuf */, NULL /* vbuf */,
+	    NULL
+	};
+
+	args[0] = tmpath;
+	snprintf(lbuf, sizeof(lbuf),  "%d", maxlocks);
+	args[2] = lbuf;
+	snprintf(nbuf, sizeof(nbuf),  "%d", nlocks);
+	args[4] = nbuf;
+	snprintf(pbuf, sizeof(pbuf),  "%d", nprocs);
+	args[6] = pbuf;
+	snprintf(tbuf, sizeof(tbuf),  "%d", nthreads);
+	args[8] = tbuf;
+	snprintf(Tbuf, sizeof(Tbuf),  "%s=%lu", typearg, id);
+	args[10] = Tbuf;
+	args[11] = vbuf;
+
+	return (os_spawn(tmpath, args));
+}
+
+os_pid_t
+os_spawn(path, argv)
+	const char *path;
+	char *const argv[];
+{
+	os_pid_t pid;
+	int status;
+
+	COMPQUIET(pid, 0);
+	COMPQUIET(status, 0);
+
+#ifdef HAVE_VXWORKS
+	fprintf(stderr, "ERROR: os_spawn not supported for VxWorks.\n");
+	return (OS_BAD_PID);
+#elif defined(HAVE_QNX)
+	/*
+	 * For QNX, we cannot fork if we've ever used threads.  So
+	 * we'll use their spawn function.  We use 'spawnl' which
+	 * is NOT a POSIX function.
+	 *
+	 * The return value of spawnl is just what we want depending
+	 * on the value of the 'wait' arg.
+	 */
+	return (spawnv(P_NOWAIT, path, argv));
+#elif defined(DB_WIN32)
+	return (os_pid_t)(_spawnv(P_NOWAIT, path, argv));
+#else
+	if ((pid = fork()) != 0) {
+		if (pid == -1)
+			return (OS_BAD_PID);
+		return (pid);
+	} else {
+		execv(path, argv);
+		exit(EXIT_FAILURE);
+	}
+#endif
+}
diff --git a/storage/bdb/mutex/uts4_cc.s b/storage/bdb/mutex/uts4_cc.s
index 9b314c4afca..9fca580bbe4 100644
--- a/storage/bdb/mutex/uts4_cc.s
+++ b/storage/bdb/mutex/uts4_cc.s
@@ -1,9 +1,9 @@
  / See the file LICENSE for redistribution information.
  /
- / Copyright (c) 1997-2004
+ / Copyright (c) 1997-2005
  /	Sleepycat Software.  All rights reserved.
  /
- / $Id: uts4_cc.s,v 11.4 2004/01/28 03:36:18 bostic Exp $
+ / $Id: uts4_cc.s,v 12.1 2005/06/16 20:23:22 bostic Exp $
  /
  / int uts_lock ( int *p, int i );
  /             Update the lock word pointed to by p with the
diff --git a/storage/bdb/os/os_abs.c b/storage/bdb/os/os_abs.c
index 3d9f921ae97..1dc4dd3943c 100644
--- a/storage/bdb/os/os_abs.c
+++ b/storage/bdb/os/os_abs.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_abs.c,v 11.7 2004/01/28 03:36:18 bostic Exp $
+ * $Id: os_abs.c,v 12.1 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_alloc.c b/storage/bdb/os/os_alloc.c
index 7dd9f94f52e..858ec7738b1 100644
--- a/storage/bdb/os/os_alloc.c
+++ b/storage/bdb/os/os_alloc.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_alloc.c,v 11.41 2004/07/06 21:06:36 mjc Exp $
+ * $Id: os_alloc.c,v 12.2 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
@@ -313,6 +313,14 @@ __os_realloc(dbenv, size, storep)
 
 	/* Back up to the real beginning */
 	ptr = &((union __db_allocinfo *)ptr)[-1];
+
+	{
+		size_t s;
+
+		s = ((union __db_allocinfo *)ptr)->size;
+		if (((u_int8_t *)ptr)[s - 1] != CLEAR_BYTE)
+			 __os_guard(dbenv);
+	}
 #endif
 
 	/*
diff --git a/storage/bdb/os/os_clock.c b/storage/bdb/os/os_clock.c
index 2a8c44d1c24..9fbacddd6b8 100644
--- a/storage/bdb/os/os_clock.c
+++ b/storage/bdb/os/os_clock.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_clock.c,v 1.15 2004/07/06 17:33:14 bostic Exp $
+ * $Id: os_clock.c,v 12.1 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_config.c b/storage/bdb/os/os_config.c
index dcde0dca9db..cdcb1513543 100644
--- a/storage/bdb/os/os_config.c
+++ b/storage/bdb/os/os_config.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_config.c,v 11.15 2004/01/28 03:36:18 bostic Exp $
+ * $Id: os_config.c,v 12.2 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
@@ -27,3 +27,27 @@ __os_fs_notzero()
 	/* Most filesystems zero out implicitly created pages. */
 	return (0);
 }
+
+/*
+ * __os_support_db_register --
+ *	Return 1 if the system supports DB_REGISTER.
+ *
+ * PUBLIC: int __os_support_db_register __P((void));
+ */
+int
+__os_support_db_register()
+{
+	return (1);
+}
+
+/*
+ * __os_support_replication --
+ *	Return 1 if the system supports replication.
+ *
+ * PUBLIC: int __os_support_replication __P((void));
+ */
+int
+__os_support_replication()
+{
+	return (1);
+}
diff --git a/storage/bdb/os/os_dir.c b/storage/bdb/os/os_dir.c
index 3e381ae7f2b..59fe55ca190 100644
--- a/storage/bdb/os/os_dir.c
+++ b/storage/bdb/os/os_dir.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_dir.c,v 11.17 2004/04/26 18:48:19 bostic Exp $
+ * $Id: os_dir.c,v 12.1 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_errno.c b/storage/bdb/os/os_errno.c
index 52bce4ce67b..508a15abe1e 100644
--- a/storage/bdb/os/os_errno.c
+++ b/storage/bdb/os/os_errno.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_errno.c,v 11.11 2004/01/28 03:36:18 bostic Exp $
+ * $Id: os_errno.c,v 12.1 2005/06/16 20:23:23 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_fid.c b/storage/bdb/os/os_fid.c
index 29f19cd8122..9d5633a43d7 100644
--- a/storage/bdb/os/os_fid.c
+++ b/storage/bdb/os/os_fid.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_fid.c,v 11.21 2004/07/06 13:55:48 bostic Exp $
+ * $Id: os_fid.c,v 12.4 2005/10/14 15:33:08 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,17 +13,6 @@
 #include 
 #include 
 
-#if TIME_WITH_SYS_TIME
-#include 
-#include 
-#else
-#if HAVE_SYS_TIME_H
-#include 
-#else
-#include 
-#endif
-#endif
-
 #include 
 #include 
 #endif
@@ -48,6 +37,8 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 	int unique_okay;
 	u_int8_t *fidp;
 {
+	pid_t pid;
+	db_threadid_t tid;
 	struct stat sb;
 	size_t i;
 	int ret;
@@ -117,6 +108,10 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 		 * if we race on this no real harm will be done, since the
 		 * finished fileid has so many other components.
 		 *
+		 * We use the bottom 32-bits of the process ID, hoping they
+		 * are more random than the top 32-bits (should we be on a
+		 * machine with 64-bit process IDs).
+		 *
 		 * We increment by 100000 on each call as a simple way of
 		 * randomizing; simply incrementing seems potentially less
 		 * useful if pids are also simply incremented, since this
@@ -125,9 +120,10 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 		 * 32-bit platforms, and has few interesting properties in
 		 * base 2.
 		 */
-		if (fid_serial == 0)
-			__os_id(&fid_serial);
-		else
+		if (fid_serial == 0) {
+			dbenv->thread_id(dbenv, &pid, &tid);
+			fid_serial = (u_int32_t)pid;
+		} else
 			fid_serial += 100000;
 
 		for (p =
diff --git a/storage/bdb/os/os_flock.c b/storage/bdb/os/os_flock.c
new file mode 100644
index 00000000000..419407a5b23
--- /dev/null
+++ b/storage/bdb/os/os_flock.c
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: os_flock.c,v 12.4 2005/06/20 14:59:01 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+
+#include 
+#include 
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fdlock --
+ *	Acquire/release a lock on a byte in a file.
+ *
+ * PUBLIC: int __os_fdlock __P((DB_ENV *, DB_FH *, off_t, int, int));
+ */
+int
+__os_fdlock(dbenv, fhp, offset, acquire, nowait)
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+	int acquire, nowait;
+	off_t offset;
+{
+	struct flock fl;
+	int ret;
+
+	DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1);
+
+#ifdef HAVE_FCNTL
+	fl.l_start = offset;
+	fl.l_len = 1;
+	fl.l_type = acquire ? F_WRLCK : F_UNLCK;
+	fl.l_whence = SEEK_SET;
+
+	RETRY_CHK_EINTR_ONLY(
+	    (fcntl(fhp->fd, nowait ? F_SETLK : F_SETLKW, &fl)), ret);
+
+	if (ret != 0 && ret != EACCES && ret != EAGAIN)
+		__db_err(dbenv, "fcntl: %s", strerror(ret));
+	return (ret);
+#else
+	__db_err(dbenv,
+	    "advisory file locking unavailable: %s", strerror(DB_OPNOTSUP));
+	return (DB_OPNOTSUP);
+#endif
+}
diff --git a/storage/bdb/os/os_fsync.c b/storage/bdb/os/os_fsync.c
index 576acf00b5f..81d94ddc0e0 100644
--- a/storage/bdb/os/os_fsync.c
+++ b/storage/bdb/os/os_fsync.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_fsync.c,v 11.22 2004/07/06 20:54:09 mjc Exp $
+ * $Id: os_fsync.c,v 12.3 2005/09/07 17:30:20 bostic Exp $
  */
 
 #include "db_config.h"
@@ -12,9 +12,8 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include 
 
-#include 			/* XXX: Required by __hp3000s900 */
+#include 			/* Required on some platforms. */
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -69,6 +68,9 @@ __os_fsync(dbenv, fhp)
 {
 	int ret;
 
+	/* Check for illegal usage. */
+	DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1);
+
 	/*
 	 * Do nothing if the file descriptor has been marked as not requiring
 	 * any sync to disk.
@@ -76,13 +78,12 @@ __os_fsync(dbenv, fhp)
 	if (F_ISSET(fhp, DB_FH_NOSYNC))
 		return (0);
 
-	/* Check for illegal usage. */
-	DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1);
-
 	if (DB_GLOBAL(j_fsync) != NULL)
 		ret = DB_GLOBAL(j_fsync)(fhp->fd);
 	else
-#ifdef HAVE_FDATASYNC
+#if defined(F_FULLFSYNC)
+		RETRY_CHK((fcntl(fhp->fd, F_FULLFSYNC, 0)), ret);
+#elif defined(HAVE_FDATASYNC)
 		RETRY_CHK((fdatasync(fhp->fd)), ret);
 #else
 		RETRY_CHK((fsync(fhp->fd)), ret);
diff --git a/storage/bdb/os/os_handle.c b/storage/bdb/os/os_handle.c
index 62a7bc1a151..2a87094e1c6 100644
--- a/storage/bdb/os/os_handle.c
+++ b/storage/bdb/os/os_handle.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_handle.c,v 11.40 2004/08/19 17:59:22 sue Exp $
+ * $Id: os_handle.c,v 12.2 2005/08/10 15:47:25 bostic Exp $
  */
 
 #include "db_config.h"
@@ -14,7 +14,6 @@
 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
diff --git a/storage/bdb/os/os_id.c b/storage/bdb/os/os_id.c
index 79df12f9b0f..d53b18806a2 100644
--- a/storage/bdb/os/os_id.c
+++ b/storage/bdb/os/os_id.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_id.c,v 1.9 2004/09/22 16:27:54 bostic Exp $
+ * $Id: os_id.c,v 12.15 2005/11/10 18:44:02 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,36 +13,58 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
+#include "dbinc/mutex_int.h"		/* Required to load appropriate
+					   header files for thread functions. */
 
 /*
  * __os_id --
- *	Return a 32-bit value identifying the current thread of control.
+ *	Return the current process ID.
  *
- * PUBLIC: void __os_id __P((u_int32_t *));
+ * PUBLIC: void __os_id __P((DB_ENV *, pid_t *, db_threadid_t*));
  */
 void
-__os_id(idp)
-	u_int32_t *idp;
+__os_id(dbenv, pidp, tidp)
+	DB_ENV *dbenv;
+	pid_t *pidp;
+	db_threadid_t *tidp;
 {
 	/*
-	 * By default, use the process ID.
+	 * We can't depend on dbenv not being NULL, this routine is called
+	 * from places where there's no DB_ENV handle.  It takes a DB_ENV
+	 * handle as an arg because it's the default DB_ENV->thread_id function.
 	 *
-	 * getpid() returns a pid_t which we convert to a u_int32_t.  I have
-	 * not yet seen a system where a pid_t has 64-bits, but I'm sure they
-	 * exist.  Since we're returning only the bottom 32-bits, you cannot
-	 * use the return of __os_id to reference a process (for example, you
-	 * cannot send a signal to the value returned by __os_id).  To send a
-	 * signal to the current process, use raise(3) instead.
+	 * We cache the pid in the DB_ENV handle, it's a fairly slow call on
+	 * lots of systems.
 	 */
-#ifdef	HAVE_VXWORKS
-	*idp = taskIdSelf();
+	if (pidp != NULL) {
+		if (dbenv == NULL) {
+#if defined(HAVE_VXWORKS)
+			*pidp = taskIdSelf();
 #else
-	*idp = (u_int32_t)getpid();
+			*pidp = getpid();
 #endif
+		} else
+			*pidp = dbenv->pid_cache;
+	}
+
+	if (tidp != NULL) {
+#if defined(DB_WIN32)
+		*tidp = GetCurrentThreadId();
+#elif defined(HAVE_MUTEX_UI_THREADS)
+		*tidp = thr_self();
+#elif defined(HAVE_MUTEX_SOLARIS_LWP) || \
+	defined(HAVE_MUTEX_PTHREADS) || defined(HAVE_PTHREAD_SELF)
+		*tidp = pthread_self();
+#else
+		/*
+		 * Default to just getpid.
+		 */
+		*tidp = 0;
+#endif
+	}
 }
 
 /*
@@ -57,7 +79,9 @@ __os_unique_id(dbenv, idp)
 	u_int32_t *idp;
 {
 	static int first = 1;
-	u_int32_t id, pid, sec, usec;
+	pid_t pid;
+	db_threadid_t tid;
+	u_int32_t id, sec, usec;
 
 	*idp = 0;
 
@@ -65,10 +89,10 @@ __os_unique_id(dbenv, idp)
 	 * Our randomized value is comprised of our process ID, the current
 	 * time of day and a couple of a stack addresses, all XOR'd together.
 	 */
-	__os_id(&pid);
+	__os_id(dbenv, &pid, &tid);
 	__os_clock(dbenv, &sec, &usec);
 
-	id = pid ^ sec ^ usec ^ P_TO_UINT32(&pid);
+	id = (u_int32_t)pid ^ sec ^ usec ^ P_TO_UINT32(&pid);
 
 	/*
 	 * We could try and find a reasonable random-number generator, but
diff --git a/storage/bdb/os/os_map.c b/storage/bdb/os/os_map.c
index adcdaef3c96..d0db7073ec9 100644
--- a/storage/bdb/os/os_map.c
+++ b/storage/bdb/os/os_map.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_map.c,v 11.57 2004/07/06 13:55:48 bostic Exp $
+ * $Id: os_map.c,v 12.3 2005/07/21 01:36:18 bostic Exp $
  */
 
 #include "db_config.h"
@@ -55,7 +55,7 @@ __os_r_sysattach(dbenv, infop, rp)
 		 * threaded.  If we reach this point, we know we're public, so
 		 * it's an error.
 		 */
-#if defined(MUTEX_NO_SHMGET_LOCKS)
+#if defined(HAVE_MUTEX_HPPA_MSEM_INIT)
 		__db_err(dbenv,
 	    "architecture does not support locks inside system shared memory");
 		return (EINVAL);
@@ -163,9 +163,13 @@ __os_r_sysattach(dbenv, infop, rp)
 	 * systems without merged VM/buffer cache systems, or, more to the
 	 * point, *badly* merged VM/buffer cache systems.
 	 */
-	if (ret == 0 && F_ISSET(infop, REGION_CREATE))
-		ret = __db_fileinit(dbenv,
-		    fhp, rp->size, F_ISSET(dbenv, DB_ENV_REGION_INIT) ? 1 : 0);
+	if (ret == 0 && F_ISSET(infop, REGION_CREATE)) {
+		if (F_ISSET(dbenv, DB_ENV_REGION_INIT))
+			ret = __db_file_write(dbenv, "region file", fhp,
+			    rp->size / MEGABYTE, rp->size % MEGABYTE, 0x00);
+		else
+			ret = __db_file_extend(dbenv, fhp, rp->size);
+	}
 
 	/* Map the file in. */
 	if (ret == 0)
diff --git a/storage/bdb/os/os_method.c b/storage/bdb/os/os_method.c
index a5bb17a797b..c304f881547 100644
--- a/storage/bdb/os/os_method.c
+++ b/storage/bdb/os/os_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_method.c,v 11.21 2004/09/17 22:00:31 mjc Exp $
+ * $Id: os_method.c,v 12.1 2005/06/16 20:23:26 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_mkdir.c b/storage/bdb/os/os_mkdir.c
new file mode 100644
index 00000000000..4c8c8e6380d
--- /dev/null
+++ b/storage/bdb/os/os_mkdir.c
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: os_mkdir.c,v 12.8 2005/11/02 03:12:17 mjc Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+#include 
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_mkdir --
+ *	Create a directory.
+ *
+ * PUBLIC: int __os_mkdir __P((DB_ENV *, const char *, int));
+ */
+int
+__os_mkdir(dbenv, name, mode)
+	DB_ENV *dbenv;
+	const char *name;
+	int mode;
+{
+	int ret;
+
+	COMPQUIET(dbenv, NULL);
+
+	/* Make the directory, with paranoid permissions. */
+#ifdef HAVE_VXWORKS
+	RETRY_CHK((mkdir((char *)name)), ret);
+#else
+#ifdef DB_WIN32
+	RETRY_CHK((_mkdir(name)), ret);
+#else
+	RETRY_CHK((mkdir(name, 0600)), ret);
+#endif
+	if (ret != 0)
+		return (ret);
+
+	/* Set the absolute permissions, if specified. */
+#ifndef DB_WIN32
+	if (mode != 0)
+		RETRY_CHK((chmod(name, mode)), ret);
+#endif
+#endif
+	return (ret);
+}
diff --git a/storage/bdb/os/os_oflags.c b/storage/bdb/os/os_oflags.c
index 2ffb6db2d9f..27f72c8b95b 100644
--- a/storage/bdb/os/os_oflags.c
+++ b/storage/bdb/os/os_oflags.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_oflags.c,v 11.14 2004/07/09 18:39:10 mjc Exp $
+ * $Id: os_oflags.c,v 12.2 2005/06/16 20:23:26 bostic Exp $
  */
 
 #include "db_config.h"
@@ -70,18 +70,27 @@ __db_oflags(oflags)
 #ifndef	S_IWUSR
 #define	S_IWUSR	S_IWRITE	/* W for owner */
 #endif
+#ifndef	S_IXUSR
+#define	S_IXUSR	0		/* X for owner */
+#endif
 #ifndef	S_IRGRP
 #define	S_IRGRP	0		/* R for group */
 #endif
 #ifndef	S_IWGRP
 #define	S_IWGRP	0		/* W for group */
 #endif
+#ifndef	S_IXGRP
+#define	S_IXGRP	0		/* X for group */
+#endif
 #ifndef	S_IROTH
 #define	S_IROTH	0		/* R for other */
 #endif
 #ifndef	S_IWOTH
 #define	S_IWOTH	0		/* W for other */
 #endif
+#ifndef	S_IXOTH
+#define	S_IXOTH	0		/* X for other */
+#endif
 #else
 #ifndef	S_IRUSR
 #define	S_IRUSR	0000400		/* R for owner */
@@ -89,18 +98,27 @@ __db_oflags(oflags)
 #ifndef	S_IWUSR
 #define	S_IWUSR	0000200		/* W for owner */
 #endif
+#ifndef	S_IXUSR
+#define	S_IXUSR	0000100		/* X for owner */
+#endif
 #ifndef	S_IRGRP
 #define	S_IRGRP	0000040		/* R for group */
 #endif
 #ifndef	S_IWGRP
 #define	S_IWGRP	0000020		/* W for group */
 #endif
+#ifndef	S_IXGRP
+#define	S_IXGRP	0000010		/* X for group */
+#endif
 #ifndef	S_IROTH
 #define	S_IROTH	0000004		/* R for other */
 #endif
 #ifndef	S_IWOTH
 #define	S_IWOTH	0000002		/* W for other */
 #endif
+#ifndef	S_IXOTH
+#define	S_IXOTH	0000001		/* X for other */
+#endif
 #endif /* DB_WIN32 */
 
 /*
@@ -119,14 +137,20 @@ __db_omode(perm)
 		mode |= S_IRUSR;
 	if (perm[1] == 'w')
 		mode |= S_IWUSR;
-	if (perm[2] == 'r')
+	if (perm[2] == 'x')
+		mode |= S_IXUSR;
+	if (perm[3] == 'r')
 		mode |= S_IRGRP;
-	if (perm[3] == 'w')
+	if (perm[4] == 'w')
 		mode |= S_IWGRP;
-	if (perm[4] == 'r')
+	if (perm[5] == 'x')
+		mode |= S_IXGRP;
+	if (perm[6] == 'r')
 		mode |= S_IROTH;
-	if (perm[5] == 'w')
+	if (perm[7] == 'w')
 		mode |= S_IWOTH;
+	if (perm[8] == 'x')
+		mode |= S_IXOTH;
 	return (mode);
 }
 
diff --git a/storage/bdb/os/os_open.c b/storage/bdb/os/os_open.c
index 44c02e6d497..0ae48092a0e 100644
--- a/storage/bdb/os/os_open.c
+++ b/storage/bdb/os/os_open.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_open.c,v 11.60 2004/09/24 00:43:19 bostic Exp $
+ * $Id: os_open.c,v 12.7 2005/10/31 02:22:32 bostic Exp $
  */
 
 #include "db_config.h"
@@ -19,13 +19,11 @@
 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
 
 static int __os_intermediate_dir __P((DB_ENV *, const char *));
-static int __os_mkdir __P((DB_ENV *, const char *));
 #ifdef HAVE_QNX
 static int __os_region_open __P((DB_ENV *, const char *, int, int, DB_FH **));
 #endif
@@ -94,9 +92,9 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 	oflags = 0;
 
 #define	OKFLAGS								\
-	(DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC | DB_OSO_EXCL |	\
-	 DB_OSO_LOG | DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ |	\
-	 DB_OSO_TEMP | DB_OSO_TRUNC)
+	(DB_OSO_ABSMODE | DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC |\
+	DB_OSO_EXCL | DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ |	\
+	DB_OSO_TEMP | DB_OSO_TRUNC)
 	if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
 		return (ret);
 
@@ -126,7 +124,7 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 		oflags |= O_DIRECT;
 #endif
 #ifdef O_DSYNC
-	if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC))
+	if (LF_ISSET(DB_OSO_DSYNC))
 		oflags |= O_DSYNC;
 #endif
 
@@ -155,8 +153,25 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 	if ((ret = __os_openhandle(dbenv, name, oflags, mode, &fhp)) != 0)
 		return (ret);
 
+#ifdef HAVE_FCHMOD
+	/*
+	 * If the code using Berkeley DB is a library, that code may not be able
+	 * to control the application's umask value.  Allow applications to set
+	 * absolute file modes.  We can't fix the race between file creation and
+	 * the fchmod call -- we can't modify the process' umask here since the
+	 * process may be multi-threaded and the umask value is per-process, not
+	 * per-thread.
+	 */
+	if (LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_ABSMODE))
+		(void)fchmod(fhp->fd, mode);
+#endif
+
 #ifdef O_DSYNC
-	if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC))
+	/*
+	 * If we can configure the file descriptor to flush on write, the
+	 * file descriptor does not need to be explicitly sync'd.
+	 */
+	if (LF_ISSET(DB_OSO_DSYNC))
 		F_SET(fhp, DB_FH_NOSYNC);
 #endif
 
@@ -349,7 +364,7 @@ __os_intermediate_dir(dbenv, name)
 	 * Allocate memory if temporary space is too small.
 	 */
 	if ((len = strlen(name)) > sizeof(buf) - 1) {
-		if ((ret = __os_umalloc(dbenv, len, &t)) != 0)
+		if ((ret = __os_umalloc(dbenv, len + 1, &t)) != 0)
 			return (ret);
 	} else
 		t = buf;
@@ -367,7 +382,8 @@ __os_intermediate_dir(dbenv, name)
 				savech = *p;
 				*p = '\0';
 				if (__os_exists(t, NULL) &&
-				    (ret = __os_mkdir(dbenv, t)) != 0)
+				    (ret = __os_mkdir(
+					dbenv, t, dbenv->dir_mode)) != 0)
 					break;
 				*p = savech;
 			}
@@ -377,7 +393,8 @@ __os_intermediate_dir(dbenv, name)
 				savech = *p;
 				*p = '\0';
 				if (__os_exists(t, NULL) &&
-				    (ret = __os_mkdir(dbenv, t)) != 0)
+				    (ret = __os_mkdir(
+					dbenv, t, dbenv->dir_mode)) != 0)
 					break;
 				*p = savech;
 			}
@@ -385,28 +402,3 @@ __os_intermediate_dir(dbenv, name)
 		__os_free(dbenv, t);
 	return (ret);
 }
-
-/*
- * __os_mkdir --
- *	Create a directory.
- */
-static int
-__os_mkdir(dbenv, name)
-	DB_ENV *dbenv;
-	const char *name;
-{
-	int ret;
-
-	/* Make the directory, with paranoid permissions. */
-#ifdef HAVE_VXWORKS
-	RETRY_CHK((mkdir((char *)name)), ret);
-#else
-	RETRY_CHK((mkdir(name, 0600)), ret);
-	if (ret != 0)
-		return (ret);
-
-	/* Set the absolute permissions. */
-	RETRY_CHK((chmod(name, dbenv->dir_mode)), ret);
-#endif
-	return (ret);
-}
diff --git a/storage/bdb/os/os_region.c b/storage/bdb/os/os_region.c
index 024c0320d35..13083ed6598 100644
--- a/storage/bdb/os/os_region.c
+++ b/storage/bdb/os/os_region.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_region.c,v 11.21 2004/06/10 17:20:57 bostic Exp $
+ * $Id: os_region.c,v 12.4 2005/07/21 01:36:18 bostic Exp $
  */
 
 #include "db_config.h"
@@ -31,7 +31,18 @@ __os_r_attach(dbenv, infop, rp)
 {
 	int ret;
 
-	/* Round off the requested size for the underlying VM. */
+	/*
+	 * All regions are created on 8K boundaries out of sheer paranoia,
+	 * so we don't make some underlying VM unhappy. Make sure we don't
+	 * overflow or underflow.
+	 */
+#define	OS_VMPAGESIZE		(8 * 1024)
+#define	OS_VMROUNDOFF(i) {						\
+	if ((i) <							\
+	    (UINT32_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE)	\
+		(i) += OS_VMPAGESIZE - 1;				\
+	(i) -= (i) % OS_VMPAGESIZE;					\
+}
 	OS_VMROUNDOFF(rp->size);
 
 #ifdef DB_REGIONSIZE_MAX
@@ -52,7 +63,7 @@ __os_r_attach(dbenv, infop, rp)
 	 * I don't know of any architectures (yet!) where malloc is a problem.
 	 */
 	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
-#if defined(MUTEX_NO_MALLOC_LOCKS)
+#if defined(HAVE_MUTEX_HPPA_MSEM_INIT)
 		/*
 		 * !!!
 		 * There exist spinlocks that don't work in malloc memory, e.g.,
@@ -68,12 +79,8 @@ __os_r_attach(dbenv, infop, rp)
 			return (EINVAL);
 		}
 #endif
-		/*
-		 * Pad out the allocation, we're going to align it to mutex
-		 * alignment.
-		 */
-		if ((ret = __os_malloc(dbenv,
-		    sizeof(REGENV) + (MUTEX_ALIGN - 1), &infop->addr)) != 0)
+		if ((ret = __os_malloc(
+		    dbenv, sizeof(REGENV), &infop->addr)) != 0)
 			return (ret);
 
 		infop->max_alloc = rp->size;
@@ -97,7 +104,7 @@ __os_r_attach(dbenv, infop, rp)
 	 * the original values for restoration when the region is discarded.
 	 */
 	infop->addr_orig = infop->addr;
-	infop->addr = ALIGNP_INC(infop->addr_orig, MUTEX_ALIGN);
+	infop->addr = ALIGNP_INC(infop->addr_orig, sizeof(size_t));
 
 	rp->size_orig = rp->size;
 	if (infop->addr != infop->addr_orig)
diff --git a/storage/bdb/os/os_rename.c b/storage/bdb/os/os_rename.c
index a55160bcc5c..46d32b81abc 100644
--- a/storage/bdb/os/os_rename.c
+++ b/storage/bdb/os/os_rename.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_rename.c,v 11.17 2004/07/06 13:55:48 bostic Exp $
+ * $Id: os_rename.c,v 12.2 2005/07/29 14:21:51 bostic Exp $
  */
 
 #include "db_config.h"
@@ -32,8 +32,10 @@ __os_rename(dbenv, old, new, silent)
 {
 	int ret;
 
-	RETRY_CHK((DB_GLOBAL(j_rename) != NULL ?
-	    DB_GLOBAL(j_rename)(old, new) : rename(old, new)), ret);
+	if (DB_GLOBAL(j_rename) != NULL)
+		ret = DB_GLOBAL(j_rename)(old, new);
+	else
+		RETRY_CHK((rename(old, new)), ret);
 
 	/*
 	 * If "silent" is not set, then errors are OK and we should not output
diff --git a/storage/bdb/os/os_root.c b/storage/bdb/os/os_root.c
index bf4702ed2a5..f0a0395b3c9 100644
--- a/storage/bdb/os/os_root.c
+++ b/storage/bdb/os/os_root.c
@@ -1,18 +1,16 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_root.c,v 11.8 2004/01/28 03:36:18 bostic Exp $
+ * $Id: os_root.c,v 12.2 2005/08/10 15:47:26 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef NO_SYSTEM_INCLUDES
 #include 
-
-#include 
 #endif
 
 #include "db_int.h"
diff --git a/storage/bdb/os/os_rpath.c b/storage/bdb/os/os_rpath.c
index 28a0a48261e..9b8b84977ec 100644
--- a/storage/bdb/os/os_rpath.c
+++ b/storage/bdb/os/os_rpath.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_rpath.c,v 11.9 2004/01/28 03:36:18 bostic Exp $
+ * $Id: os_rpath.c,v 12.1 2005/06/16 20:23:26 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_rw.c b/storage/bdb/os/os_rw.c
index 5519f35e4f7..9a9e5b3b665 100644
--- a/storage/bdb/os/os_rw.c
+++ b/storage/bdb/os/os_rw.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_rw.c,v 11.39 2004/09/17 22:00:31 mjc Exp $
+ * $Id: os_rw.c,v 12.5 2005/08/10 15:47:26 bostic Exp $
  */
 
 #include "db_config.h"
@@ -14,7 +14,6 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -78,7 +77,7 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop)
 	}
 slow:
 #endif
-	MUTEX_THREAD_LOCK(dbenv, fhp->mutexp);
+	MUTEX_LOCK(dbenv, fhp->mtx_fh);
 
 	if ((ret = __os_seek(dbenv, fhp,
 	    pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
@@ -95,7 +94,7 @@ slow:
 		break;
 	}
 
-err:	MUTEX_THREAD_UNLOCK(dbenv, fhp->mutexp);
+err:	MUTEX_UNLOCK(dbenv, fhp->mtx_fh);
 
 	return (ret);
 
@@ -207,6 +206,19 @@ __os_physwrite(dbenv, fhp, addr, len, nwp)
 	}
 #endif
 
+	/*
+	 * Make a last "panic" check.  Imagine a thread of control running in
+	 * Berkeley DB, going to sleep.  Another thread of control decides to
+	 * run recovery because the environment is broken.  The first thing
+	 * recovery does is panic the existing environment, but we only check
+	 * the panic flag when crossing the public API.  If the sleeping thread
+	 * wakes up and writes something, we could have two threads of control
+	 * writing the log files at the same time.  So, before writing, make a
+	 * last panic check.  Obviously, there's still a window, but it's very,
+	 * very small.
+	 */
+	PANIC_CHECK(dbenv);
+
 	if (DB_GLOBAL(j_write) != NULL) {
 		*nwp = len;
 		if (DB_GLOBAL(j_write)(fhp->fd, addr, len) != (ssize_t)len) {
diff --git a/storage/bdb/os/os_seek.c b/storage/bdb/os/os_seek.c
index 482bb6c6c7a..bade2a4355a 100644
--- a/storage/bdb/os/os_seek.c
+++ b/storage/bdb/os/os_seek.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_seek.c,v 11.26 2004/09/17 22:00:31 mjc Exp $
+ * $Id: os_seek.c,v 12.2 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -14,7 +14,6 @@
 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
diff --git a/storage/bdb/os/os_sleep.c b/storage/bdb/os/os_sleep.c
index da3e97280dc..ee20af67f0d 100644
--- a/storage/bdb/os/os_sleep.c
+++ b/storage/bdb/os/os_sleep.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_sleep.c,v 11.23 2004/03/27 19:09:13 bostic Exp $
+ * $Id: os_sleep.c,v 12.2 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -34,7 +34,6 @@
 #endif /* HAVE_VXWORKS */
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
diff --git a/storage/bdb/os/os_spin.c b/storage/bdb/os/os_spin.c
index 23d4d71aec1..8e01c03a474 100644
--- a/storage/bdb/os/os_spin.c
+++ b/storage/bdb/os/os_spin.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_spin.c,v 11.20 2004/06/23 14:10:56 bostic Exp $
+ * $Id: os_spin.c,v 12.3 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -16,7 +16,6 @@
 #endif
 
 #include 			/* Needed for sysconf on Solaris. */
-#include 
 #endif
 
 #include "db_int.h"
@@ -39,18 +38,19 @@ __os_pstat_getdynamic()
 #endif
 
 #if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
-static int __os_sysconf __P((void));
+static u_int32_t __os_sysconf __P((void));
 
 /*
  * __os_sysconf --
  *	Solaris, Linux.
  */
-static int
+static u_int32_t
 __os_sysconf()
 {
 	long nproc;
 
-	return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? (int)nproc : 1);
+	nproc = sysconf(_SC_NPROCESSORS_ONLN);
+	return ((u_int32_t)(nproc > 1 ? nproc : 1));
 }
 #endif
 
@@ -58,37 +58,32 @@ __os_sysconf()
  * __os_spin --
  *	Set the number of default spins before blocking.
  *
- * PUBLIC: void __os_spin __P((DB_ENV *));
+ * PUBLIC: u_int32_t __os_spin __P((DB_ENV *));
  */
-void
+u_int32_t
 __os_spin(dbenv)
 	DB_ENV *dbenv;
 {
-	/*
-	 * If the application specified a value or we've already figured it
-	 * out, return it.
-	 *
-	 * Don't repeatedly call the underlying function because it can be
-	 * expensive (for example, taking multiple filesystem accesses under
-	 * Debian Linux).
-	 */
-	if (dbenv->tas_spins != 0)
-		return;
+	u_int32_t tas_spins;
 
-	dbenv->tas_spins = 1;
+	COMPQUIET(dbenv, NULL);
+
+	tas_spins = 1;
 #if defined(HAVE_PSTAT_GETDYNAMIC)
-	dbenv->tas_spins = __os_pstat_getdynamic();
+	tas_spins = __os_pstat_getdynamic();
 #endif
 #if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
-	dbenv->tas_spins = __os_sysconf();
+	tas_spins = __os_sysconf();
 #endif
 
 	/*
 	 * Spin 50 times per processor, we have anecdotal evidence that this
 	 * is a reasonable value.
 	 */
-	if (dbenv->tas_spins != 1)
-		dbenv->tas_spins *= 50;
+	if (tas_spins != 1)
+		tas_spins *= 50;
+
+	return (tas_spins);
 }
 
 /*
diff --git a/storage/bdb/os/os_stat.c b/storage/bdb/os/os_stat.c
index 92cea98c0ce..233685ab9ff 100644
--- a/storage/bdb/os/os_stat.c
+++ b/storage/bdb/os/os_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_stat.c,v 11.27 2004/07/06 13:55:48 bostic Exp $
+ * $Id: os_stat.c,v 12.1 2005/06/16 20:23:26 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_tmpdir.c b/storage/bdb/os/os_tmpdir.c
index c1abf3cff84..ee80582329b 100644
--- a/storage/bdb/os/os_tmpdir.c
+++ b/storage/bdb/os/os_tmpdir.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_tmpdir.c,v 11.24 2004/10/05 14:55:33 mjc Exp $
+ * $Id: os_tmpdir.c,v 12.1 2005/06/16 20:23:26 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os/os_truncate.c b/storage/bdb/os/os_truncate.c
index 0367fde7366..ecf2cc773c9 100644
--- a/storage/bdb/os/os_truncate.c
+++ b/storage/bdb/os/os_truncate.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2004
+ * Copyright (c) 2004-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_truncate.c,v 11.7 2004/09/17 22:00:31 mjc Exp $
+ * $Id: os_truncate.c,v 12.2 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,7 +13,6 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
diff --git a/storage/bdb/os/os_unlink.c b/storage/bdb/os/os_unlink.c
index 228e06d3918..da6fbcc3637 100644
--- a/storage/bdb/os/os_unlink.c
+++ b/storage/bdb/os/os_unlink.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_unlink.c,v 11.28 2004/07/06 13:55:48 bostic Exp $
+ * $Id: os_unlink.c,v 12.3 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,7 +13,6 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -48,7 +47,7 @@ err:
 	return (ret);
 #else
 	if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
-		(void)__db_overwrite(dbenv, path);
+		(void)__db_file_multi_write(dbenv, path);
 
 	return (__os_unlink(dbenv, path));
 #endif
diff --git a/storage/bdb/os_vxworks/os_vx_abs.c b/storage/bdb/os_vxworks/os_vx_abs.c
deleted file mode 100644
index 34a1fe37a7b..00000000000
--- a/storage/bdb/os_vxworks/os_vx_abs.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997-2004
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: os_vx_abs.c,v 1.9 2004/01/28 03:36:19 bostic Exp $
- */
-
-#include "db_config.h"
-
-#include "db_int.h"
-#include "iosLib.h"
-
-/*
- * __os_abspath --
- *	Return if a path is an absolute path.
- */
-int
-__os_abspath(path)
-	const char *path;
-{
-	DEV_HDR *dummy;
-	char *ptail;
-
-	/*
-	 * VxWorks devices can be rooted at any name at all.
-	 * Use iosDevFind() to see if name matches any of our devices.
-	 */
-	if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
-		return (0);
-	/*
-	 * If the routine used a device, then ptail points to the
-	 * rest and we are an abs path.
-	 */
-	if (ptail != path)
-		return (1);
-	/*
-	 * If the path starts with a '/', then we are an absolute path,
-	 * using the host machine, otherwise we are not.
-	 */
-	return (path[0] == '/');
-}
diff --git a/storage/bdb/os_vxworks/os_vx_config.c b/storage/bdb/os_vxworks/os_vx_config.c
deleted file mode 100644
index b90a4365df1..00000000000
--- a/storage/bdb/os_vxworks/os_vx_config.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1999-2004
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: os_vx_config.c,v 1.6 2004/01/28 03:36:19 bostic Exp $
- */
-
-#include "db_config.h"
-
-#include "db_int.h"
-
-/*
- * __os_fs_notzero --
- *	Return 1 if allocated filesystem blocks are not zeroed.
- *
- * PUBLIC: int __os_fs_notzero __P((void));
- */
-int
-__os_fs_notzero()
-{
-	/*
-	 * Some VxWorks FS drivers do not zero-fill pages that were never
-	 * explicitly written to the file, they give you random garbage,
-	 * and that breaks Berkeley DB.
-	 */
-	return (1);
-}
diff --git a/storage/bdb/os_vxworks/os_vx_map.c b/storage/bdb/os_vxworks/os_vx_map.c
deleted file mode 100644
index 416f4cc8c2d..00000000000
--- a/storage/bdb/os_vxworks/os_vx_map.c
+++ /dev/null
@@ -1,439 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1998-2004
- *	Sleepycat Software.  All rights reserved.
- *
- * This code is derived from software contributed to Sleepycat Software by
- * Frederick G.M. Roeber of Netscape Communications Corp.
- *
- * $Id: os_vx_map.c,v 1.23 2004/01/28 03:36:19 bostic Exp $
- */
-
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
-#include 
-#include 
-#endif
-
-#include "db_int.h"
-
-/*
- * DB uses memory-mapped files for two things:
- *	faster access of read-only databases, and
- *	shared memory for process synchronization and locking.
- * The code carefully does not mix the two uses.  The first-case uses are
- * actually written such that memory-mapping isn't really required -- it's
- * merely a convenience -- so we don't have to worry much about it.  In the
- * second case, it's solely used as a shared memory mechanism, so that's
- * all we have to replace.
- *
- * All memory in VxWorks is shared, and a task can allocate memory and keep
- * notes.  So I merely have to allocate memory, remember the "filename" for
- * that memory, and issue small-integer segment IDs which index the list of
- * these shared-memory segments. Subsequent opens are checked against the
- * list of already open segments.
- */
-typedef struct {
-	void *segment;			/* Segment address. */
-	u_int32_t size;			/* Segment size. */
-	char *name;			/* Segment name. */
-	long segid;			/* Segment ID. */
-} os_segdata_t;
-
-static os_segdata_t *__os_segdata;	/* Segment table. */
-static int __os_segdata_size;		/* Segment table size. */
-
-#define	OS_SEGDATA_STARTING_SIZE 16
-#define	OS_SEGDATA_INCREMENT	 16
-
-static int __os_segdata_allocate
-	       __P((DB_ENV *, const char *, REGINFO *, REGION *));
-static int __os_segdata_find_byname
-	       __P((DB_ENV *, const char *, REGINFO *, REGION *));
-static int __os_segdata_init __P((DB_ENV *));
-static int __os_segdata_new __P((DB_ENV *, int *));
-static int __os_segdata_release __P((DB_ENV *, REGION *, int));
-
-/*
- * __os_r_sysattach --
- *	Create/join a shared memory region.
- *
- * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
- */
-int
-__os_r_sysattach(dbenv, infop, rp)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-	REGION *rp;
-{
-	int ret;
-
-	if (__os_segdata == NULL)
-		__os_segdata_init(dbenv);
-
-	DB_BEGIN_SINGLE_THREAD;
-
-	/* Try to find an already existing segment. */
-	ret = __os_segdata_find_byname(dbenv, infop->name, infop, rp);
-
-	/*
-	 * If we are trying to join a region, it is easy, either we
-	 * found it and we return, or we didn't find it and we return
-	 * an error that it doesn't exist.
-	 */
-	if (!F_ISSET(infop, REGION_CREATE)) {
-		if (ret != 0) {
-			__db_err(dbenv, "segment %s does not exist",
-			    infop->name);
-			ret = EAGAIN;
-		}
-		goto out;
-	}
-
-	/*
-	 * If we get here, we are trying to create the region.
-	 * There are several things to consider:
-	 * - if we have an error (not a found or not-found value), return.
-	 * - they better have shm_key set.
-	 * - if the region is already there (ret == 0 from above),
-	 * assume the application crashed and we're restarting.
-	 * Delete the old region.
-	 * - try to create the region.
-	 */
-	if (ret != 0 && ret != ENOENT)
-		goto out;
-
-	if (dbenv->shm_key == INVALID_REGION_SEGID) {
-		__db_err(dbenv, "no base shared memory ID specified");
-		ret = EAGAIN;
-		goto out;
-	}
-	if (ret == 0 && __os_segdata_release(dbenv, rp, 1) != 0) {
-		__db_err(dbenv,
-		    "key: %ld: shared memory region already exists",
-		    dbenv->shm_key + (infop->id - 1));
-		ret = EAGAIN;
-		goto out;
-	}
-
-	ret = __os_segdata_allocate(dbenv, infop->name, infop, rp);
-out:
-	DB_END_SINGLE_THREAD;
-	return (ret);
-}
-
-/*
- * __os_r_sysdetach --
- *	Detach from a shared region.
- *
- * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
- */
-int
-__os_r_sysdetach(dbenv, infop, destroy)
-	DB_ENV *dbenv;
-	REGINFO *infop;
-	int destroy;
-{
-	/*
-	 * If just detaching, there is no mapping to discard.
-	 * If destroying, remove the region.
-	 */
-	if (destroy)
-		return (__os_segdata_release(dbenv, infop->rp, 0));
-	return (0);
-}
-
-/*
- * __os_mapfile --
- *	Map in a shared memory file.
- *
- * PUBLIC: int __os_mapfile __P((DB_ENV *,
- * PUBLIC:    char *, DB_FH *, size_t, int, void **));
- */
-int
-__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
-	DB_ENV *dbenv;
-	char *path;
-	DB_FH *fhp;
-	int is_rdonly;
-	size_t len;
-	void **addrp;
-{
-	/* We cannot map in regular files in VxWorks. */
-	COMPQUIET(dbenv, NULL);
-	COMPQUIET(path, NULL);
-	COMPQUIET(fhp, NULL);
-	COMPQUIET(is_rdonly, 0);
-	COMPQUIET(len, 0);
-	COMPQUIET(addrp, NULL);
-	return (EINVAL);
-}
-
-/*
- * __os_unmapfile --
- *	Unmap the shared memory file.
- *
- * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
- */
-int
-__os_unmapfile(dbenv, addr, len)
-	DB_ENV *dbenv;
-	void *addr;
-	size_t len;
-{
-	/* We cannot map in regular files in VxWorks. */
-	COMPQUIET(dbenv, NULL);
-	COMPQUIET(addr, NULL);
-	COMPQUIET(len, 0);
-	return (EINVAL);
-}
-
-/*
- * __os_segdata_init --
- *	Initializes the library's table of shared memory segments.
- *	Called once on the first time through __os_segdata_new().
- */
-static int
-__os_segdata_init(dbenv)
-	DB_ENV *dbenv;
-{
-	int ret;
-
-	if (__os_segdata != NULL) {
-		__db_err(dbenv, "shared memory segment already exists");
-		return (EEXIST);
-	}
-
-	/*
-	 * The lock init call returns a locked lock.
-	 */
-	DB_BEGIN_SINGLE_THREAD;
-	__os_segdata_size = OS_SEGDATA_STARTING_SIZE;
-	ret = __os_calloc(dbenv,
-	    __os_segdata_size, sizeof(os_segdata_t), &__os_segdata);
-	DB_END_SINGLE_THREAD;
-	return (ret);
-}
-
-/*
- * __os_segdata_destroy --
- *	Destroys the library's table of shared memory segments.  It also
- *	frees all linked data: the segments themselves, and their names.
- *	Currently not called.  This function should be called if the
- *	user creates a function to unload or shutdown.
- *
- * PUBLIC: int __os_segdata_destroy __P((DB_ENV *));
- */
-int
-__os_segdata_destroy(dbenv)
-	DB_ENV *dbenv;
-{
-	os_segdata_t *p;
-	int i;
-
-	if (__os_segdata == NULL)
-		return (0);
-
-	DB_BEGIN_SINGLE_THREAD;
-	for (i = 0; i < __os_segdata_size; i++) {
-		p = &__os_segdata[i];
-		if (p->name != NULL) {
-			__os_free(dbenv, p->name);
-			p->name = NULL;
-		}
-		if (p->segment != NULL) {
-			__os_free(dbenv, p->segment);
-			p->segment = NULL;
-		}
-		p->size = 0;
-	}
-
-	__os_free(dbenv, __os_segdata);
-	__os_segdata = NULL;
-	__os_segdata_size = 0;
-	DB_END_SINGLE_THREAD;
-
-	return (0);
-}
-
-/*
- * __os_segdata_allocate --
- *	Creates a new segment of the specified size, optionally with the
- *	specified name.
- *
- * Assumes it is called with the SEGDATA lock taken.
- */
-static int
-__os_segdata_allocate(dbenv, name, infop, rp)
-	DB_ENV *dbenv;
-	const char *name;
-	REGINFO *infop;
-	REGION *rp;
-{
-	os_segdata_t *p;
-	int id, ret;
-
-	if ((ret = __os_segdata_new(dbenv, &id)) != 0)
-		return (ret);
-
-	p = &__os_segdata[id];
-	if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
-		return (ret);
-	if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
-		__os_free(dbenv, p->segment);
-		p->segment = NULL;
-		return (ret);
-	}
-	p->size = rp->size;
-	p->segid = dbenv->shm_key + infop->id - 1;
-
-	infop->addr = p->segment;
-	rp->segid = id;
-
-	return (0);
-}
-
-/*
- * __os_segdata_new --
- *	Finds a new segdata slot.  Does not initialise it, so the fd returned
- *	is only valid until you call this again.
- *
- * Assumes it is called with the SEGDATA lock taken.
- */
-static int
-__os_segdata_new(dbenv, segidp)
-	DB_ENV *dbenv;
-	int *segidp;
-{
-	os_segdata_t *p;
-	int i, newsize, ret;
-
-	if (__os_segdata == NULL) {
-		__db_err(dbenv, "shared memory segment not initialized");
-		return (EAGAIN);
-	}
-
-	for (i = 0; i < __os_segdata_size; i++) {
-		p = &__os_segdata[i];
-		if (p->segment == NULL) {
-			*segidp = i;
-			return (0);
-		}
-	}
-
-	/*
-	 * No more free slots, expand.
-	 */
-	newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
-	if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
-	    &__os_segdata)) != 0)
-		return (ret);
-	memset(&__os_segdata[__os_segdata_size],
-	    0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));
-
-	*segidp = __os_segdata_size;
-	__os_segdata_size = newsize;
-
-	return (0);
-}
-
-/*
- * __os_segdata_find_byname --
- *	Finds a segment by its name and shm_key.
- *
- * Assumes it is called with the SEGDATA lock taken.
- *
- * PUBLIC: __os_segdata_find_byname
- * PUBLIC:     __P((DB_ENV *, const char *, REGINFO *, REGION *));
- */
-static int
-__os_segdata_find_byname(dbenv, name, infop, rp)
-	DB_ENV *dbenv;
-	const char *name;
-	REGINFO *infop;
-	REGION *rp;
-{
-	os_segdata_t *p;
-	long segid;
-	int i;
-
-	if (__os_segdata == NULL) {
-		__db_err(dbenv, "shared memory segment not initialized");
-		return (EAGAIN);
-	}
-
-	if (name == NULL) {
-		__db_err(dbenv, "no segment name given");
-		return (EAGAIN);
-	}
-
-	/*
-	 * If we are creating the region, compute the segid.
-	 * If we are joining the region, we use the segid in the
-	 * index we are given.
-	 */
-	if (F_ISSET(infop, REGION_CREATE))
-		segid = dbenv->shm_key + (infop->id - 1);
-	else {
-		if (rp->segid >= __os_segdata_size ||
-		    rp->segid == INVALID_REGION_SEGID) {
-			__db_err(dbenv, "Invalid segment id given");
-			return (EAGAIN);
-		}
-		segid = __os_segdata[rp->segid].segid;
-	}
-	for (i = 0; i < __os_segdata_size; i++) {
-		p = &__os_segdata[i];
-		if (p->name != NULL && strcmp(name, p->name) == 0 &&
-		    p->segid == segid) {
-			infop->addr = p->segment;
-			rp->segid = i;
-			return (0);
-		}
-	}
-	return (ENOENT);
-}
-
-/*
- * __os_segdata_release --
- *	Free a segdata entry.
- */
-static int
-__os_segdata_release(dbenv, rp, is_locked)
-	DB_ENV *dbenv;
-	REGION *rp;
-	int is_locked;
-{
-	os_segdata_t *p;
-
-	if (__os_segdata == NULL) {
-		__db_err(dbenv, "shared memory segment not initialized");
-		return (EAGAIN);
-	}
-
-	if (rp->segid < 0 || rp->segid >= __os_segdata_size) {
-		__db_err(dbenv, "segment id %ld out of range", rp->segid);
-		return (EINVAL);
-	}
-
-	if (is_locked == 0)
-		DB_BEGIN_SINGLE_THREAD;
-	p = &__os_segdata[rp->segid];
-	if (p->name != NULL) {
-		__os_free(dbenv, p->name);
-		p->name = NULL;
-	}
-	if (p->segment != NULL) {
-		__os_free(dbenv, p->segment);
-		p->segment = NULL;
-	}
-	p->size = 0;
-	if (is_locked == 0)
-		DB_END_SINGLE_THREAD;
-
-	/* Any shrink-table logic could go here */
-
-	return (0);
-}
diff --git a/storage/bdb/os_win32/os_abs.c b/storage/bdb/os_win32/os_abs.c
index ab05b0a7e84..b22231f7f4d 100644
--- a/storage/bdb/os_win32/os_abs.c
+++ b/storage/bdb/os_win32/os_abs.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_abs.c,v 11.7 2004/01/28 03:36:19 bostic Exp $
+ * $Id: os_abs.c,v 12.1 2005/06/16 20:23:28 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_clock.c b/storage/bdb/os_win32/os_clock.c
index c77076691f7..8abd26eaa55 100644
--- a/storage/bdb/os_win32/os_clock.c
+++ b/storage/bdb/os_win32/os_clock.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_clock.c,v 1.11 2004/06/28 13:57:18 bostic Exp $
+ * $Id: os_clock.c,v 12.1 2005/06/16 20:23:28 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_config.c b/storage/bdb/os_win32/os_config.c
index 41daebd37e0..762445863e5 100644
--- a/storage/bdb/os_win32/os_config.c
+++ b/storage/bdb/os_win32/os_config.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_config.c,v 11.18 2004/02/09 20:54:27 mjc Exp $
+ * $Id: os_config.c,v 12.2 2005/06/16 20:23:28 bostic Exp $
  */
 
 #include "db_config.h"
@@ -86,3 +86,23 @@ __os_fs_notzero()
 	}
 	return (__os_notzero);
 }
+
+/*
+ * __os_support_db_register --
+ *	Return 1 if the system supports DB_REGISTER.
+ */
+int
+__os_support_db_register()
+{
+	return (__os_is_winnt());
+}
+
+/*
+ * __os_support_replication --
+ *	Return 1 if the system supports replication.
+ */
+int
+__os_support_replication()
+{
+	return (__os_is_winnt());
+}
diff --git a/storage/bdb/os_win32/os_dir.c b/storage/bdb/os_win32/os_dir.c
index e0abbb6ee16..f3d28fbb791 100644
--- a/storage/bdb/os_win32/os_dir.c
+++ b/storage/bdb/os_win32/os_dir.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_dir.c,v 11.20 2004/10/13 19:12:17 bostic Exp $
+ * $Id: os_dir.c,v 12.2 2005/07/06 23:52:43 dda Exp $
  */
 
 #include "db_config.h"
@@ -27,12 +27,17 @@ __os_dirlist(dbenv, dir, namesp, cntp)
 	int arraysz, cnt, ret;
 	char **names, *onename;
 	_TCHAR tfilespec[MAXPATHLEN + 1];
+	_TCHAR *tdir;
 
 	if (DB_GLOBAL(j_dirlist) != NULL)
 		return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
 
+	TO_TSTRING(dbenv, dir, tdir, ret);
+	if (ret != 0)
+		return (ret);
+
 	(void)_sntprintf(tfilespec, MAXPATHLEN,
-	    _T("%hs%hc*"), dir, PATH_SEPARATOR[0]);
+	    _T("%s%hc*"), tdir, PATH_SEPARATOR[0]);
 	if ((dirhandle = FindFirstFile(tfilespec, &fdata))
 	    == INVALID_HANDLE_VALUE)
 		return (__os_get_errno());
@@ -78,6 +83,8 @@ err:	if (!FindClose(dirhandle) && ret == 0)
 	} else if (names != NULL)
 		__os_dirfree(dbenv, names, cnt);
 
+	FREE_STRING(dbenv, tdir);
+
 	return (ret);
 }
 
diff --git a/storage/bdb/os_win32/os_errno.c b/storage/bdb/os_win32/os_errno.c
index 1af2824cc78..80869ec9cd3 100644
--- a/storage/bdb/os_win32/os_errno.c
+++ b/storage/bdb/os_win32/os_errno.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_errno.c,v 11.14 2004/07/06 21:06:38 mjc Exp $
+ * $Id: os_errno.c,v 12.2 2005/06/16 20:23:28 bostic Exp $
  */
 
 #include "db_config.h"
@@ -101,10 +101,8 @@ __os_get_errno()
 		ret = EACCES;
 		break;
 
+	case ERROR_LOCK_FAILED:
 	case ERROR_NOT_READY:
-		ret = EBUSY;
-		break;
-
 	case ERROR_LOCK_VIOLATION:
 	case ERROR_SHARING_VIOLATION:
 		ret = EBUSY;
diff --git a/storage/bdb/os_win32/os_fid.c b/storage/bdb/os_win32/os_fid.c
index 69df865d449..b2e2340b67e 100644
--- a/storage/bdb/os_win32/os_fid.c
+++ b/storage/bdb/os_win32/os_fid.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_fid.c,v 11.19 2004/07/06 21:06:38 mjc Exp $
+ * $Id: os_fid.c,v 12.4 2005/10/11 18:17:00 bostic Exp $
  */
 
 #include "db_config.h"
@@ -25,6 +25,8 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 	int unique_okay;
 	u_int8_t *fidp;
 {
+	db_threadid_t tid;
+	pid_t pid;
 	size_t i;
 	u_int32_t tmp;
 	u_int8_t *p;
@@ -53,6 +55,10 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 	 * this no real harm will be done, since the finished fileid
 	 * has so many other components.
 	 *
+	 * We use the bottom 32-bits of the process ID, hoping they
+	 * are more random than the top 32-bits (should we be on a
+	 * machine with 64-bit process IDs).
+	 *
 	 * We increment by 100000 on each call as a simple way of
 	 * randomizing;  simply incrementing seems potentially less useful
 	 * if pids are also simply incremented, since this is process-local
@@ -60,9 +66,10 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
 	 * pushes us out of pid space on most platforms, and has few
 	 * interesting properties in base 2.
 	 */
-	if (fid_serial == SERIAL_INIT)
-		__os_id(&fid_serial);
-	else
+	if (fid_serial == SERIAL_INIT) {
+		__os_id(dbenv, &pid, &tid);
+		fid_serial = pid;
+	} else
 		fid_serial += 100000;
 
 	/*
diff --git a/storage/bdb/os_win32/os_flock.c b/storage/bdb/os_win32/os_flock.c
new file mode 100644
index 00000000000..0e9c83163e2
--- /dev/null
+++ b/storage/bdb/os_win32/os_flock.c
@@ -0,0 +1,71 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: os_flock.c,v 1.6 2005/06/16 20:23:28 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#include "db_int.h"
+
+/*
+ * __os_fdlock --
+ *	Acquire/release a lock on a byte in a file.
+ *
+ * PUBLIC: int __os_fdlock __P((DB_ENV *, DB_FH *, off_t, int, int));
+ */
+int
+__os_fdlock(dbenv, fhp, offset, acquire, nowait)
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+	int acquire, nowait;
+	off_t offset;
+{
+	int ret;
+	DWORD low, high;
+	OVERLAPPED over;
+
+	DB_ASSERT(F_ISSET(fhp, DB_FH_OPENED) &&
+	    fhp->handle != INVALID_HANDLE_VALUE);
+
+	/*
+	 * Windows file locking interferes with read/write operations, so we
+	 * map the ranges to an area past the end of the file.
+	 */
+	DB_ASSERT(offset < (u_int64_t)INT64_MAX);
+	offset = UINT64_MAX - offset;
+	low = (DWORD)offset;
+	high = (DWORD)(offset >> 32);
+
+	if (acquire) {
+		if (nowait)
+			RETRY_CHK_EINTR_ONLY(
+			    !LockFile(fhp->handle, low, high, 1, 0), ret);
+		else if (__os_is_winnt()) {
+			memset(&over, 0, sizeof over);
+			over.Offset = low;
+			over.OffsetHigh = high;
+			RETRY_CHK_EINTR_ONLY(
+			    !LockFileEx(fhp->handle, LOCKFILE_EXCLUSIVE_LOCK,
+			    0, 1, 0, &over),
+			    ret);
+		} else {
+			/* Windows 9x/ME doesn't support a blocking call. */
+			for (;;) {
+				RETRY_CHK_EINTR_ONLY(
+				    !LockFile(fhp->handle, low, high, 1, 0),
+				    ret);
+				if (ret != EAGAIN)
+					break;
+				__os_sleep(dbenv, 1, 0);
+			}
+		}
+	} else
+		RETRY_CHK_EINTR_ONLY(
+		    !UnlockFile(fhp->handle, low, high, 1, 0), ret);
+
+	return (ret);
+}
diff --git a/storage/bdb/os_win32/os_fsync.c b/storage/bdb/os_win32/os_fsync.c
index cc188a2fed5..c5da376ab1e 100644
--- a/storage/bdb/os_win32/os_fsync.c
+++ b/storage/bdb/os_win32/os_fsync.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_fsync.c,v 11.21 2004/07/06 21:06:38 mjc Exp $
+ * $Id: os_fsync.c,v 12.2 2005/08/10 15:47:27 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,7 +13,6 @@
 #include 
 
 #include 			/* XXX: Required by __hp3000s900 */
-#include 
 #include 
 #endif
 
diff --git a/storage/bdb/os_win32/os_handle.c b/storage/bdb/os_win32/os_handle.c
index 4953afd3100..338bfd1e2e7 100644
--- a/storage/bdb/os_win32/os_handle.c
+++ b/storage/bdb/os_win32/os_handle.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1998-2004
+ * Copyright (c) 1998-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_handle.c,v 11.39 2004/07/06 21:06:38 mjc Exp $
+ * $Id: os_handle.c,v 12.3 2005/11/02 03:12:18 mjc Exp $
  */
 
 #include "db_config.h"
@@ -14,7 +14,6 @@
 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -50,7 +49,7 @@ __os_openhandle(dbenv, name, flags, mode, fhpp)
 	retries = 0;
 	for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
 		ret = 0;
-		fhp->fd = open(name, flags, mode);
+		fhp->fd = _open(name, flags, mode);
 
 		if (fhp->fd != -1) {
 			F_SET(fhp, DB_FH_OPENED);
@@ -113,7 +112,7 @@ __os_closehandle(dbenv, fhp)
 		else if (fhp->handle != INVALID_HANDLE_VALUE)
 			RETRY_CHK((!CloseHandle(fhp->handle)), ret);
 		else
-			RETRY_CHK((close(fhp->fd)), ret);
+			RETRY_CHK((_close(fhp->fd)), ret);
 
 		if (ret != 0)
 			__db_err(dbenv, "CloseHandle: %s", strerror(ret));
diff --git a/storage/bdb/os_win32/os_map.c b/storage/bdb/os_win32/os_map.c
index 140ac498023..9e2b5a6db63 100644
--- a/storage/bdb/os_win32/os_map.c
+++ b/storage/bdb/os_win32/os_map.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_map.c,v 11.51 2004/10/05 14:55:34 mjc Exp $
+ * $Id: os_map.c,v 12.2 2005/06/16 20:23:29 bostic Exp $
  */
 
 #include "db_config.h"
@@ -88,7 +88,7 @@ __os_r_sysdetach(dbenv, infop, destroy)
 
 	if (!F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy) {
 		if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
-			(void)__db_overwrite(dbenv, infop->name);
+			(void)__db_file_multi_write(dbenv, infop->name);
 		if ((t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
 			ret = t_ret;
 	}
diff --git a/storage/bdb/os_win32/os_open.c b/storage/bdb/os_win32/os_open.c
index 1aa65cfa077..dde33cb5d1e 100644
--- a/storage/bdb/os_win32/os_open.c
+++ b/storage/bdb/os_win32/os_open.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_open.c,v 11.37 2004/10/05 14:55:35 mjc Exp $
+ * $Id: os_open.c,v 12.6 2005/10/31 11:21:01 mjc Exp $
  */
 
 #include "db_config.h"
@@ -15,7 +15,6 @@
 #include 
 #include 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -68,9 +67,9 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 	tname = NULL;
 
 #define	OKFLAGS								\
-	(DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC | DB_OSO_EXCL |	\
-	 DB_OSO_LOG | DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ |	\
-	 DB_OSO_TEMP | DB_OSO_TRUNC)
+	(DB_OSO_ABSMODE | DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_DSYNC |\
+	DB_OSO_EXCL | DB_OSO_RDONLY | DB_OSO_REGION |	DB_OSO_SEQ |	\
+	DB_OSO_TEMP | DB_OSO_TRUNC)
 	if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
 		return (ret);
 
@@ -85,7 +84,7 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 		if (LF_ISSET(DB_OSO_CREATE))
 			oflags |= O_CREAT;
 #ifdef O_DSYNC
-		if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC))
+		if (LF_ISSET(DB_OSO_DSYNC))
 			oflags |= O_DSYNC;
 #endif
 
@@ -120,8 +119,8 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 
 	/*
 	 * Otherwise, use the Windows/32 CreateFile interface so that we can
-	 * play magic games with log files to get data flush effects similar
-	 * to the POSIX O_DSYNC flag.
+	 * play magic games with files to get data flush effects similar to
+	 * the POSIX O_DSYNC flag.
 	 *
 	 * !!!
 	 * We currently ignore the 'mode' argument.  It would be possible
@@ -139,6 +138,8 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 		access |= GENERIC_WRITE;
 
 	share = FILE_SHARE_READ | FILE_SHARE_WRITE;
+	if (__os_is_winnt())
+		share |= FILE_SHARE_DELETE;
 	attr = FILE_ATTRIBUTE_NORMAL;
 
 	/*
@@ -156,7 +157,7 @@ __os_open_extend(dbenv, name, page_size, flags, mode, fhpp)
 	else
 		createflag = OPEN_EXISTING;	/* open only if existing */
 
-	if (LF_ISSET(DB_OSO_LOG) && LF_ISSET(DB_OSO_DSYNC)) {
+	if (LF_ISSET(DB_OSO_DSYNC)) {
 		F_SET(fhp, DB_FH_NOSYNC);
 		attr |= FILE_FLAG_WRITE_THROUGH;
 	}
diff --git a/storage/bdb/os_win32/os_rename.c b/storage/bdb/os_win32/os_rename.c
index 10263323188..0d6d1783588 100644
--- a/storage/bdb/os_win32/os_rename.c
+++ b/storage/bdb/os_win32/os_rename.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_rename.c,v 1.19 2004/10/05 14:55:36 mjc Exp $
+ * $Id: os_rename.c,v 12.1 2005/06/16 20:23:30 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_rw.c b/storage/bdb/os_win32/os_rw.c
index c3c103a9ed8..394ce029eb8 100644
--- a/storage/bdb/os_win32/os_rw.c
+++ b/storage/bdb/os_win32/os_rw.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_rw.c,v 11.38 2004/09/17 22:00:32 mjc Exp $
+ * $Id: os_rw.c,v 12.4 2005/08/10 15:47:28 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,7 +13,6 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -73,7 +72,7 @@ __os_io(dbenv, op, fhp, pgno, pagesize, buf, niop)
 		}
 	}
 
-slow:	MUTEX_THREAD_LOCK(dbenv, fhp->mutexp);
+slow:	MUTEX_LOCK(dbenv, fhp->mtx_fh);
 
 	if ((ret = __os_seek(dbenv, fhp,
 	    pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
@@ -88,7 +87,7 @@ slow:	MUTEX_THREAD_LOCK(dbenv, fhp->mutexp);
 		break;
 	}
 
-err:	MUTEX_THREAD_UNLOCK(dbenv, fhp->mutexp);
+err:	MUTEX_UNLOCK(dbenv, fhp->mtx_fh);
 
 	return (ret);
 }
@@ -177,6 +176,19 @@ __os_physwrite(dbenv, fhp, addr, len, nwp)
 	int ret;
 	u_int8_t *taddr;
 
+	/*
+	 * Make a last "panic" check.  Imagine a thread of control running in
+	 * Berkeley DB, going to sleep.  Another thread of control decides to
+	 * run recovery because the environment is broken.  The first thing
+	 * recovery does is panic the existing environment, but we only check
+	 * the panic flag when crossing the public API.  If the sleeping thread
+	 * wakes up and writes something, we could have two threads of control
+	 * writing the log files at the same time.  So, before writing, make a
+	 * last panic check.  Obviously, there's still a window, but it's very,
+	 * very small.
+	 */
+	PANIC_CHECK(dbenv);
+
 	if (DB_GLOBAL(j_write) != NULL) {
 		*nwp = len;
 		if (DB_GLOBAL(j_write)(fhp->fd, addr, len) != (ssize_t)len) {
diff --git a/storage/bdb/os_win32/os_seek.c b/storage/bdb/os_win32/os_seek.c
index e356c3884d1..38e86ba032d 100644
--- a/storage/bdb/os_win32/os_seek.c
+++ b/storage/bdb/os_win32/os_seek.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_seek.c,v 11.22 2004/09/17 22:00:32 mjc Exp $
+ * $Id: os_seek.c,v 12.1 2005/06/16 20:23:30 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_sleep.c b/storage/bdb/os_win32/os_sleep.c
index ae06e49803c..4baffe44c11 100644
--- a/storage/bdb/os_win32/os_sleep.c
+++ b/storage/bdb/os_win32/os_sleep.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_sleep.c,v 11.11 2004/03/24 15:13:16 bostic Exp $
+ * $Id: os_sleep.c,v 12.1 2005/06/16 20:23:30 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_spin.c b/storage/bdb/os_win32/os_spin.c
index a5cb5853980..53657fc897d 100644
--- a/storage/bdb/os_win32/os_spin.c
+++ b/storage/bdb/os_win32/os_spin.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_spin.c,v 11.16 2004/03/24 15:13:16 bostic Exp $
+ * $Id: os_spin.c,v 12.2 2005/07/20 16:52:02 bostic Exp $
  */
 
 #include "db_config.h"
@@ -15,18 +15,12 @@
  * __os_spin --
  *	Return the number of default spins before blocking.
  */
-void
+u_int32_t
 __os_spin(dbenv)
 	DB_ENV *dbenv;
 {
 	SYSTEM_INFO SystemInfo;
-
-	/*
-	 * If the application specified a value or we've already figured it
-	 * out, return it.
-	 */
-	if (dbenv->tas_spins != 0)
-		return;
+	u_int32_t tas_spins;
 
 	/* Get the number of processors */
 	GetSystemInfo(&SystemInfo);
@@ -36,9 +30,11 @@ __os_spin(dbenv)
 	 * is a reasonable value.
 	 */
 	if (SystemInfo.dwNumberOfProcessors > 1)
-		 dbenv->tas_spins = 50 * SystemInfo.dwNumberOfProcessors;
+		 tas_spins = 50 * SystemInfo.dwNumberOfProcessors;
 	else
-		 dbenv->tas_spins = 1;
+		 tas_spins = 1;
+
+	return (tas_spins);
 }
 
 /*
diff --git a/storage/bdb/os_win32/os_stat.c b/storage/bdb/os_win32/os_stat.c
index b11da487a7a..e4209f7d87d 100644
--- a/storage/bdb/os_win32/os_stat.c
+++ b/storage/bdb/os_win32/os_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_stat.c,v 11.32 2004/10/07 14:00:11 carol Exp $
+ * $Id: os_stat.c,v 12.1 2005/06/16 20:23:31 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/os_win32/os_truncate.c b/storage/bdb/os_win32/os_truncate.c
index 51820ab1d8e..e36a393a328 100644
--- a/storage/bdb/os_win32/os_truncate.c
+++ b/storage/bdb/os_win32/os_truncate.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2004
+ * Copyright (c) 2004-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_truncate.c,v 1.9 2004/10/05 14:45:30 mjc Exp $
+ * $Id: os_truncate.c,v 12.4 2005/10/12 17:57:33 bostic Exp $
  */
 
 #include "db_config.h"
@@ -29,10 +29,12 @@ __os_truncate(dbenv, fhp, pgno, pgsize)
 			unsigned long low;
 			long high;
 		};
-	} oldpos;
+	} off;
 	off_t offset;
-	int ret, retries, t_ret;
+	HANDLE dup_handle;
+	int ret;
 
+	ret = 0;
 	offset = (off_t)pgsize * pgno;
 
 	if (DB_GLOBAL(j_ftruncate) != NULL) {
@@ -60,36 +62,28 @@ __os_truncate(dbenv, fhp, pgno, pgsize)
 	}
 #endif
 
-	retries = 0;
-	do {
-		/*
-		 * Windows doesn't provide truncate directly.  Instead,
-		 * it has SetEndOfFile, which truncates to the current
-		 * position.  So we have to save the current position,
-		 * seek to where we want to truncate to, then seek back
-		 * to where we were.  To avoid races, all of that needs
-		 * to be done while holding the file handle mutex.
-		 */
-		MUTEX_THREAD_LOCK(dbenv, fhp->mutexp);
-		oldpos.bigint = 0;
-		if ((oldpos.low = SetFilePointer(fhp->handle,
-		    0, &oldpos.high, FILE_CURRENT)) == -1 &&
-		    GetLastError() != NO_ERROR) {
-			ret = __os_get_errno();
-			goto end;
-		}
-		if ((ret = __os_seek(dbenv, fhp, pgsize, pgno,
-		    0, 0, DB_OS_SEEK_SET)) != 0)
-			goto end;
-		if (!SetEndOfFile(fhp->handle))
-			ret = __os_get_errno();
-		if ((t_ret = __os_seek(dbenv, fhp, pgsize,
-		    (db_pgno_t)(oldpos.bigint / pgsize),
-		    0, 0, DB_OS_SEEK_SET)) != 0 && ret == 0)
-			ret = t_ret;
-end:		MUTEX_THREAD_UNLOCK(dbenv, fhp->mutexp);
-	} while ((ret == EAGAIN || ret == EBUSY || ret == EINTR) &&
-	    ++retries < DB_RETRY);
+	/*
+	 * Windows doesn't provide truncate directly.  Instead, it has
+	 * SetEndOfFile, which truncates to the current position.  To
+	 * deal with that, we first duplicate the file handle, then
+	 * seek and set the end of file.  This is necessary to avoid
+	 * races with {Read,Write}File in other threads.
+	 */
+	if (!DuplicateHandle(GetCurrentProcess(), fhp->handle,
+	    GetCurrentProcess(), &dup_handle, 0, FALSE,
+	    DUPLICATE_SAME_ACCESS)) {
+		ret = __os_get_errno();
+		goto done;
+	}
+
+	off.bigint = (__int64)pgsize * pgno;
+	RETRY_CHK((SetFilePointer(dup_handle,
+	    off.low, &off.high, FILE_BEGIN) == INVALID_SET_FILE_POINTER &&
+	    GetLastError() != NO_ERROR) ||
+	    !SetEndOfFile(dup_handle), ret);
+
+	if (!CloseHandle(dup_handle) && ret == 0)
+		ret = __os_get_errno();
 
 done:	if (ret != 0)
 		__db_err(dbenv,
diff --git a/storage/bdb/os_win32/os_unlink.c b/storage/bdb/os_win32/os_unlink.c
index d1b50539ef9..81e72d744d9 100644
--- a/storage/bdb/os_win32/os_unlink.c
+++ b/storage/bdb/os_win32/os_unlink.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1997-2004
+ * Copyright (c) 1997-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: os_unlink.c,v 11.5 2004/10/05 14:55:36 mjc Exp $
+ * $Id: os_unlink.c,v 12.7 2005/10/20 18:57:08 bostic Exp $
  */
 
 #include "db_config.h"
@@ -13,7 +13,6 @@
 #include 
 
 #include 
-#include 
 #endif
 
 #include "db_int.h"
@@ -28,7 +27,7 @@ __os_region_unlink(dbenv, path)
 	const char *path;
 {
 	if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
-		(void)__db_overwrite(dbenv, path);
+		(void)__db_file_multi_write(dbenv, path);
 
 	return (__os_unlink(dbenv, path));
 }
@@ -44,7 +43,9 @@ __os_unlink(dbenv, path)
 	DB_ENV *dbenv;
 	const char *path;
 {
-	_TCHAR *tpath;
+	HANDLE h;
+	_TCHAR *tpath, *orig_tpath, buf[MAXPATHLEN];
+	u_int32_t id;
 	int ret;
 
 	if (DB_GLOBAL(j_unlink) != NULL) {
@@ -55,8 +56,49 @@ __os_unlink(dbenv, path)
 	TO_TSTRING(dbenv, path, tpath, ret);
 	if (ret != 0)
 		return (ret);
+	orig_tpath = tpath;
+
+	/*
+	 * Windows NT and its descendents allow removal of open files, but the
+	 * DeleteFile Win32 system call isn't equivalent to a POSIX unlink.
+	 * Firstly, it only succeeds if FILE_SHARE_DELETE is set when the file
+	 * is opened.  Secondly, it leaves the file in a "zombie" state, where
+	 * it can't be opened again, but a new file with the same name can't be
+	 * created either.
+	 *
+	 * Since we depend on being able to recreate files (during recovery,
+	 * say), we have to first rename the file, and then delete it.  It
+	 * still hangs around, but with a name we don't care about.  The rename
+	 * will fail if the file doesn't exist, which isn't a problem, but if
+	 * it fails for some other reason, we need to know about it or a
+	 * subsequent open may fail for no apparent reason.
+	 */
+	if (__os_is_winnt()) {
+		__os_unique_id(dbenv, &id);
+		_sntprintf(buf, MAXPATHLEN, _T("%s.del.%010u"), tpath, id);
+		if (MoveFile(tpath, buf))
+			tpath = buf;
+		else if (__os_get_errno() != ENOENT)
+			__db_err(dbenv,
+			    "unlink: rename %s to temporary file failed", path);
+
+		/*
+		 * Try removing the file using the delete-on-close flag.  This
+		 * plays nicer with files that are still open than DeleteFile.
+		 */
+		h = CreateFile(tpath, 0, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+		    FILE_FLAG_DELETE_ON_CLOSE, 0);
+		if (h != INVALID_HANDLE_VALUE) {
+			(void)CloseHandle (h);
+			if (GetFileAttributes(tpath) == INVALID_FILE_ATTRIBUTES)
+				goto skipdel;
+		}
+	}
+
 	RETRY_CHK((!DeleteFile(tpath)), ret);
-	FREE_STRING(dbenv, tpath);
+
+skipdel:
+	FREE_STRING(dbenv, orig_tpath);
 
 	/*
 	 * XXX
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
deleted file mode 100644
index 5791faea5f5..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
+++ /dev/null
@@ -1,1663 +0,0 @@
-
-package BerkeleyDB;
-
-
-#     Copyright (c) 1997-2004 Paul Marquess. All rights reserved.
-#     This program is free software; you can redistribute it and/or
-#     modify it under the same terms as Perl itself.
-#
-
-# The documentation for this module is at the bottom of this file,
-# after the line __END__.
-
-BEGIN { require 5.004_04 }
-
-use strict;
-use Carp;
-use vars qw($VERSION @ISA @EXPORT $AUTOLOAD
-		$use_XSLoader);
-
-$VERSION = '0.26';
-
-require Exporter;
-#require DynaLoader;
-require AutoLoader;
-
-BEGIN {
-    $use_XSLoader = 1 ;
-    { local $SIG{__DIE__} ; eval { require XSLoader } ; }
- 
-    if ($@) {
-        $use_XSLoader = 0 ;
-        require DynaLoader;
-        @ISA = qw(DynaLoader);
-    }
-}
-
-@ISA = qw(Exporter DynaLoader);
-# Items to export into callers namespace by default. Note: do not export
-# names by default without a very good reason. Use EXPORT_OK instead.
-# Do not simply export all your public functions/methods/constants.
-
-# NOTE -- Do not add to @EXPORT directly. It is written by mkconsts
-@EXPORT = qw(
-	DB_AFTER
-	DB_AGGRESSIVE
-	DB_ALREADY_ABORTED
-	DB_APPEND
-	DB_APPLY_LOGREG
-	DB_APP_INIT
-	DB_ARCH_ABS
-	DB_ARCH_DATA
-	DB_ARCH_LOG
-	DB_ARCH_REMOVE
-	DB_AUTO_COMMIT
-	DB_BEFORE
-	DB_BTREE
-	DB_BTREEMAGIC
-	DB_BTREEOLDVER
-	DB_BTREEVERSION
-	DB_BUFFER_SMALL
-	DB_CACHED_COUNTS
-	DB_CDB_ALLDB
-	DB_CHECKPOINT
-	DB_CHKSUM
-	DB_CHKSUM_SHA1
-	DB_CLIENT
-	DB_CL_WRITER
-	DB_COMMIT
-	DB_CONSUME
-	DB_CONSUME_WAIT
-	DB_CREATE
-	DB_CURLSN
-	DB_CURRENT
-	DB_CXX_NO_EXCEPTIONS
-	DB_DEGREE_2
-	DB_DELETED
-	DB_DELIMITER
-	DB_DIRECT
-	DB_DIRECT_DB
-	DB_DIRECT_LOG
-	DB_DIRTY_READ
-	DB_DONOTINDEX
-	DB_DSYNC_LOG
-	DB_DUP
-	DB_DUPCURSOR
-	DB_DUPSORT
-	DB_DURABLE_UNKNOWN
-	DB_EID_BROADCAST
-	DB_EID_INVALID
-	DB_ENCRYPT
-	DB_ENCRYPT_AES
-	DB_ENV_APPINIT
-	DB_ENV_AUTO_COMMIT
-	DB_ENV_CDB
-	DB_ENV_CDB_ALLDB
-	DB_ENV_CREATE
-	DB_ENV_DBLOCAL
-	DB_ENV_DIRECT_DB
-	DB_ENV_DIRECT_LOG
-	DB_ENV_DSYNC_LOG
-	DB_ENV_FATAL
-	DB_ENV_LOCKDOWN
-	DB_ENV_LOCKING
-	DB_ENV_LOGGING
-	DB_ENV_LOG_AUTOREMOVE
-	DB_ENV_LOG_INMEMORY
-	DB_ENV_NOLOCKING
-	DB_ENV_NOMMAP
-	DB_ENV_NOPANIC
-	DB_ENV_OPEN_CALLED
-	DB_ENV_OVERWRITE
-	DB_ENV_PANIC_OK
-	DB_ENV_PRIVATE
-	DB_ENV_REGION_INIT
-	DB_ENV_REP_CLIENT
-	DB_ENV_REP_LOGSONLY
-	DB_ENV_REP_MASTER
-	DB_ENV_RPCCLIENT
-	DB_ENV_RPCCLIENT_GIVEN
-	DB_ENV_STANDALONE
-	DB_ENV_SYSTEM_MEM
-	DB_ENV_THREAD
-	DB_ENV_TIME_NOTGRANTED
-	DB_ENV_TXN
-	DB_ENV_TXN_NOSYNC
-	DB_ENV_TXN_NOT_DURABLE
-	DB_ENV_TXN_WRITE_NOSYNC
-	DB_ENV_USER_ALLOC
-	DB_ENV_YIELDCPU
-	DB_EXCL
-	DB_EXTENT
-	DB_FAST_STAT
-	DB_FCNTL_LOCKING
-	DB_FILEOPEN
-	DB_FILE_ID_LEN
-	DB_FIRST
-	DB_FIXEDLEN
-	DB_FLUSH
-	DB_FORCE
-	DB_GETREC
-	DB_GET_BOTH
-	DB_GET_BOTHC
-	DB_GET_BOTH_RANGE
-	DB_GET_RECNO
-	DB_HANDLE_LOCK
-	DB_HASH
-	DB_HASHMAGIC
-	DB_HASHOLDVER
-	DB_HASHVERSION
-	DB_INCOMPLETE
-	DB_INIT_CDB
-	DB_INIT_LOCK
-	DB_INIT_LOG
-	DB_INIT_MPOOL
-	DB_INIT_REP
-	DB_INIT_TXN
-	DB_INORDER
-	DB_JAVA_CALLBACK
-	DB_JOINENV
-	DB_JOIN_ITEM
-	DB_JOIN_NOSORT
-	DB_KEYEMPTY
-	DB_KEYEXIST
-	DB_KEYFIRST
-	DB_KEYLAST
-	DB_LAST
-	DB_LOCKDOWN
-	DB_LOCKMAGIC
-	DB_LOCKVERSION
-	DB_LOCK_ABORT
-	DB_LOCK_CONFLICT
-	DB_LOCK_DEADLOCK
-	DB_LOCK_DEFAULT
-	DB_LOCK_DUMP
-	DB_LOCK_EXPIRE
-	DB_LOCK_FREE_LOCKER
-	DB_LOCK_GET
-	DB_LOCK_GET_TIMEOUT
-	DB_LOCK_INHERIT
-	DB_LOCK_MAXLOCKS
-	DB_LOCK_MAXWRITE
-	DB_LOCK_MINLOCKS
-	DB_LOCK_MINWRITE
-	DB_LOCK_NORUN
-	DB_LOCK_NOTEXIST
-	DB_LOCK_NOTGRANTED
-	DB_LOCK_NOTHELD
-	DB_LOCK_NOWAIT
-	DB_LOCK_OLDEST
-	DB_LOCK_PUT
-	DB_LOCK_PUT_ALL
-	DB_LOCK_PUT_OBJ
-	DB_LOCK_PUT_READ
-	DB_LOCK_RANDOM
-	DB_LOCK_RECORD
-	DB_LOCK_REMOVE
-	DB_LOCK_RIW_N
-	DB_LOCK_RW_N
-	DB_LOCK_SET_TIMEOUT
-	DB_LOCK_SWITCH
-	DB_LOCK_TIMEOUT
-	DB_LOCK_TRADE
-	DB_LOCK_UPGRADE
-	DB_LOCK_UPGRADE_WRITE
-	DB_LOCK_YOUNGEST
-	DB_LOGC_BUF_SIZE
-	DB_LOGFILEID_INVALID
-	DB_LOGMAGIC
-	DB_LOGOLDVER
-	DB_LOGVERSION
-	DB_LOG_AUTOREMOVE
-	DB_LOG_BUFFER_FULL
-	DB_LOG_CHKPNT
-	DB_LOG_COMMIT
-	DB_LOG_DISK
-	DB_LOG_INMEMORY
-	DB_LOG_LOCKED
-	DB_LOG_NOCOPY
-	DB_LOG_NOT_DURABLE
-	DB_LOG_PERM
-	DB_LOG_RESEND
-	DB_LOG_SILENT_ERR
-	DB_LOG_WRNOSYNC
-	DB_MAX_PAGES
-	DB_MAX_RECORDS
-	DB_MPOOL_CLEAN
-	DB_MPOOL_CREATE
-	DB_MPOOL_DIRTY
-	DB_MPOOL_DISCARD
-	DB_MPOOL_EXTENT
-	DB_MPOOL_FREE
-	DB_MPOOL_LAST
-	DB_MPOOL_NEW
-	DB_MPOOL_NEW_GROUP
-	DB_MPOOL_NOFILE
-	DB_MPOOL_PRIVATE
-	DB_MPOOL_UNLINK
-	DB_MULTIPLE
-	DB_MULTIPLE_KEY
-	DB_MUTEXDEBUG
-	DB_MUTEXLOCKS
-	DB_NEEDSPLIT
-	DB_NEXT
-	DB_NEXT_DUP
-	DB_NEXT_NODUP
-	DB_NOCOPY
-	DB_NODUPDATA
-	DB_NOLOCKING
-	DB_NOMMAP
-	DB_NOORDERCHK
-	DB_NOOVERWRITE
-	DB_NOPANIC
-	DB_NORECURSE
-	DB_NOSERVER
-	DB_NOSERVER_HOME
-	DB_NOSERVER_ID
-	DB_NOSYNC
-	DB_NOTFOUND
-	DB_NO_AUTO_COMMIT
-	DB_ODDFILESIZE
-	DB_OK_BTREE
-	DB_OK_HASH
-	DB_OK_QUEUE
-	DB_OK_RECNO
-	DB_OLD_VERSION
-	DB_OPEN_CALLED
-	DB_OPFLAGS_MASK
-	DB_ORDERCHKONLY
-	DB_OVERWRITE
-	DB_PAD
-	DB_PAGEYIELD
-	DB_PAGE_LOCK
-	DB_PAGE_NOTFOUND
-	DB_PANIC_ENVIRONMENT
-	DB_PERMANENT
-	DB_POSITION
-	DB_POSITIONI
-	DB_PREV
-	DB_PREV_NODUP
-	DB_PRINTABLE
-	DB_PRIORITY_DEFAULT
-	DB_PRIORITY_HIGH
-	DB_PRIORITY_LOW
-	DB_PRIORITY_VERY_HIGH
-	DB_PRIORITY_VERY_LOW
-	DB_PRIVATE
-	DB_PR_HEADERS
-	DB_PR_PAGE
-	DB_PR_RECOVERYTEST
-	DB_QAMMAGIC
-	DB_QAMOLDVER
-	DB_QAMVERSION
-	DB_QUEUE
-	DB_RDONLY
-	DB_RDWRMASTER
-	DB_RECNO
-	DB_RECNUM
-	DB_RECORDCOUNT
-	DB_RECORD_LOCK
-	DB_RECOVER
-	DB_RECOVER_FATAL
-	DB_REGION_ANON
-	DB_REGION_INIT
-	DB_REGION_MAGIC
-	DB_REGION_NAME
-	DB_REGISTERED
-	DB_RENAMEMAGIC
-	DB_RENUMBER
-	DB_REP_CLIENT
-	DB_REP_CREATE
-	DB_REP_DUPMASTER
-	DB_REP_EGENCHG
-	DB_REP_HANDLE_DEAD
-	DB_REP_HOLDELECTION
-	DB_REP_ISPERM
-	DB_REP_LOGREADY
-	DB_REP_LOGSONLY
-	DB_REP_MASTER
-	DB_REP_NEWMASTER
-	DB_REP_NEWSITE
-	DB_REP_NOBUFFER
-	DB_REP_NOTPERM
-	DB_REP_OUTDATED
-	DB_REP_PAGEDONE
-	DB_REP_PERMANENT
-	DB_REP_STARTUPDONE
-	DB_REP_UNAVAIL
-	DB_REVSPLITOFF
-	DB_RMW
-	DB_RPCCLIENT
-	DB_RPC_SERVERPROG
-	DB_RPC_SERVERVERS
-	DB_RUNRECOVERY
-	DB_SALVAGE
-	DB_SECONDARY_BAD
-	DB_SEQUENCE_VERSION
-	DB_SEQUENTIAL
-	DB_SEQ_DEC
-	DB_SEQ_INC
-	DB_SEQ_RANGE_SET
-	DB_SEQ_WRAP
-	DB_SET
-	DB_SET_LOCK_TIMEOUT
-	DB_SET_RANGE
-	DB_SET_RECNO
-	DB_SET_TXN_NOW
-	DB_SET_TXN_TIMEOUT
-	DB_SNAPSHOT
-	DB_STAT_ALL
-	DB_STAT_CLEAR
-	DB_STAT_LOCK_CONF
-	DB_STAT_LOCK_LOCKERS
-	DB_STAT_LOCK_OBJECTS
-	DB_STAT_LOCK_PARAMS
-	DB_STAT_MEMP_HASH
-	DB_STAT_SUBSYSTEM
-	DB_SURPRISE_KID
-	DB_SWAPBYTES
-	DB_SYSTEM_MEM
-	DB_TEMPORARY
-	DB_TEST_ELECTINIT
-	DB_TEST_ELECTSEND
-	DB_TEST_ELECTVOTE1
-	DB_TEST_ELECTVOTE2
-	DB_TEST_ELECTWAIT1
-	DB_TEST_ELECTWAIT2
-	DB_TEST_POSTDESTROY
-	DB_TEST_POSTLOG
-	DB_TEST_POSTLOGMETA
-	DB_TEST_POSTOPEN
-	DB_TEST_POSTRENAME
-	DB_TEST_POSTSYNC
-	DB_TEST_PREDESTROY
-	DB_TEST_PREOPEN
-	DB_TEST_PRERENAME
-	DB_TEST_SUBDB_LOCKS
-	DB_THREAD
-	DB_TIMEOUT
-	DB_TIME_NOTGRANTED
-	DB_TRUNCATE
-	DB_TXNMAGIC
-	DB_TXNVERSION
-	DB_TXN_ABORT
-	DB_TXN_APPLY
-	DB_TXN_BACKWARD_ALLOC
-	DB_TXN_BACKWARD_ROLL
-	DB_TXN_CKP
-	DB_TXN_FORWARD_ROLL
-	DB_TXN_LOCK
-	DB_TXN_LOCK_2PL
-	DB_TXN_LOCK_MASK
-	DB_TXN_LOCK_OPTIMIST
-	DB_TXN_LOCK_OPTIMISTIC
-	DB_TXN_LOG_MASK
-	DB_TXN_LOG_REDO
-	DB_TXN_LOG_UNDO
-	DB_TXN_LOG_UNDOREDO
-	DB_TXN_NOSYNC
-	DB_TXN_NOT_DURABLE
-	DB_TXN_NOWAIT
-	DB_TXN_OPENFILES
-	DB_TXN_POPENFILES
-	DB_TXN_PRINT
-	DB_TXN_REDO
-	DB_TXN_SYNC
-	DB_TXN_UNDO
-	DB_TXN_WRITE_NOSYNC
-	DB_UNKNOWN
-	DB_UNREF
-	DB_UPDATE_SECONDARY
-	DB_UPGRADE
-	DB_USE_ENVIRON
-	DB_USE_ENVIRON_ROOT
-	DB_VERB_CHKPOINT
-	DB_VERB_DEADLOCK
-	DB_VERB_RECOVERY
-	DB_VERB_REPLICATION
-	DB_VERB_WAITSFOR
-	DB_VERIFY
-	DB_VERIFY_BAD
-	DB_VERIFY_FATAL
-	DB_VERSION_MAJOR
-	DB_VERSION_MINOR
-	DB_VERSION_MISMATCH
-	DB_VERSION_PATCH
-	DB_VERSION_STRING
-	DB_VRFY_FLAGMASK
-	DB_WRITECURSOR
-	DB_WRITELOCK
-	DB_WRITEOPEN
-	DB_WRNOSYNC
-	DB_XA_CREATE
-	DB_XIDDATASIZE
-	DB_YIELDCPU
-	DB_debug_FLAG
-	DB_user_BEGIN
-	);
-
-sub AUTOLOAD {
-    my($constname);
-    ($constname = $AUTOLOAD) =~ s/.*:://;
-    my ($error, $val) = constant($constname);
-    Carp::croak $error if $error;
-    no strict 'refs';
-    *{$AUTOLOAD} = sub { $val };
-    goto &{$AUTOLOAD};
-}         
-
-#bootstrap BerkeleyDB $VERSION;
-if ($use_XSLoader)
-  { XSLoader::load("BerkeleyDB", $VERSION)}
-else
-  { bootstrap BerkeleyDB $VERSION }  
-
-# Preloaded methods go here.
-
-
-sub ParseParameters($@)
-{
-    my ($default, @rest) = @_ ;
-    my (%got) = %$default ;
-    my (@Bad) ;
-    my ($key, $value) ;
-    my $sub = (caller(1))[3] ;
-    my %options = () ;
-    local ($Carp::CarpLevel) = 1 ;
-
-    # allow the options to be passed as a hash reference or
-    # as the complete hash.
-    if (@rest == 1) {
-
-        croak "$sub: parameter is not a reference to a hash"
-            if ref $rest[0] ne "HASH" ;
-
-        %options = %{ $rest[0] } ;
-    }
-    elsif (@rest >= 2 && @rest % 2 == 0) {
-        %options = @rest ;
-    }
-    elsif (@rest > 0) {
-	    croak "$sub: malformed option list";
-    }
-
-    while (($key, $value) = each %options)
-    {
-	$key =~ s/^-// ;
-
-        if (exists $default->{$key})
-          { $got{$key} = $value }
-        else
-	  { push (@Bad, $key) }
-    }
-    
-    if (@Bad) {
-        my ($bad) = join(", ", @Bad) ;
-        croak "unknown key value(s) @Bad" ;
-    }
-
-    return \%got ;
-}
-
-sub parseEncrypt
-{
-    my $got = shift ;
-
-
-    if (defined $got->{Encrypt}) {
-    	croak("Encrypt parameter must be a hash reference")
-            if !ref $got->{Encrypt} || ref $got->{Encrypt} ne 'HASH' ;
-
-	my %config = %{ $got->{Encrypt} } ;
-
-        my $p = BerkeleyDB::ParseParameters({
-					Password	=> undef,
-					Flags		=> undef,
-				}, %config);
-
-        croak("Must specify Password and Flags with Encrypt parameter")
-	    if ! (defined $p->{Password} && defined $p->{Flags});
-
-        $got->{"Enc_Passwd"} = $p->{Password};
-        $got->{"Enc_Flags"} = $p->{Flags};
-    }
-}
-
-use UNIVERSAL qw( isa ) ;
-
-sub env_remove
-{
-    # Usage:
-    #
-    #	$env = BerkeleyDB::env_remove
-    #			[ -Home		=> $path, ]
-    #			[ -Config	=> { name => value, name => value }
-    #			[ -Flags	=> DB_INIT_LOCK| ]
-    #			;
-
-    my $got = BerkeleyDB::ParseParameters({
-					Home		=> undef,
-					Flags     	=> 0,
-					Config		=> undef,
-					}, @_) ;
-
-    if (defined $got->{Config}) {
-    	croak("Config parameter must be a hash reference")
-            if ! ref $got->{Config} eq 'HASH' ;
-
-        @BerkeleyDB::a = () ;
-	my $k = "" ; my $v = "" ;
-	while (($k, $v) = each %{$got->{Config}}) {
-	    push @BerkeleyDB::a, "$k\t$v" ;
-	}
-
-        $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef) 
-	    if @BerkeleyDB::a ;
-    }
-
-    return _env_remove($got) ;
-}
-
-sub db_remove
-{
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			Filename 	=> undef,
-			Subname		=> undef,
-			Flags		=> 0,
-			Env		=> undef,
-		      }, @_) ;
-
-    croak("Must specify a filename")
-	if ! defined $got->{Filename} ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    return _db_remove($got);
-}
-
-sub db_rename
-{
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			Filename 	=> undef,
-			Subname		=> undef,
-			Newname		=> undef,
-			Flags		=> 0,
-			Env		=> undef,
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Must specify a filename")
-	if ! defined $got->{Filename} ;
-
-    croak("Must specify a Subname")
-	if ! defined $got->{Subname} ;
-
-    croak("Must specify a Newname")
-	if ! defined $got->{Newname} ;
-
-    return _db_rename($got);
-}
-
-sub db_verify
-{
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			Filename 	=> undef,
-			Subname		=> undef,
-			Outfile		=> undef,
-			Flags		=> 0,
-			Env		=> undef,
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Must specify a filename")
-	if ! defined $got->{Filename} ;
-
-    return _db_verify($got);
-}
-
-package BerkeleyDB::Env ;
-
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-use IO::File;
-use vars qw( %valid_config_keys ) ;
-
-sub isaFilehandle
-{
-    my $fh = shift ;
-
-    return ((isa($fh,'GLOB') or isa(\$fh,'GLOB')) and defined fileno($fh) )
-
-}
-
-%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR
-DB_TMP_DIR ) ;
-
-sub new
-{
-    # Usage:
-    #
-    #	$env = new BerkeleyDB::Env
-    #			[ -Home		=> $path, ]
-    #			[ -Mode		=> mode, ]
-    #			[ -Config	=> { name => value, name => value }
-    #			[ -ErrFile   	=> filename, ]
-    #			[ -ErrPrefix 	=> "string", ]
-    #			[ -Flags	=> DB_INIT_LOCK| ]
-    #			[ -Set_Flags	=> $flags,]
-    #			[ -Cachesize	=> number ]
-    #			[ -LockDetect	=>  ]
-    #			[ -Verbose	=> boolean ]
-    #			[ -Encrypt	=> { Password => string, Flags => value}
-    #
-    #			;
-
-    my $pkg = shift ;
-    my $got = BerkeleyDB::ParseParameters({
-					Home		=> undef,
-					Server		=> undef,
-					Mode		=> 0666,
-					ErrFile  	=> undef,
-					ErrPrefix 	=> undef,
-					Flags     	=> 0,
-					SetFlags     	=> 0,
-					Cachesize     	=> 0,
-					LockDetect     	=> 0,
-					Verbose		=> 0,
-					Config		=> undef,
-					Encrypt		=> undef,
-					SharedMemKey	=> undef,
-					}, @_) ;
-
-    my $errfile  = $got->{ErrFile} ;				
-    if (defined $got->{ErrFile}) {
-	if (!isaFilehandle($got->{ErrFile})) {
-	    my $handle = new IO::File ">$got->{ErrFile}"
-		or croak "Cannot open file $got->{ErrFile}: $!\n" ;
-	    $errfile = $got->{ErrFile} = $handle ;
-	}
-    }
-
-    my %config ;
-    if (defined $got->{Config}) {
-    	croak("Config parameter must be a hash reference")
-            if ! ref $got->{Config} eq 'HASH' ;
-
-	%config = %{ $got->{Config} } ;
-        @BerkeleyDB::a = () ;
-	my $k = "" ; my $v = "" ;
-	while (($k, $v) = each %config) {
-	    if ($BerkeleyDB::db_version >= 3.1 && ! $valid_config_keys{$k} ){
-	        $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ; 
-                croak $BerkeleyDB::Error ;
-	    }
-	    push @BerkeleyDB::a, "$k\t$v" ;
-	}
-
-        $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef) 
-	    if @BerkeleyDB::a ;
-    }
-
-    BerkeleyDB::parseEncrypt($got);
-
-    my ($addr) = _db_appinit($pkg, $got, $errfile) ;
-    my $obj ;
-    $obj = bless [$addr] , $pkg if $addr ;
-    if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) {
-	my ($k, $v);
-	while (($k, $v) = each %config) {
-	    if ($k eq 'DB_DATA_DIR')
-	      { $obj->set_data_dir($v) }
-	    elsif ($k eq 'DB_LOG_DIR')
-	      { $obj->set_lg_dir($v) }
-	    elsif ($k eq 'DB_TEMP_DIR' || $k eq 'DB_TMP_DIR')
-	      { $obj->set_tmp_dir($v) }
-	    else {
-	      $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ; 
-              croak $BerkeleyDB::Error 
-            }
-	}
-    }
-    return $obj ;
-}
-
-
-sub TxnMgr
-{
-    my $env = shift ;
-    my ($addr) = $env->_TxnMgr() ;
-    my $obj ;
-    $obj = bless [$addr, $env] , "BerkeleyDB::TxnMgr" if $addr ;
-    return $obj ;
-}
-
-sub txn_begin
-{
-    my $env = shift ;
-    my ($addr) = $env->_txn_begin(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $env] , "BerkeleyDB::Txn" if $addr ;
-    return $obj ;
-}
-
-sub DESTROY
-{
-    my $self = shift ;
-    $self->_DESTROY() ;
-}
-
-package BerkeleyDB::Hash ;
-
-use vars qw(@ISA) ;
-@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-
-sub new
-{
-    my $self = shift ;
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			# Generic Stuff
-			Filename 	=> undef,
-			Subname		=> undef,
-			#Flags		=> BerkeleyDB::DB_CREATE(),
-			Flags		=> 0,
-			Property	=> 0,
-			Mode		=> 0666,
-			Cachesize 	=> 0,
-			Lorder 		=> 0,
-			Pagesize 	=> 0,
-			Env		=> undef,
-			#Tie 		=> undef,
-			Txn		=> undef,
-			Encrypt		=> undef,
-
-			# Hash specific
-			Ffactor		=> 0,
-			Nelem 		=> 0,
-			Hash 		=> undef,
-			DupCompare	=> undef,
-
-			# BerkeleyDB specific
-			ReadKey		=> undef,
-			WriteKey	=> undef,
-			ReadValue	=> undef,
-			WriteValue	=> undef,
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Txn not of type BerkeleyDB::Txn")
-	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-
-    croak("-Tie needs a reference to a hash")
-	if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
-
-    BerkeleyDB::parseEncrypt($got);
-
-    my ($addr) = _db_open_hash($self, $got);
-    my $obj ;
-    if ($addr) {
-        $obj = bless [$addr] , $self ;
-	push @{ $obj }, $got->{Env} if $got->{Env} ;
-        $obj->Txn($got->{Txn}) 
-            if $got->{Txn} ;
-    }
-    return $obj ;
-}
-
-*TIEHASH = \&new ;
-
- 
-package BerkeleyDB::Btree ;
-
-use vars qw(@ISA) ;
-@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-
-sub new
-{
-    my $self = shift ;
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			# Generic Stuff
-			Filename 	=> undef,
-			Subname		=> undef,
-			#Flags		=> BerkeleyDB::DB_CREATE(),
-			Flags		=> 0,
-			Property	=> 0,
-			Mode		=> 0666,
-			Cachesize 	=> 0,
-			Lorder 		=> 0,
-			Pagesize 	=> 0,
-			Env		=> undef,
-			#Tie 		=> undef,
-			Txn		=> undef,
-			Encrypt		=> undef,
-
-			# Btree specific
-			Minkey		=> 0,
-			Compare		=> undef,
-			DupCompare	=> undef,
-			Prefix 		=> undef,
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Txn not of type BerkeleyDB::Txn")
-	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-
-    croak("-Tie needs a reference to a hash")
-	if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
-
-    BerkeleyDB::parseEncrypt($got);
-
-    my ($addr) = _db_open_btree($self, $got);
-    my $obj ;
-    if ($addr) {
-        $obj = bless [$addr] , $self ;
-	push @{ $obj }, $got->{Env} if $got->{Env} ;
-        $obj->Txn($got->{Txn}) 
-            if $got->{Txn} ;
-    }
-    return $obj ;
-}
-
-*BerkeleyDB::Btree::TIEHASH = \&BerkeleyDB::Btree::new ;
-
-
-package BerkeleyDB::Recno ;
-
-use vars qw(@ISA) ;
-@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-
-sub new
-{
-    my $self = shift ;
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			# Generic Stuff
-			Filename 	=> undef,
-			Subname		=> undef,
-			#Flags		=> BerkeleyDB::DB_CREATE(),
-			Flags		=> 0,
-			Property	=> 0,
-			Mode		=> 0666,
-			Cachesize 	=> 0,
-			Lorder 		=> 0,
-			Pagesize 	=> 0,
-			Env		=> undef,
-			#Tie 		=> undef,
-			Txn		=> undef,
-			Encrypt		=> undef,
-
-			# Recno specific
-			Delim		=> undef,
-			Len		=> undef,
-			Pad		=> undef,
-			Source 		=> undef,
-			ArrayBase 	=> 1, # lowest index in array
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Txn not of type BerkeleyDB::Txn")
-	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-
-    croak("Tie needs a reference to an array")
-	if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
-
-    croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
-	if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
-
-
-    BerkeleyDB::parseEncrypt($got);
-
-    $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
-
-    my ($addr) = _db_open_recno($self, $got);
-    my $obj ;
-    if ($addr) {
-        $obj = bless [$addr] , $self ;
-	push @{ $obj }, $got->{Env} if $got->{Env} ;
-        $obj->Txn($got->{Txn}) 
-            if $got->{Txn} ;
-    }	
-    return $obj ;
-}
-
-*BerkeleyDB::Recno::TIEARRAY = \&BerkeleyDB::Recno::new ;
-*BerkeleyDB::Recno::db_stat = \&BerkeleyDB::Btree::db_stat ;
-
-package BerkeleyDB::Queue ;
-
-use vars qw(@ISA) ;
-@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-
-sub new
-{
-    my $self = shift ;
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			# Generic Stuff
-			Filename 	=> undef,
-			Subname		=> undef,
-			#Flags		=> BerkeleyDB::DB_CREATE(),
-			Flags		=> 0,
-			Property	=> 0,
-			Mode		=> 0666,
-			Cachesize 	=> 0,
-			Lorder 		=> 0,
-			Pagesize 	=> 0,
-			Env		=> undef,
-			#Tie 		=> undef,
-			Txn		=> undef,
-			Encrypt		=> undef,
-
-			# Queue specific
-			Len		=> undef,
-			Pad		=> undef,
-			ArrayBase 	=> 1, # lowest index in array
-			ExtentSize      => undef,
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Txn not of type BerkeleyDB::Txn")
-	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-
-    croak("Tie needs a reference to an array")
-	if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
-
-    croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
-	if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
-
-    BerkeleyDB::parseEncrypt($got);
-
-    $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
-
-    my ($addr) = _db_open_queue($self, $got);
-    my $obj ;
-    if ($addr) {
-        $obj = bless [$addr] , $self ;
-	push @{ $obj }, $got->{Env} if $got->{Env} ;
-        $obj->Txn($got->{Txn})
-            if $got->{Txn} ;
-    }	
-    return $obj ;
-}
-
-*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
-
-sub UNSHIFT
-{
-    my $self = shift;
-    croak "unshift is unsupported with Queue databases";
-}
-
-## package BerkeleyDB::Text ;
-## 
-## use vars qw(@ISA) ;
-## @ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
-## use UNIVERSAL qw( isa ) ;
-## use Carp ;
-## 
-## sub new
-## {
-##     my $self = shift ;
-##     my $got = BerkeleyDB::ParseParameters(
-## 		      {
-## 			# Generic Stuff
-## 			Filename 	=> undef,
-## 			#Flags		=> BerkeleyDB::DB_CREATE(),
-## 			Flags		=> 0,
-## 			Property	=> 0,
-## 			Mode		=> 0666,
-## 			Cachesize 	=> 0,
-## 			Lorder 		=> 0,
-## 			Pagesize 	=> 0,
-## 			Env		=> undef,
-## 			#Tie 		=> undef,
-## 			Txn		=> undef,
-## 
-## 			# Recno specific
-## 			Delim		=> undef,
-## 			Len		=> undef,
-## 			Pad		=> undef,
-## 			Btree 		=> undef,
-## 		      }, @_) ;
-## 
-##     croak("Env not of type BerkeleyDB::Env")
-## 	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-## 
-##     croak("Txn not of type BerkeleyDB::Txn")
-## 	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-## 
-##     croak("-Tie needs a reference to an array")
-## 	if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
-## 
-##     # rearange for recno
-##     $got->{Source} = $got->{Filename} if defined $got->{Filename} ;
-##     delete $got->{Filename} ;
-##     $got->{Fname} = $got->{Btree} if defined $got->{Btree} ;
-##     return BerkeleyDB::Recno::_db_open_recno($self, $got);
-## }
-## 
-## *BerkeleyDB::Text::TIEARRAY = \&BerkeleyDB::Text::new ;
-## *BerkeleyDB::Text::db_stat = \&BerkeleyDB::Btree::db_stat ;
-
-package BerkeleyDB::Unknown ;
-
-use vars qw(@ISA) ;
-@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
-use UNIVERSAL qw( isa ) ;
-use Carp ;
-
-sub new
-{
-    my $self = shift ;
-    my $got = BerkeleyDB::ParseParameters(
-		      {
-			# Generic Stuff
-			Filename 	=> undef,
-			Subname		=> undef,
-			#Flags		=> BerkeleyDB::DB_CREATE(),
-			Flags		=> 0,
-			Property	=> 0,
-			Mode		=> 0666,
-			Cachesize 	=> 0,
-			Lorder 		=> 0,
-			Pagesize 	=> 0,
-			Env		=> undef,
-			#Tie 		=> undef,
-			Txn		=> undef,
-			Encrypt		=> undef,
-
-		      }, @_) ;
-
-    croak("Env not of type BerkeleyDB::Env")
-	if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
-
-    croak("Txn not of type BerkeleyDB::Txn")
-	if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
-
-    croak("-Tie needs a reference to a hash")
-	if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
-
-    BerkeleyDB::parseEncrypt($got);
-
-    my ($addr, $type) = _db_open_unknown($got);
-    my $obj ;
-    if ($addr) {
-        $obj = bless [$addr], "BerkeleyDB::$type" ;
-	push @{ $obj }, $got->{Env} if $got->{Env} ;
-        $obj->Txn($got->{Txn})
-            if $got->{Txn} ;
-    }	
-    return $obj ;
-}
-
-
-package BerkeleyDB::_tiedHash ;
-
-use Carp ;
-
-#sub TIEHASH  
-#{ 
-#    my $self = shift ;
-#    my $db_object = shift ;
-#
-#print "Tiehash REF=[$self] [" . (ref $self) . "]\n" ;
-#
-#    return bless { Obj => $db_object}, $self ; 
-#}
-
-sub Tie
-{
-    # Usage:
-    #
-    #   $db->Tie \%hash ;
-    #
-
-    my $self = shift ;
-
-    #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
-
-    croak("usage \$x->Tie \\%hash\n") unless @_ ;
-    my $ref  = shift ; 
-
-    croak("Tie needs a reference to a hash")
-	if defined $ref and $ref !~ /HASH/ ;
-
-    #tie %{ $ref }, ref($self), $self ; 
-    tie %{ $ref }, "BerkeleyDB::_tiedHash", $self ; 
-    return undef ;
-}
-
- 
-sub TIEHASH  
-{ 
-    my $self = shift ;
-    my $db_object = shift ;
-    #return bless $db_object, 'BerkeleyDB::Common' ; 
-    return $db_object ;
-}
-
-sub STORE
-{
-    my $self = shift ;
-    my $key  = shift ;
-    my $value = shift ;
-
-    $self->db_put($key, $value) ;
-}
-
-sub FETCH
-{
-    my $self = shift ;
-    my $key  = shift ;
-    my $value = undef ;
-    $self->db_get($key, $value) ;
-
-    return $value ;
-}
-
-sub EXISTS
-{
-    my $self = shift ;
-    my $key  = shift ;
-    my $value = undef ;
-    $self->db_get($key, $value) == 0 ;
-}
-
-sub DELETE
-{
-    my $self = shift ;
-    my $key  = shift ;
-    $self->db_del($key) ;
-}
-
-sub CLEAR
-{
-    my $self = shift ;
-    my ($key, $value) = (0, 0) ;
-    my $cursor = $self->_db_write_cursor() ;
-    while ($cursor->c_get($key, $value, BerkeleyDB::DB_PREV()) == 0) 
-	{ $cursor->c_del() }
-}
-
-#sub DESTROY
-#{
-#    my $self = shift ;
-#    print "BerkeleyDB::_tieHash::DESTROY\n" ;
-#    $self->{Cursor}->c_close() if $self->{Cursor} ;
-#}
-
-package BerkeleyDB::_tiedArray ;
-
-use Carp ;
-
-sub Tie
-{
-    # Usage:
-    #
-    #   $db->Tie \@array ;
-    #
-
-    my $self = shift ;
-
-    #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
-
-    croak("usage \$x->Tie \\%hash\n") unless @_ ;
-    my $ref  = shift ; 
-
-    croak("Tie needs a reference to an array")
-	if defined $ref and $ref !~ /ARRAY/ ;
-
-    #tie %{ $ref }, ref($self), $self ; 
-    tie @{ $ref }, "BerkeleyDB::_tiedArray", $self ; 
-    return undef ;
-}
-
- 
-#sub TIEARRAY  
-#{ 
-#    my $self = shift ;
-#    my $db_object = shift ;
-#
-#print "Tiearray REF=[$self] [" . (ref $self) . "]\n" ;
-#
-#    return bless { Obj => $db_object}, $self ; 
-#}
-
-sub TIEARRAY  
-{ 
-    my $self = shift ;
-    my $db_object = shift ;
-    #return bless $db_object, 'BerkeleyDB::Common' ; 
-    return $db_object ;
-}
-
-sub STORE
-{
-    my $self = shift ;
-    my $key  = shift ;
-    my $value = shift ;
-
-    $self->db_put($key, $value) ;
-}
-
-sub FETCH
-{
-    my $self = shift ;
-    my $key  = shift ;
-    my $value = undef ;
-    $self->db_get($key, $value) ;
-
-    return $value ;
-}
-
-*CLEAR =    \&BerkeleyDB::_tiedHash::CLEAR ;
-*FIRSTKEY = \&BerkeleyDB::_tiedHash::FIRSTKEY ;
-*NEXTKEY =  \&BerkeleyDB::_tiedHash::NEXTKEY ;
-
-sub EXTEND {} # don't do anything with EXTEND
-
-
-sub SHIFT
-{
-    my $self = shift;
-    my ($key, $value) = (0, 0) ;
-    my $cursor = $self->_db_write_cursor() ;
-    return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ;
-    return undef if $cursor->c_del() != 0 ;
-
-    return $value ;
-}
-
-
-sub UNSHIFT
-{
-    my $self = shift;
-    if (@_)
-    {
-        my ($key, $value) = (0, 0) ;
-        my $cursor = $self->_db_write_cursor() ;
-        my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) ;
-        if ($status == 0)
-        {
-            foreach $value (reverse @_)
-            {
-	        $key = 0 ;
-	        $cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
-            }
-        }
-        elsif ($status == BerkeleyDB::DB_NOTFOUND())
-        {
-	    $key = 0 ;
-            foreach $value (@_)
-            {
-	        $self->db_put($key++, $value) ;
-            }
-        }
-    }
-}
-
-sub PUSH
-{
-    my $self = shift;
-    if (@_)
-    {
-        my ($key, $value) = (-1, 0) ;
-        my $cursor = $self->_db_write_cursor() ;
-        my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) ;
-        if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND())
-	{
-            $key = -1 if $status != 0 and $self->type != BerkeleyDB::DB_RECNO() ;
-            foreach $value (@_)
-	    {
-	        ++ $key ;
-	        $status = $self->db_put($key, $value) ;
-	    }
-	}
-
-# can use this when DB_APPEND is fixed.
-#        foreach $value (@_)
-#        {
-#	    my $status = $cursor->c_put($key, $value, BerkeleyDB::DB_AFTER()) ;
-#print "[$status]\n" ;
-#        }
-    }
-}
-
-sub POP
-{
-    my $self = shift;
-    my ($key, $value) = (0, 0) ;
-    my $cursor = $self->_db_write_cursor() ;
-    return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ;
-    return undef if $cursor->c_del() != 0 ;
-
-    return $value ;
-}
-
-sub SPLICE
-{
-    my $self = shift;
-    croak "SPLICE is not implemented yet" ;
-}
-
-*shift = \&SHIFT ;
-*unshift = \&UNSHIFT ;
-*push = \&PUSH ;
-*pop = \&POP ;
-*clear = \&CLEAR ;
-*length = \&FETCHSIZE ;
-
-sub STORESIZE
-{
-    croak "STORESIZE is not implemented yet" ;
-#print "STORESIZE @_\n" ;
-#    my $self = shift;
-#    my $length = shift ;
-#    my $current_length = $self->FETCHSIZE() ;
-#print "length is $current_length\n";
-#
-#    if ($length < $current_length) {
-#print "Make smaller $length < $current_length\n" ;
-#        my $key ;
-#        for ($key = $current_length - 1 ; $key >= $length ; -- $key)
-#          { $self->db_del($key) }
-#    }
-#    elsif ($length > $current_length) {
-#print "Make larger $length > $current_length\n" ;
-#        $self->db_put($length-1, "") ;
-#    }
-#    else { print "stay the same\n" }
-
-}
-
-
-
-#sub DESTROY
-#{
-#    my $self = shift ;
-#    print "BerkeleyDB::_tieArray::DESTROY\n" ;
-#}
-
-
-package BerkeleyDB::Common ;
-
-
-use Carp ;
-
-sub DESTROY
-{
-    my $self = shift ;
-    $self->_DESTROY() ;
-}
-
-sub Txn
-{
-    my $self = shift ;
-    my $txn  = shift ;
-    #print "BerkeleyDB::Common::Txn db [$self] txn [$txn]\n" ;
-    if ($txn) {
-        $self->_Txn($txn) ;
-        push @{ $txn }, $self ;
-    }
-    else {
-        $self->_Txn() ;
-    }
-    #print "end BerkeleyDB::Common::Txn \n";
-}
-
-
-sub get_dup
-{
-    croak "Usage: \$db->get_dup(key [,flag])\n"
-        unless @_ == 2 or @_ == 3 ;
- 
-    my $db        = shift ;
-    my $key       = shift ;
-    my $flag	  = shift ;
-    my $value 	  = 0 ;
-    my $origkey   = $key ;
-    my $wantarray = wantarray ;
-    my %values	  = () ;
-    my @values    = () ;
-    my $counter   = 0 ;
-    my $status    = 0 ;
-    my $cursor    = $db->db_cursor() ;
- 
-    # iterate through the database until either EOF ($status == 0)
-    # or a different key is encountered ($key ne $origkey).
-    for ($status = $cursor->c_get($key, $value, BerkeleyDB::DB_SET()) ;
-	 $status == 0 and $key eq $origkey ;
-         $status = $cursor->c_get($key, $value, BerkeleyDB::DB_NEXT()) ) {
-        # save the value or count number of matches
-        if ($wantarray) {
-	    if ($flag)
-                { ++ $values{$value} }
-	    else
-                { push (@values, $value) }
-	}
-        else
-            { ++ $counter }
-     
-    }
- 
-    return ($wantarray ? ($flag ? %values : @values) : $counter) ;
-}
-
-sub db_cursor
-{
-    my $db = shift ;
-    my ($addr) = $db->_db_cursor(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
-    return $obj ;
-}
-
-sub _db_write_cursor
-{
-    my $db = shift ;
-    my ($addr) = $db->__db_write_cursor(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
-    return $obj ;
-}
-
-sub db_join
-{
-    croak 'Usage: $db->BerkeleyDB::db_join([cursors], flags=0)'
-	if @_ < 2 || @_ > 3 ;
-    my $db = shift ;
-    croak 'db_join: first parameter is not an array reference'
-	if ! ref $_[0] || ref $_[0] ne 'ARRAY';
-    my ($addr) = $db->_db_join(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $db, $_[0]] , "BerkeleyDB::Cursor" if $addr ;
-    return $obj ;
-}
-
-package BerkeleyDB::Cursor ;
-
-sub c_close
-{
-    my $cursor = shift ;
-    $cursor->[1] = "" ;
-    return $cursor->_c_close() ;
-}
-
-sub c_dup
-{
-    my $cursor = shift ;
-    my ($addr) = $cursor->_c_dup(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $cursor->[1]] , "BerkeleyDB::Cursor" if $addr ;
-    return $obj ;
-}
-
-sub DESTROY
-{
-    my $self = shift ;
-    $self->_DESTROY() ;
-}
-
-package BerkeleyDB::TxnMgr ;
-
-sub DESTROY
-{
-    my $self = shift ;
-    $self->_DESTROY() ;
-}
-
-sub txn_begin
-{
-    my $txnmgr = shift ;
-    my ($addr) = $txnmgr->_txn_begin(@_) ;
-    my $obj ;
-    $obj = bless [$addr, $txnmgr] , "BerkeleyDB::Txn" if $addr ;
-    return $obj ;
-}
-
-package BerkeleyDB::Txn ;
-
-sub Txn
-{
-    my $self = shift ;
-    my $db ;
-    # keep a reference to each db in the txn object
-    foreach $db (@_) {
-        $db->_Txn($self) ;
-	push @{ $self}, $db ;
-    }
-}
-
-sub txn_commit
-{
-    my $self = shift ;
-    $self->disassociate() ;
-    my $status = $self->_txn_commit() ;
-    return $status ;
-}
-
-sub txn_abort
-{
-    my $self = shift ;
-    $self->disassociate() ;
-    my $status = $self->_txn_abort() ;
-    return $status ;
-}
-
-sub disassociate
-{
-    my $self = shift ;
-    my $db ;
-    while ( @{ $self } > 2) {
-        $db = pop @{ $self } ;
-        $db->Txn() ;
-    }
-    #print "end disassociate\n" ;
-}
-
-
-sub DESTROY
-{
-    my $self = shift ;
-
-    $self->disassociate() ;
-    # first close the close the transaction
-    $self->_DESTROY() ;
-}
-
-package BerkeleyDB::CDS::Lock;
-
-use vars qw(%Object %Count);
-use Carp;
-
-sub BerkeleyDB::Common::cds_lock
-{
-    my $db = shift ;
-
-    # fatal error if database not opened in CDS mode
-    croak("CDS not enabled for this database\n") 
-        if ! $db->cds_enabled();
-
-    if ( ! defined $Object{"$db"})
-    {
-        $Object{"$db"} = $db->_db_write_cursor()
-         || return undef ;
-    }
-
-    ++ $Count{"$db"} ;
-
-    return bless [$db, 1], "BerkeleyDB::CDS::Lock" ;
-}
-
-sub cds_unlock
-{
-    my $self = shift ;
-    my $db = $self->[0] ;
-
-    if ($self->[1]) 
-    {
-        $self->[1] = 0 ;
-        -- $Count{"$db"} if $Count{"$db"} > 0 ;
-
-        if ($Count{"$db"} == 0)
-        {
-            $Object{"$db"}->c_close() ;
-            undef $Object{"$db"};
-        }
-
-        return 1 ;
-    }
-
-    return undef ;
-}
-
-sub DESTROY
-{
-    my $self = shift ;
-    $self->cds_unlock() ;	
-}
-
-package BerkeleyDB::Term ;
-
-END
-{
-    close_everything() ;
-}
-
-
-package BerkeleyDB ;
-
-
-
-# Autoload methods go after =cut, and are processed by the autosplit program.
-
-1;
-__END__
-
-
-
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
deleted file mode 100644
index ba2cc0c5833..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
+++ /dev/null
@@ -1,1918 +0,0 @@
-=head1 NAME
-
-BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
-
-=head1 SYNOPSIS
-
-  use BerkeleyDB;
-
-  $env = new BerkeleyDB::Env [OPTIONS] ;
-
-  $db  = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
-  $db  = new BerkeleyDB::Hash [OPTIONS] ;
-
-  $db  = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
-  $db  = new BerkeleyDB::Btree [OPTIONS] ;
-
-  $db  = tie @array, 'BerkeleyDB::Recno', [OPTIONS] ;
-  $db  = new BerkeleyDB::Recno [OPTIONS] ;
-
-  $db  = tie @array, 'BerkeleyDB::Queue', [OPTIONS] ;
-  $db  = new BerkeleyDB::Queue [OPTIONS] ;
-
-  $db  = new BerkeleyDB::Unknown [OPTIONS] ;
-
-  $status = BerkeleyDB::db_remove [OPTIONS]
-  $status = BerkeleyDB::db_rename [OPTIONS]
-  $status = BerkeleyDB::db_verify [OPTIONS]
-
-  $hash{$key} = $value ;
-  $value = $hash{$key} ;
-  each %hash ;
-  keys %hash ;
-  values %hash ;
-
-  $status = $db->db_get()
-  $status = $db->db_put() ;
-  $status = $db->db_del() ;
-  $status = $db->db_sync() ;
-  $status = $db->db_close() ;
-  $status = $db->db_pget()
-  $hash_ref = $db->db_stat() ;
-  $status = $db->db_key_range();
-  $type = $db->type() ;
-  $status = $db->status() ;
-  $boolean = $db->byteswapped() ;
-  $status = $db->truncate($count) ;
-
-  $bool = $env->cds_enabled();
-  $bool = $db->cds_enabled();
-  $lock = $db->cds_lock();
-  $lock->cds_unlock();
-  
-  ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
-  ($flag, $old_offset, $old_length) = $db->partial_clear() ;
-
-  $cursor = $db->db_cursor([$flags]) ;
-  $newcursor = $cursor->c_dup([$flags]);
-  $status = $cursor->c_get() ;
-  $status = $cursor->c_put() ;
-  $status = $cursor->c_del() ;
-  $status = $cursor->c_count() ;
-  $status = $cursor->c_pget() ;
-  $status = $cursor->status() ;
-  $status = $cursor->c_close() ;
-
-  $cursor = $db->db_join() ;
-  $status = $cursor->c_get() ;
-  $status = $cursor->c_close() ;
-
-  $status = $env->txn_checkpoint()
-  $hash_ref = $env->txn_stat()
-  $status = $env->setmutexlocks()
-  $status = $env->set_flags()
-
-  $txn = $env->txn_begin() ;
-  $db->Txn($txn);
-  $txn->Txn($db1, $db2,...);
-  $status = $txn->txn_prepare()
-  $status = $txn->txn_commit()
-  $status = $txn->txn_abort()
-  $status = $txn->txn_id()
-  $status = $txn->txn_discard()
-
-  $status = $env->set_lg_dir();
-  $status = $env->set_lg_bsize();
-  $status = $env->set_lg_max();
-
-  $status = $env->set_data_dir() ;
-  $status = $env->set_tmp_dir() ;
-  $status = $env->set_verbose() ;
-  $db_env_ptr = $env->DB_ENV() ;
-
-  $BerkeleyDB::Error
-  $BerkeleyDB::db_version
-
-  # DBM Filters
-  $old_filter = $db->filter_store_key  ( sub { ... } ) ;
-  $old_filter = $db->filter_store_value( sub { ... } ) ;
-  $old_filter = $db->filter_fetch_key  ( sub { ... } ) ;
-  $old_filter = $db->filter_fetch_value( sub { ... } ) ;
-
-  # deprecated, but supported
-  $txn_mgr = $env->TxnMgr();
-  $status = $txn_mgr->txn_checkpoint()
-  $hash_ref = $txn_mgr->txn_stat()
-  $txn = $txn_mgr->txn_begin() ;
-
-=head1 DESCRIPTION
-
-B
-
-This Perl module provides an interface to most of the functionality
-available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
-that the interface provided here to be identical to the Berkeley DB
-interface. The main changes have been to make the Berkeley DB API work
-in a Perl way. Note that if you are using Berkeley DB 2.x, the new
-features available in Berkeley DB 3.x or DB 4.x are not available via
-this module.
-
-The reader is expected to be familiar with the Berkeley DB
-documentation. Where the interface provided here is identical to the
-Berkeley DB library and the... TODO
-
-The B, B, B and B man pages are
-particularly relevant.
-
-The interface to Berkeley DB is implemented with a number of Perl
-classes.
-
-=head1 The BerkeleyDB::Env Class
-
-The B class provides an interface to the Berkeley DB
-function B in Berkeley DB 2.x or B and
-Bopen> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
-number of sub-systems that can then be used in a consistent way in all
-the databases you make use of in the environment.
-
-If you don't intend using transactions, locking or logging, then you
-shouldn't need to make use of B.
-
-Note that an environment consists of a number of files that Berkeley DB
-manages behind the scenes for you. When you first use an environment, it
-needs to be explicitly created. This is done by including C
-with the C parameter, described below.
-
-=head2 Synopsis
-
-    $env = new BerkeleyDB::Env
-             [ -Home         => $path, ]
-             [ -Server       => $name, ]
-             [ -CacheSize    => $number, ]
-             [ -Config       => { name => value, name => value }, ]
-             [ -ErrFile      => filename, ]
-             [ -ErrPrefix    => "string", ]
-             [ -Flags        => number, ]
-             [ -SetFlags     => bitmask, ]
-             [ -LockDetect   => number, ]
-             [ -SharedMemKey => number, ]
-             [ -Verbose      => boolean, ]
-             [ -Encrypt      => { Password => "string",
-	                          Flags    => number }, ]
-
-All the parameters to the BerkeleyDB::Env constructor are optional.
-
-=over 5
-
-=item -Home
-
-If present, this parameter should point to an existing directory. Any
-files that I specified with an absolute path in the sub-systems
-that are initialised by the BerkeleyDB::Env class will be assumed to
-live in the B directory.
-
-For example, in the code fragment below the database "fred.db" will be
-opened in the directory "/home/databases" because it was specified as a
-relative path, but "joe.db" will be opened in "/other" because it was
-part of an absolute path.
-
-    $env = new BerkeleyDB::Env
-             -Home         => "/home/databases"
-    ...
-
-    $db1 = new BerkeleyDB::Hash
-	     -Filename = "fred.db",
-	     -Env => $env
-    ...
-
-    $db2 = new BerkeleyDB::Hash
-	     -Filename = "/other/joe.db",
-	     -Env => $env
-    ...
-
-=item -Server
-
-If present, this parameter should be the hostname of a server that is running
-the Berkeley DB RPC server. All databases will be accessed via the RPC server.
-
-=item -Encrypt
-
-If present, this parameter will enable encryption of  all data before
-it is written to the database. This parameters must be given a hash
-reference. The format is shown below.
-
-    -Encrypt => { -Password => "abc", Flags => DB_ENCRYPT_AES }
-
-Valid values for the Flags are 0 or C.
-
-This option requires Berkeley DB 4.1 or better.
-
-=item -Cachesize
-
-If present, this parameter sets the size of the environments shared memory
-buffer pool.
-
-=item -SharedMemKey
-
-If present, this parameter sets the base segment ID for the shared memory
-region used by Berkeley DB. 
-
-This option requires Berkeley DB 3.1 or better.
-
-Use C<$env-Eget_shm_key($id)> to find out the base segment ID used
-once the environment is open.
-
-
-=item -Config
-
-This is a variation on the C<-Home> parameter, but it allows finer
-control of where specific types of files will be stored.
-
-The parameter expects a reference to a hash. Valid keys are:
-B, B and B
-
-The code below shows an example of how it can be used.
-
-    $env = new BerkeleyDB::Env
-             -Config => { DB_DATA_DIR => "/home/databases",
-                          DB_LOG_DIR  => "/home/logs",
-                          DB_TMP_DIR  => "/home/tmp"
-                        }
-    ...
-
-=item -ErrFile
-
-Expects a filename or filenhandle. Any errors generated internally by
-Berkeley DB will be logged to this file. A useful debug setting is to
-open environments with either
-
-    -ErrFile => *STDOUT
-
-or 
-
-    -ErrFile => *STDERR
-
-=item -ErrPrefix
-
-Allows a prefix to be added to the error messages before they are sent
-to B<-ErrFile>.
-
-=item -Flags
-
-The B parameter specifies both which sub-systems to initialise,
-as well as a number of environment-wide options.
-See the Berkeley DB documentation for more details of these options.
-
-Any of the following can be specified by OR'ing them:
-
-B
-
-If any of the files specified do not already exist, create them.
-
-B
-
-Initialise the Concurrent Access Methods  
-
-B
-
-Initialise the Locking sub-system.
-
-B
-
-Initialise the Logging sub-system.
-
-B
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B is also specified.
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B
-
-
-
-B
-
-B
-
-B
-
-B
-
-B
-
-=item -SetFlags
-
-Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
-use of DB_ENV->set_flags before DB_ENV->open is called.
-
-Only valid when Berkeley DB 3.x or better is used.
-
-=item -LockDetect
-
-Specifies what to do when a lock conflict occurs. The value should be one of
-
-B 
-
-B
-
-B
-
-B
-
-=item -Verbose
-
-Add extra debugging information to the messages sent to B<-ErrFile>.
-
-=back
-
-=head2 Methods
-
-The environment class has the following methods:
-
-=over 5
-
-=item $env->errPrefix("string") ;
-
-This method is identical to the B<-ErrPrefix> flag. It allows the
-error prefix string to be changed dynamically.
-
-=item $env->set_flags(bitmask, 1|0);
-
-=item $txn = $env->TxnMgr()
-
-Constructor for creating a B object.
-See L<"TRANSACTIONS"> for more details of using transactions.
-
-This method is deprecated. Access the transaction methods using the B
-methods below from the environment object directly.
-
-=item $env->txn_begin()
-
-TODO
-
-=item $env->txn_stat()
-
-TODO
-
-=item $env->txn_checkpoint()
-
-TODO
-
-=item $env->status()
-
-Returns the status of the last BerkeleyDB::Env method.
-
-
-=item $env->DB_ENV()
-
-Returns a pointer to the underlying DB_ENV data structure that Berkeley
-DB uses.
-
-=item $env->get_shm_key($id)
-
-Writes the base segment ID for the shared memory region used by the
-Berkeley DB environment into C<$id>. Returns 0 on success.
-
-This option requires Berkeley DB 4.2 or better.
-
-Use the C<-SharedMemKey> option when opening the environemt to set the
-base segment ID.
-
-=item $env->status()
-
-Returns the status of the last BerkeleyDB::Env method.
-
-=back
-
-=head2 Examples
-
-TODO.
-
-=head1 Global Classes
-
-  $status = BerkeleyDB::db_remove [OPTIONS]
-  $status = BerkeleyDB::db_rename [OPTIONS]
-  $status = BerkeleyDB::db_verify [OPTIONS]
-
-=head1 THE DATABASE CLASSES
-
-B supports the following database formats:
-
-=over 5
-
-=item B
-
-This database type allows arbitrary key/value pairs to be stored in data
-files. This is equivalent to the functionality provided by other
-hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
-the files created using B are not compatible with any
-of the other packages mentioned.
-
-A default hashing algorithm, which will be adequate for most applications,
-is built into BerkeleyDB. If you do need to use your own hashing algorithm
-it is possible to write your own in Perl and have B use
-it instead.
-
-=item B
-
-The Btree format allows arbitrary key/value pairs to be stored in a
-B+tree.
-
-As with the B format, it is possible to provide a
-user defined Perl routine to perform the comparison of keys. By default,
-though, the keys are stored in lexical order.
-
-=item B
-
-TODO.
-
-
-=item B
-
-TODO.
-
-=item B
-
-This isn't a database format at all. It is used when you want to open an
-existing Berkeley DB database without having to know what type it is.
-
-=back
-
-
-Each of the database formats described above is accessed via a
-corresponding B class. These will be described in turn in
-the next sections.
-
-=head1 BerkeleyDB::Hash
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Hash
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Hash specific
-                [ -Ffactor       => number,]
-                [ -Nelem         => number,]
-                [ -Hash          => code reference,]
-                [ -DupCompare    => code reference,]
-
-and this
-
-    [$db =] tie %hash, 'BerkeleyDB::Hash', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Hash specific
-                [ -Ffactor       => number,]
-                [ -Nelem         => number,]
-                [ -Hash          => code reference,]
-                [ -DupCompare    => code reference,]
-
-
-When the "tie" interface is used, reading from and writing to the database
-is achieved via the tied hash. In this case the database operates like
-a Perl associative array that happens to be stored on disk.
-
-In addition to the high-level tied hash interface, it is possible to
-make use of the underlying methods provided by Berkeley DB
-
-=head2 Options
-
-In addition to the standard set of options (see L)
-B supports these options:
-
-=over 5
-
-=item -Property
-
-Used to specify extra flags when opening a database. The following
-flags may be specified by bitwise OR'ing together one or more of the
-following values:
-
-B
-
-When creating a new database, this flag enables the storing of duplicate
-keys in the database. If B is not specified as well, the
-duplicates are stored in the order they are created in the database.
-
-B
-
-Enables the sorting of duplicate keys in the database. Ignored if
-B isn't also specified.
-
-=item -Ffactor
-
-=item -Nelem
-
-See the Berkeley DB documentation for details of these options.
-
-=item -Hash
-
-Allows you to provide a user defined hash function. If not specified, 
-a default hash function is used. Here is a template for a user-defined
-hash function
-
-    sub hash
-    {
-        my ($data) = shift ;
-        ...
-        # return the hash value for $data
-	return $hash ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename => $filename, 
-    	-Hash     => \&hash,
-	...
-
-See L<""> for an example.
-
-=item -DupCompare
-
-Used in conjunction with the B flag. 
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-	-Property   => DB_DUP|DB_DUPSORT,
-    	-DupCompare => \&compare,
-	...
-
-=back
-
-
-=head2 Methods
-
-B only supports the standard database methods.
-See L.
-
-=head2 A Simple Tied Hash Example
-
-    use strict ;
-    use BerkeleyDB ;
-    use vars qw( %h $k $v ) ;
-
-    my $filename = "fruit" ;
-    unlink $filename ;
-    tie %h, "BerkeleyDB::Hash",
-                -Filename => $filename,
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $h{"apple"} = "red" ;
-    $h{"orange"} = "orange" ;
-    $h{"banana"} = "yellow" ;
-    $h{"tomato"} = "red" ;
-
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $h{"banana"} ;
-
-    # Delete a key/value pair.
-    delete $h{"apple"} ;
-
-    # print the contents of the file
-    while (($k, $v) = each %h)
-      { print "$k -> $v\n" }
-
-    untie %h ;
-
-here is the output:
-
-    Banana Exists
-    
-    orange -> orange
-    tomato -> red
-    banana -> yellow
-
-Note that, like ordinary associative arrays, the order of the keys
-retrieved from a Hash database is in an apparently random order.
-
-=head2 Another Simple Hash Example
-
-Do the same as the previous example but not using tie.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash
-                -Filename => $filename,
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("apple", "red") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("banana", "yellow") ;
-    $db->db_put("tomato", "red") ;
-
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
-
-    # Delete a key/value pair.
-    $db->db_del("apple") ;
-
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-
-    undef $cursor ;
-    undef $db ;
-
-=head2 Duplicate keys
-
-The code below is a variation on the examples above. This time the hash has
-been inverted. The key this time is colour and the value is the fruit name.
-The B flag has been specified to allow duplicates.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash
-                -Filename => $filename,
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-
-    undef $cursor ;
-    undef $db ;
-
-here is the output:
-
-    orange -> orange
-    yellow -> banana
-    red -> apple
-    red -> tomato
-    green -> banana
-    green -> apple
-
-=head2 Sorting Duplicate Keys
-
-In the previous example, when there were duplicate keys, the values are
-sorted in the order they are stored in. The code below is
-identical to the previous example except the B flag is
-specified.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash
-                -Filename => $filename,
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP | DB_DUPSORT
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-
-    undef $cursor ;
-    undef $db ;
-
-Notice that in the output below the duplicate values are sorted.
-
-    orange -> orange
-    yellow -> banana
-    red -> apple
-    red -> tomato
-    green -> apple
-    green -> banana
-
-=head2 Custom Sorting Duplicate Keys
-
-Another variation 
-
-TODO
-
-=head2 Changing the hash
-
-TODO
-
-=head2 Using db_stat
-
-TODO
-
-=head1 BerkeleyDB::Btree
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-
-    $db = new BerkeleyDB::Btree
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Btree specific
-                [ -Minkey        => number,]
-                [ -Compare       => code reference,]
-                [ -DupCompare    => code reference,]
-                [ -Prefix        => code reference,]
-
-and this
-
-    [$db =] tie %hash, 'BerkeleyDB::Btree', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Btree specific
-                [ -Minkey        => number,]
-                [ -Compare       => code reference,]
-                [ -DupCompare    => code reference,]
-                [ -Prefix        => code reference,]
-
-=head2 Options
-
-In addition to the standard set of options (see L)
-B supports these options:
-
-=over 5
-
-=item -Property
-
-Used to specify extra flags when opening a database. The following
-flags may be specified by bitwise OR'ing together one or more of the
-following values:
-
-B
-
-When creating a new database, this flag enables the storing of duplicate
-keys in the database. If B is not specified as well, the
-duplicates are stored in the order they are created in the database.
-
-B
-
-Enables the sorting of duplicate keys in the database. Ignored if
-B isn't also specified.
-
-=item Minkey
-
-TODO
-
-=item Compare
-
-Allow you to override the default sort order used in the database. See
-L<"Changing the sort order"> for an example.
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-Compare    => \&compare,
-	...
-
-=item Prefix
-
-    sub prefix
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return number of bytes of $key2 which are 
-        # necessary to determine that it is greater than $key1
-        return $bytes ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-Prefix     => \&prefix,
-	...
-=item DupCompare
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-DupCompare => \&compare,
-	...
-
-=back
-
-=head2 Methods
-
-B supports the following database methods.
-See also L.
-
-All the methods below return 0 to indicate success.
-
-=over 5
-
-=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
-
-Given a key, C<$key>, this method returns the proportion of keys less than 
-C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
-proportion greater than C<$key> in C<$greater>.
-
-The proportion is returned as a double in the range 0.0 to 1.0.
-
-=back
-
-=head2 A Simple Btree Example
-
-The code below is a simple example of using a btree database.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree',
-    		-Filename   => $filename,
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-
-Here is the output from the code above. The keys have been sorted using
-Berkeley DB's default sorting algorithm.
-
-    Smith
-    Wall
-    mouse
-
-
-=head2 Changing the sort order
-
-It is possible to supply your own sorting algorithm if the one that Berkeley
-DB used isn't suitable. The code below is identical to the previous example
-except for the case insensitive compare function.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree',
-    		-Filename   => $filename,
-	        -Flags      => DB_CREATE,
-		-Compare    => sub { lc $_[0] cmp lc $_[1] }
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-
-Here is the output from the code above.
-
-    mouse
-    Smith
-    Wall
-
-There are a few points to bear in mind if you want to change the
-ordering in a BTREE database:
-
-=over 5
-
-=item 1.
-
-The new compare function must be specified when you create the database.
-
-=item 2.
-
-You cannot change the ordering once the database has been created. Thus
-you must use the same compare function every time you access the
-database.
-
-=back 
-
-=head2 Using db_stat
-
-TODO
-
-=head1 BerkeleyDB::Recno
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Recno
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Recno specific
-                [ -Delim           => byte,]
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -Source          => filename,]
-
-and this
-
-    [$db =] tie @arry, 'BerkeleyDB::Recno', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Recno specific
-                [ -Delim           => byte,]
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -Source          => filename,]
-
-=head2 A Recno Example
-
-Here is a simple example that uses RECNO (if you are using a version 
-of Perl earlier than 5.004_57 this example won't work -- see 
-L for a workaround).
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    tie @h, 'BerkeleyDB::Recno',
-    		-Filename   => $filename,
-	        -Flags      => DB_CREATE,
-		-Property   => DB_RENUMBER
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    push @h, "green", "black" ;
-
-    my $elements = scalar @h ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = pop @h ;
-    print "popped $last\n" ;
-
-    unshift @h, "white" ;
-    my $first = shift @h ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    untie @h ;
-
-Here is the output from the script:
-
-    The array contains 5 entries
-    popped black
-    shifted white
-    Element 1 Exists with value blue
-    The last element is green
-    The 2nd last element is yellow
-
-=head1 BerkeleyDB::Queue
-
-Equivalent to calling B followed by Bopen> with
-type B in Berkeley DB 3.x or greater. This database format
-isn't available if you use Berkeley DB 2.x.
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Queue
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Queue specific
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -ExtentSize    => number, ]
-
-and this
-
-    [$db =] tie @arry, 'BerkeleyDB::Queue', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Queue specific
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-
-
-=head1 BerkeleyDB::Unknown
-
-This class is used to open an existing database. 
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-The constructor looks like this:
-
-    $db = new BerkeleyDB::Unknown
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-
-
-=head2 An example 
-
-=head1 COMMON OPTIONS
-
-All database access class constructors support the common set of
-options defined below. All are optional.
-
-=over 5
-
-=item -Filename
-
-The database filename. If no filename is specified, a temporary file will
-be created and removed once the program terminates.
-
-=item -Subname
-
-Specifies the name of the sub-database to open.
-This option is only valid if you are using Berkeley DB 3.x or greater.
-
-=item -Flags
-
-Specify how the database will be opened/created. The valid flags are:
-
-B
-
-Create any underlying files, as necessary. If the files do not already
-exist and the B flag is not specified, the call will fail.
-
-B
-
-Not supported by BerkeleyDB.
-
-B
-
-Opens the database in read-only mode.
-
-B
-
-Not supported by BerkeleyDB.
-
-B
-
-If the database file already exists, remove all the data before
-opening it.
-
-=item -Mode
-
-Determines the file protection when the database is created. Defaults
-to 0666.
-
-=item -Cachesize
-
-=item -Lorder
-
-=item -Pagesize
-
-=item -Env
-
-When working under a Berkeley DB environment, this parameter
-
-Defaults to no environment.
-
-=item -Encrypt
-
-If present, this parameter will enable encryption of all data before
-it is written to the database. This parameter must be given a hash
-reference. The format is shown below.
-
-    -Encrypt => { -Password => "abc", Flags => DB_ENCRYPT_AES }
-
-Valid values for the Flags are 0 or C.
-
-This option requires Berkeley DB 4.1 or better.
-
-=item -Txn
-
-TODO.
-
-=back
-
-=head1 COMMON DATABASE METHODS
-
-All the database interfaces support the common set of methods defined
-below.
-
-All the methods below return 0 to indicate success.
-
-=head2 $status = $db->db_get($key, $value [, $flags])
-
-Given a key (C<$key>) this method reads the value associated with it
-from the database. If it exists, the value read from the database is
-returned in the C<$value> parameter.
-
-The B<$flags> parameter is optional. If present, it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-When the B flag is specified, B checks for the
-existence of B the C<$key> B C<$value> in the database.
-
-=item B
-
-TODO.
-
-=back
-
-In addition, the following value may be set by bitwise OR'ing it into
-the B<$flags> parameter:
-
-=over 5
-
-=item B
-
-TODO
-
-=back
-
-
-=head2 $status = $db->db_put($key, $value [, $flags])
-
-Stores a key/value pair in the database.
-
-The B<$flags> parameter is optional. If present it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-This flag is only applicable when accessing a B
-database.
-
-TODO.
-
-
-=item B
-
-If this flag is specified and C<$key> already exists in the database,
-the call to B will return B.
-
-=back
-
-=head2 $status = $db->db_del($key [, $flags])
-
-Deletes a key/value pair in the database associated with C<$key>.
-If duplicate keys are enabled in the database, B will delete
-B key/value pairs with key C<$key>.
-
-The B<$flags> parameter is optional and is currently unused.
-
-=head2 $status = $db->db_sync()
-
-If any parts of the database are in memory, write them to the database.
-
-=head2 $cursor = $db->db_cursor([$flags])
-
-Creates a cursor object. This is used to access the contents of the
-database sequentially. See L for details of the methods
-available when working with cursors.
-
-The B<$flags> parameter is optional. If present it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-TODO.
-
-=back
-
-=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
-
-TODO
-
-=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
-
-TODO
-
-=head2 $db->byteswapped()
-
-TODO
-
-=head2 $db->type()
-
-Returns the type of the database. The possible return code are B
-for a B database, B for a B
-database and B for a B database. This method
-is typically used when a database has been opened with
-B.
-
-=head2   $bool = $env->cds_enabled();
-
-Returns true if the Berkeley DB environment C<$env> has been opened in
-CDS mode.
-
-=head2   $bool = $db->cds_enabled();
-
-Returns true if the database C<$db> has been opened in CDS mode.
-
-=head2 $lock = $db->cds_lock();
-
-Creates a CDS write lock object C<$lock>.
-
-It is a fatal error to attempt to create a cds_lock if the Berkeley DB
-environment has not been opened in CDS mode.
-
-=head2 $lock->cds_unlock();
-
-Removes a CDS lock. The destruction of the CDS lock object automatically
-calls this method.
-
-Note that if multiple CDS lock objects are created, the underlying write
-lock will not be released until all CDS lock objects are either explicitly
-unlocked with this method, or the CDS lock objects have been destroyed.
-
-=head2 $ref = $db->db_stat()
-
-Returns a reference to an associative array containing information about
-the database. The keys of the associative array correspond directly to the
-names of the fields defined in the Berkeley DB documentation. For example,
-in the DB documentation, the field B stores the version of the
-Btree database. Assuming you called B on a Btree database the
-equivalent field would be accessed as follows:
-
-    $version = $ref->{'bt_version'} ;
-
-If you are using Berkeley DB 3.x or better, this method will work with
-all database formats. When DB 2.x is used, it only works with
-B.
-
-=head2 $status = $db->status()
-
-Returns the status of the last C<$db> method called.
-
-=head2 $status = $db->truncate($count)
-
-Truncates the database and returns the number of records deleted
-in C<$count>.
-
-=head1 CURSORS
-
-A cursor is used whenever you want to access the contents of a database
-in sequential order.
-A cursor object is created with the C
-
-A cursor object has the following methods available:
-
-=head2 $newcursor = $cursor->c_dup($flags)
-
-Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
-
-The C<$flags> parameter is optional and can take the following value:
-
-=over 5
-
-=item DB_POSITION
-
-When present this flag will position the new cursor at the same place as the
-existing cursor.
-
-=back
-
-=head2 $status = $cursor->c_get($key, $value, $flags)
-
-Reads a key/value pair from the database, returning the data in C<$key>
-and C<$value>. The key/value pair actually read is controlled by the
-C<$flags> parameter, which can take B of the following values:
-
-=over 5
-
-=item B
-
-Set the cursor to point to the first key/value pair in the
-database. Return the key/value pair in C<$key> and C<$value>.
-
-=item B
-
-Set the cursor to point to the last key/value pair in the database. Return
-the key/value pair in C<$key> and C<$value>.
-
-=item B
-
-If the cursor is already pointing to a key/value pair, it will be
-incremented to point to the next key/value pair and return its contents.
-
-If the cursor isn't initialised, B works just like B.
-
-If the cursor is already positioned at the last key/value pair, B
-will return B.
-
-=item B
-
-This flag is only valid when duplicate keys have been enabled in
-a database.
-If the cursor is already pointing to a key/value pair and the key of
-the next key/value pair is identical, the cursor will be incremented to
-point to it and their contents returned.
-
-=item B
-
-If the cursor is already pointing to a key/value pair, it will be
-decremented to point to the previous key/value pair and return its
-contents.
-
-If the cursor isn't initialised, B works just like B.
-
-If the cursor is already positioned at the first key/value pair, B
-will return B.
-
-=item B
-
-If the cursor has been set to point to a key/value pair, return their
-contents.
-If the key/value pair referenced by the cursor has been deleted, B
-will return B.
-
-=item B
-
-Set the cursor to point to the key/value pair referenced by B<$key>
-and return the value in B<$value>.
-
-=item B
-
-This flag is a variation on the B flag. As well as returning
-the value, it also returns the key, via B<$key>.
-When used with a B database the key matched by B
-will be the shortest key (in length) which is greater than or equal to
-the key supplied, via B<$key>. This allows partial key searches.
-See ??? for an example of how to use this flag.
-
-=item B
-
-Another variation on B. This one returns both the key and
-the value.
-
-=item B
-
-TODO.
-
-=item B
-
-TODO.
-
-=back
-
-In addition, the following value may be set by bitwise OR'ing it into
-the B<$flags> parameter:
-
-=over 5
-
-=item B
-
-TODO.
-
-=back
-
-=head2  $status = $cursor->c_put($key, $value, $flags)
-
-Stores the key/value pair in the database. The position that the data is
-stored in the database is controlled by the C<$flags> parameter, which
-must take B of the following values:
-
-=over 5
-
-=item B
-
-When used with a Btree or Hash database, a duplicate of the key referenced
-by the current cursor position will be created and the contents of
-B<$value> will be associated with it - B<$key> is ignored.
-The new key/value pair will be stored immediately after the current
-cursor position.
-Obviously the database has to have been opened with B.
-
-When used with a Recno ... TODO
-
-
-=item B
-
-When used with a Btree or Hash database, a duplicate of the key referenced
-by the current cursor position will be created and the contents of
-B<$value> will be associated with it - B<$key> is ignored.
-The new key/value pair will be stored immediately before the current
-cursor position.
-Obviously the database has to have been opened with B.
-
-When used with a Recno ... TODO
-
-=item B
-
-If the cursor has been initialised, replace the value of the key/value
-pair stored in the database with the contents of B<$value>.
-
-=item B
-
-Only valid with a Btree or Hash database. This flag is only really
-used when duplicates are enabled in the database and sorted duplicates
-haven't been specified.
-In this case the key/value pair will be inserted as the first entry in
-the duplicates for the particular key.
-
-=item B
-
-Only valid with a Btree or Hash database. This flag is only really
-used when duplicates are enabled in the database and sorted duplicates
-haven't been specified.
-In this case the key/value pair will be inserted as the last entry in
-the duplicates for the particular key.
-
-=back
-
-=head2  $status = $cursor->c_del([$flags])
-
-This method deletes the key/value pair associated with the current cursor
-position. The cursor position will not be changed by this operation, so
-any subsequent cursor operation must first initialise the cursor to
-point to a valid key/value pair.
-
-If the key/value pair associated with the cursor have already been
-deleted, B will return B.
-
-The B<$flags> parameter is not used at present.
-
-=head2 $status = $cursor->c_count($cnt [, $flags])
-
-Stores the number of duplicates at the current cursor position in B<$cnt>.
-
-The B<$flags> parameter is not used at present. This method needs 
-Berkeley DB 3.1 or better.
-
-=head2  $status = $cursor->status()
-
-Returns the status of the last cursor method as a dual type.
-
-=head2  $status = $cursor->c_pget() ;
-
-TODO
-
-=head2  $status = $cursor->c_close()
-
-Closes the cursor B<$cursor>.
-
-=head2 Cursor Examples
-
-TODO
-
-Iterating from first to last, then in reverse.
-
-examples of each of the flags.
-
-=head1 JOIN
-
-Join support for BerkeleyDB is in progress. Watch this space.
-
-TODO
-
-=head1 TRANSACTIONS
-
-TODO.
-
-=head1 CDS Mode
-
-The Berkeley Db Concurrent Data Store is a lightweight locking mechanism
-that is useful in scenarios where transactions are overkill. See the
-accompanying document .. for details of using this module in CDS mode.
-
-=head1 DBM Filters
-
-A DBM Filter is a piece of code that is be used when you I
-want to make the same transformation to all keys and/or values in a DBM
-database. All of the database classes (BerkeleyDB::Hash,
-BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
-
-There are four methods associated with DBM Filters. All work
-identically, and each is used to install (or uninstall) a single DBM
-Filter. Each expects a single parameter, namely a reference to a sub.
-The only difference between them is the place that the filter is
-installed.
-
-To summarise:
-
-=over 5
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a key to a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a value to a DBM database.
-
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a key from a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a value from a DBM database.
-
-=back
-
-You can use any combination of the methods, from none, to all four.
-
-All filter methods return the existing filter, if present, or C
-in not.
-
-To delete a filter pass C to it.
-
-=head2 The Filter
-
-When each filter is called by Perl, a local copy of C<$_> will contain
-the key or value to be filtered. Filtering is achieved by modifying
-the contents of C<$_>. The return code from the filter is ignored.
-
-=head2 An Example -- the NULL termination problem.
-
-Consider the following scenario. You have a DBM database that you need
-to share with a third-party C application. The C application assumes
-that I keys and values are NULL terminated. Unfortunately when
-Perl writes to DBM databases it doesn't use NULL termination, so your
-Perl application will have to manage NULL termination itself. When you
-write to the database you will have to use something like this:
-
-    $hash{"$key\0"} = "$value\0" ;
-
-Similarly the NULL needs to be taken into account when you are considering
-the length of existing keys/values.
-
-It would be much better if you could ignore the NULL terminations issue
-in the main application code and have a mechanism that automatically
-added the terminating NULL to all keys and values whenever you write to
-the database and have them removed when you read from the database. As I'm
-sure you have already guessed, this is a problem that DBM Filters can
-fix very easily.
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-    my $db = tie %hash, 'BerkeleyDB::Hash',
-    		-Filename   => $filename,
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Install DBM Filters
-    $db->filter_fetch_key  ( sub { s/\0$//    } ) ;
-    $db->filter_store_key  ( sub { $_ .= "\0" } ) ;
-    $db->filter_fetch_value( sub { s/\0$//    } ) ;
-    $db->filter_store_value( sub { $_ .= "\0" } ) ;
-
-    $hash{"abc"} = "def" ;
-    my $a = $hash{"ABC"} ;
-    # ...
-    undef $db ;
-    untie %hash ;
-
-Hopefully the contents of each of the filters should be
-self-explanatory. Both "fetch" filters remove the terminating NULL,
-and both "store" filters add a terminating NULL.
-
-
-=head2 Another Example -- Key is a C int.
-
-Here is another real-life example. By default, whenever Perl writes to
-a DBM database it always writes the key and value as strings. So when
-you use this:
-
-    $hash{12345} = "something" ;
-
-the key 12345 will get stored in the DBM database as the 5 byte string
-"12345". If you actually want the key to be stored in the DBM database
-as a C int, you will have to use C when writing, and C
-when reading.
-
-Here is a DBM Filter that does it:
-
-    use strict ;
-    use BerkeleyDB ;
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-
-    my $db = tie %hash, 'BerkeleyDB::Btree',
-    		-Filename   => $filename,
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    $db->filter_fetch_key  ( sub { $_ = unpack("i", $_) } ) ;
-    $db->filter_store_key  ( sub { $_ = pack ("i", $_) } ) ;
-    $hash{123} = "def" ;
-    # ...
-    undef $db ;
-    untie %hash ;
-
-This time only two filters have been used -- we only need to manipulate
-the contents of the key, so it wasn't necessary to install any value
-filters.
-
-=head1 Using BerkeleyDB with MLDBM
-
-Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
-module. The code fragment below shows how to open associate MLDBM with
-BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
-BerkeleyDB::Btree with BerkeleyDB::Hash.
-
-    use strict ;
-    use BerkeleyDB ;
-    use MLDBM qw(BerkeleyDB::Btree) ;
-    use Data::Dumper;
- 
-    my $filename = 'testmldbm' ;
-    my %o ;
-     
-    unlink $filename ;
-    tie %o, 'MLDBM', -Filename => $filename,
-                     -Flags    => DB_CREATE
-                    or die "Cannot open database '$filename: $!\n";
- 
-See the MLDBM documentation for information on how to use the module
-and for details of its limitations.
-
-=head1 EXAMPLES
-
-TODO.
-
-=head1 HINTS & TIPS
-
-=head2 Sharing Databases With C Applications
-
-There is no technical reason why a Berkeley DB database cannot be
-shared by both a Perl and a C application.
-
-The vast majority of problems that are reported in this area boil down
-to the fact that C strings are NULL terminated, whilst Perl strings
-are not. See L in the DBM
-FILTERS section for a generic way to work around this problem.
-
-
-=head2 The untie Gotcha
-
-TODO
-
-=head1 COMMON QUESTIONS
-
-This section attempts to answer some of the more common questions that
-I get asked.
-
-
-=head2 Relationship with DB_File
-
-Before Berkeley DB 2.x was written there was only one Perl module that
-interfaced to Berkeley DB. That module is called B. Although
-B can be build with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only
-provides an interface to the functionality available in Berkeley DB
-1.x. That means that it doesn't support transactions, locking or any of
-the other new features available in DB 2.x or better.
-
-=head2 How do I store Perl data structures with BerkeleyDB?
-
-See L.
-
-=head1 HISTORY
-
-See the Changes file.
-
-=head1 AVAILABILITY
-
-The most recent version of B can always be found
-on CPAN (see L for details), in the directory
-F.
-
-The official web site for Berkeley DB is F.
-
-=head1 COPYRIGHT
-
-Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program
-is free software; you can redistribute it and/or modify it under the
-same terms as Perl itself.
-
-Although B is covered by the Perl license, the library it
-makes use of, namely Berkeley DB, is not. Berkeley DB has its own
-copyright and its own license. Please take the time to read it.
-
-Here are few words taken from the Berkeley DB FAQ (at
-F) regarding the license:
-
-    Do I have to license DB to use it in Perl scripts?
-
-    No. The Berkeley DB license requires that software that uses
-    Berkeley DB be freely redistributable. In the case of Perl, that
-    software is Perl, and not your scripts. Any Perl scripts that you
-    write are your property, including scripts that make use of Berkeley
-    DB. Neither the Perl license nor the Berkeley DB license
-    place any restriction on what you may do with them.
-
-If you are in any doubt about the license situation, contact either the
-Berkeley DB authors or the author of BerkeleyDB.
-See L<"AUTHOR"> for details.
-
-
-=head1 AUTHOR
-
-Paul Marquess Epmqs@cpan.orgE.
-
-Questions about Berkeley DB may be addressed to Edb@sleepycat.comE.
-
-=head1 SEE ALSO
-
-perl(1), DB_File, Berkeley DB.
-
-=cut
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
deleted file mode 100644
index 6540a943a7a..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
+++ /dev/null
@@ -1,1685 +0,0 @@
-=head1 NAME
-
-BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
-
-=head1 SYNOPSIS
-
-  use BerkeleyDB;
-
-  $env = new BerkeleyDB::Env [OPTIONS] ;
-
-  $db  = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
-  $db  = new BerkeleyDB::Hash [OPTIONS] ;
-
-  $db  = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
-  $db  = new BerkeleyDB::Btree [OPTIONS] ;
-
-  $db  = tie @array, 'BerkeleyDB::Recno', [OPTIONS] ;
-  $db  = new BerkeleyDB::Recno [OPTIONS] ;
-
-  $db  = tie @array, 'BerkeleyDB::Queue', [OPTIONS] ;
-  $db  = new BerkeleyDB::Queue [OPTIONS] ;
-
-  $db  = new BerkeleyDB::Unknown [OPTIONS] ;
-
-  $status = BerkeleyDB::db_remove [OPTIONS]
-  $status = BerkeleyDB::db_rename [OPTIONS]
-  $status = BerkeleyDB::db_verify [OPTIONS]
-
-  $hash{$key} = $value ;
-  $value = $hash{$key} ;
-  each %hash ;
-  keys %hash ;
-  values %hash ;
-
-  $status = $db->db_get()
-  $status = $db->db_put() ;
-  $status = $db->db_del() ;
-  $status = $db->db_sync() ;
-  $status = $db->db_close() ;
-  $status = $db->db_pget()
-  $hash_ref = $db->db_stat() ;
-  $status = $db->db_key_range();
-  $type = $db->type() ;
-  $status = $db->status() ;
-  $boolean = $db->byteswapped() ;
-  $status = $db->truncate($count) ;
-
-  $bool = $env->cds_enabled();
-  $bool = $db->cds_enabled();
-  $lock = $db->cds_lock();
-  $lock->cds_unlock();
-  
-  ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
-  ($flag, $old_offset, $old_length) = $db->partial_clear() ;
-
-  $cursor = $db->db_cursor([$flags]) ;
-  $newcursor = $cursor->c_dup([$flags]);
-  $status = $cursor->c_get() ;
-  $status = $cursor->c_put() ;
-  $status = $cursor->c_del() ;
-  $status = $cursor->c_count() ;
-  $status = $cursor->c_pget() ;
-  $status = $cursor->status() ;
-  $status = $cursor->c_close() ;
-
-  $cursor = $db->db_join() ;
-  $status = $cursor->c_get() ;
-  $status = $cursor->c_close() ;
-
-  $status = $env->txn_checkpoint()
-  $hash_ref = $env->txn_stat()
-  $status = $env->setmutexlocks()
-  $status = $env->set_flags()
-
-  $txn = $env->txn_begin() ;
-  $db->Txn($txn);
-  $txn->Txn($db1, $db2,...);
-  $status = $txn->txn_prepare()
-  $status = $txn->txn_commit()
-  $status = $txn->txn_abort()
-  $status = $txn->txn_id()
-  $status = $txn->txn_discard()
-
-  $status = $env->set_lg_dir();
-  $status = $env->set_lg_bsize();
-  $status = $env->set_lg_max();
-
-  $status = $env->set_data_dir() ;
-  $status = $env->set_tmp_dir() ;
-  $status = $env->set_verbose() ;
-  $db_env_ptr = $env->DB_ENV() ;
-
-  $BerkeleyDB::Error
-  $BerkeleyDB::db_version
-
-  # DBM Filters
-  $old_filter = $db->filter_store_key  ( sub { ... } ) ;
-  $old_filter = $db->filter_store_value( sub { ... } ) ;
-  $old_filter = $db->filter_fetch_key  ( sub { ... } ) ;
-  $old_filter = $db->filter_fetch_value( sub { ... } ) ;
-
-  # deprecated, but supported
-  $txn_mgr = $env->TxnMgr();
-  $status = $txn_mgr->txn_checkpoint()
-  $hash_ref = $txn_mgr->txn_stat()
-  $txn = $txn_mgr->txn_begin() ;
-
-=head1 DESCRIPTION
-
-B
-
-This Perl module provides an interface to most of the functionality
-available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
-that the interface provided here to be identical to the Berkeley DB
-interface. The main changes have been to make the Berkeley DB API work
-in a Perl way. Note that if you are using Berkeley DB 2.x, the new
-features available in Berkeley DB 3.x or DB 4.x are not available via
-this module.
-
-The reader is expected to be familiar with the Berkeley DB
-documentation. Where the interface provided here is identical to the
-Berkeley DB library and the... TODO
-
-The B, B, B and B man pages are
-particularly relevant.
-
-The interface to Berkeley DB is implemented with a number of Perl
-classes.
-
-=head1 The BerkeleyDB::Env Class
-
-The B class provides an interface to the Berkeley DB
-function B in Berkeley DB 2.x or B and
-Bopen> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
-number of sub-systems that can then be used in a consistent way in all
-the databases you make use of in the environment.
-
-If you don't intend using transactions, locking or logging, then you
-shouldn't need to make use of B.
-
-Note that an environment consists of a number of files that Berkeley DB
-manages behind the scenes for you. When you first use an environment, it
-needs to be explicitly created. This is done by including C
-with the C parameter, described below.
-
-=head2 Synopsis
-
-    $env = new BerkeleyDB::Env
-             [ -Home         => $path, ]
-             [ -Server       => $name, ]
-             [ -CacheSize    => $number, ]
-             [ -Config       => { name => value, name => value }, ]
-             [ -ErrFile      => filename, ]
-             [ -ErrPrefix    => "string", ]
-             [ -Flags        => number, ]
-             [ -SetFlags     => bitmask, ]
-             [ -LockDetect   => number, ]
-             [ -SharedMemKey => number, ]
-             [ -Verbose      => boolean, ]
-             [ -Encrypt      => { Password => "string",
-	                          Flags    => number }, ]
-
-All the parameters to the BerkeleyDB::Env constructor are optional.
-
-=over 5
-
-=item -Home
-
-If present, this parameter should point to an existing directory. Any
-files that I specified with an absolute path in the sub-systems
-that are initialised by the BerkeleyDB::Env class will be assumed to
-live in the B directory.
-
-For example, in the code fragment below the database "fred.db" will be
-opened in the directory "/home/databases" because it was specified as a
-relative path, but "joe.db" will be opened in "/other" because it was
-part of an absolute path.
-
-    $env = new BerkeleyDB::Env
-             -Home         => "/home/databases"
-    ...
-
-    $db1 = new BerkeleyDB::Hash
-	     -Filename = "fred.db",
-	     -Env => $env
-    ...
-
-    $db2 = new BerkeleyDB::Hash
-	     -Filename = "/other/joe.db",
-	     -Env => $env
-    ...
-
-=item -Server
-
-If present, this parameter should be the hostname of a server that is running
-the Berkeley DB RPC server. All databases will be accessed via the RPC server.
-
-=item -Encrypt
-
-If present, this parameter will enable encryption of  all data before
-it is written to the database. This parameters must be given a hash
-reference. The format is shown below.
-
-    -Encrypt => { -Password => "abc", Flags => DB_ENCRYPT_AES }
-
-Valid values for the Flags are 0 or C.
-
-This option requires Berkeley DB 4.1 or better.
-
-=item -Cachesize
-
-If present, this parameter sets the size of the environments shared memory
-buffer pool.
-
-=item -SharedMemKey
-
-If present, this parameter sets the base segment ID for the shared memory
-region used by Berkeley DB. 
-
-This option requires Berkeley DB 3.1 or better.
-
-Use C<$env-Eget_shm_key($id)> to find out the base segment ID used
-once the environment is open.
-
-
-=item -Config
-
-This is a variation on the C<-Home> parameter, but it allows finer
-control of where specific types of files will be stored.
-
-The parameter expects a reference to a hash. Valid keys are:
-B, B and B
-
-The code below shows an example of how it can be used.
-
-    $env = new BerkeleyDB::Env
-             -Config => { DB_DATA_DIR => "/home/databases",
-                          DB_LOG_DIR  => "/home/logs",
-                          DB_TMP_DIR  => "/home/tmp"
-                        }
-    ...
-
-=item -ErrFile
-
-Expects a filename or filenhandle. Any errors generated internally by
-Berkeley DB will be logged to this file. A useful debug setting is to
-open environments with either
-
-    -ErrFile => *STDOUT
-
-or 
-
-    -ErrFile => *STDERR
-
-=item -ErrPrefix
-
-Allows a prefix to be added to the error messages before they are sent
-to B<-ErrFile>.
-
-=item -Flags
-
-The B parameter specifies both which sub-systems to initialise,
-as well as a number of environment-wide options.
-See the Berkeley DB documentation for more details of these options.
-
-Any of the following can be specified by OR'ing them:
-
-B
-
-If any of the files specified do not already exist, create them.
-
-B
-
-Initialise the Concurrent Access Methods  
-
-B
-
-Initialise the Locking sub-system.
-
-B
-
-Initialise the Logging sub-system.
-
-B
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B is also specified.
-
-Initialise the ...
-
-B
-
-Initialise the ...
-
-B
-
-
-
-B
-
-B
-
-B
-
-B
-
-B
-
-=item -SetFlags
-
-Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
-use of DB_ENV->set_flags before DB_ENV->open is called.
-
-Only valid when Berkeley DB 3.x or better is used.
-
-=item -LockDetect
-
-Specifies what to do when a lock conflict occurs. The value should be one of
-
-B 
-
-B
-
-B
-
-B
-
-=item -Verbose
-
-Add extra debugging information to the messages sent to B<-ErrFile>.
-
-=back
-
-=head2 Methods
-
-The environment class has the following methods:
-
-=over 5
-
-=item $env->errPrefix("string") ;
-
-This method is identical to the B<-ErrPrefix> flag. It allows the
-error prefix string to be changed dynamically.
-
-=item $env->set_flags(bitmask, 1|0);
-
-=item $txn = $env->TxnMgr()
-
-Constructor for creating a B object.
-See L<"TRANSACTIONS"> for more details of using transactions.
-
-This method is deprecated. Access the transaction methods using the B
-methods below from the environment object directly.
-
-=item $env->txn_begin()
-
-TODO
-
-=item $env->txn_stat()
-
-TODO
-
-=item $env->txn_checkpoint()
-
-TODO
-
-=item $env->status()
-
-Returns the status of the last BerkeleyDB::Env method.
-
-
-=item $env->DB_ENV()
-
-Returns a pointer to the underlying DB_ENV data structure that Berkeley
-DB uses.
-
-=item $env->get_shm_key($id)
-
-Writes the base segment ID for the shared memory region used by the
-Berkeley DB environment into C<$id>. Returns 0 on success.
-
-This option requires Berkeley DB 4.2 or better.
-
-Use the C<-SharedMemKey> option when opening the environemt to set the
-base segment ID.
-
-=item $env->status()
-
-Returns the status of the last BerkeleyDB::Env method.
-
-=back
-
-=head2 Examples
-
-TODO.
-
-=head1 Global Classes
-
-  $status = BerkeleyDB::db_remove [OPTIONS]
-  $status = BerkeleyDB::db_rename [OPTIONS]
-  $status = BerkeleyDB::db_verify [OPTIONS]
-
-=head1 THE DATABASE CLASSES
-
-B supports the following database formats:
-
-=over 5
-
-=item B
-
-This database type allows arbitrary key/value pairs to be stored in data
-files. This is equivalent to the functionality provided by other
-hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
-the files created using B are not compatible with any
-of the other packages mentioned.
-
-A default hashing algorithm, which will be adequate for most applications,
-is built into BerkeleyDB. If you do need to use your own hashing algorithm
-it is possible to write your own in Perl and have B use
-it instead.
-
-=item B
-
-The Btree format allows arbitrary key/value pairs to be stored in a
-B+tree.
-
-As with the B format, it is possible to provide a
-user defined Perl routine to perform the comparison of keys. By default,
-though, the keys are stored in lexical order.
-
-=item B
-
-TODO.
-
-
-=item B
-
-TODO.
-
-=item B
-
-This isn't a database format at all. It is used when you want to open an
-existing Berkeley DB database without having to know what type is it. 
-
-=back
-
-
-Each of the database formats described above is accessed via a
-corresponding B class. These will be described in turn in
-the next sections.
-
-=head1 BerkeleyDB::Hash
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Hash
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Hash specific
-                [ -Ffactor       => number,]
-                [ -Nelem         => number,]
-                [ -Hash          => code reference,]
-                [ -DupCompare    => code reference,]
-
-and this
-
-    [$db =] tie %hash, 'BerkeleyDB::Hash', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Hash specific
-                [ -Ffactor       => number,]
-                [ -Nelem         => number,]
-                [ -Hash          => code reference,]
-                [ -DupCompare    => code reference,]
-
-
-When the "tie" interface is used, reading from and writing to the database
-is achieved via the tied hash. In this case the database operates like
-a Perl associative array that happens to be stored on disk.
-
-In addition to the high-level tied hash interface, it is possible to
-make use of the underlying methods provided by Berkeley DB
-
-=head2 Options
-
-In addition to the standard set of options (see L)
-B supports these options:
-
-=over 5
-
-=item -Property
-
-Used to specify extra flags when opening a database. The following
-flags may be specified by bitwise OR'ing together one or more of the
-following values:
-
-B
-
-When creating a new database, this flag enables the storing of duplicate
-keys in the database. If B is not specified as well, the
-duplicates are stored in the order they are created in the database.
-
-B
-
-Enables the sorting of duplicate keys in the database. Ignored if
-B isn't also specified.
-
-=item -Ffactor
-
-=item -Nelem
-
-See the Berkeley DB documentation for details of these options.
-
-=item -Hash
-
-Allows you to provide a user defined hash function. If not specified, 
-a default hash function is used. Here is a template for a user-defined
-hash function
-
-    sub hash
-    {
-        my ($data) = shift ;
-        ...
-        # return the hash value for $data
-	return $hash ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename => $filename, 
-    	-Hash     => \&hash,
-	...
-
-See L<""> for an example.
-
-=item -DupCompare
-
-Used in conjunction with the B flag. 
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-	-Property   => DB_DUP|DB_DUPSORT,
-    	-DupCompare => \&compare,
-	...
-
-=back
-
-
-=head2 Methods
-
-B only supports the standard database methods.
-See L.
-
-=head2 A Simple Tied Hash Example
-
-## simpleHash
-
-here is the output:
-
-    Banana Exists
-    
-    orange -> orange
-    tomato -> red
-    banana -> yellow
-
-Note that the like ordinary associative arrays, the order of the keys
-retrieved from a Hash database are in an apparently random order.
-
-=head2 Another Simple Hash Example
-
-Do the same as the previous example but not using tie.
-
-## simpleHash2
-
-=head2 Duplicate keys
-
-The code below is a variation on the examples above. This time the hash has
-been inverted. The key this time is colour and the value is the fruit name.
-The B flag has been specified to allow duplicates.
-
-##dupHash
-
-here is the output:
-
-    orange -> orange
-    yellow -> banana
-    red -> apple
-    red -> tomato
-    green -> banana
-    green -> apple
-
-=head2 Sorting Duplicate Keys
-
-In the previous example, when there were duplicate keys, the values are
-sorted in the order they are stored in. The code below is
-identical to the previous example except the B flag is
-specified.
-
-##dupSortHash
-
-Notice that in the output below the duplicate values are sorted.
-
-    orange -> orange
-    yellow -> banana
-    red -> apple
-    red -> tomato
-    green -> apple
-    green -> banana
-
-=head2 Custom Sorting Duplicate Keys
-
-Another variation 
-
-TODO
-
-=head2 Changing the hash
-
-TODO
-
-=head2 Using db_stat
-
-TODO
-
-=head1 BerkeleyDB::Btree
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-
-    $db = new BerkeleyDB::Btree
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Btree specific
-                [ -Minkey        => number,]
-                [ -Compare       => code reference,]
-                [ -DupCompare    => code reference,]
-                [ -Prefix        => code reference,]
-
-and this
-
-    [$db =] tie %hash, 'BerkeleyDB::Btree', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Btree specific
-                [ -Minkey        => number,]
-                [ -Compare       => code reference,]
-                [ -DupCompare    => code reference,]
-                [ -Prefix        => code reference,]
-
-=head2 Options
-
-In addition to the standard set of options (see L)
-B supports these options:
-
-=over 5
-
-=item -Property
-
-Used to specify extra flags when opening a database. The following
-flags may be specified by bitwise OR'ing together one or more of the
-following values:
-
-B
-
-When creating a new database, this flag enables the storing of duplicate
-keys in the database. If B is not specified as well, the
-duplicates are stored in the order they are created in the database.
-
-B
-
-Enables the sorting of duplicate keys in the database. Ignored if
-B isn't also specified.
-
-=item Minkey
-
-TODO
-
-=item Compare
-
-Allow you to override the default sort order used in the database. See
-L<"Changing the sort order"> for an example.
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-Compare    => \&compare,
-	...
-
-=item Prefix
-
-    sub prefix
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return number of bytes of $key2 which are 
-        # necessary to determine that it is greater than $key1
-        return $bytes ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-Prefix     => \&prefix,
-	...
-=item DupCompare
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    tie %h, "BerkeleyDB::Hash", 
-        -Filename   => $filename, 
-    	-DupCompare => \&compare,
-	...
-
-=back
-
-=head2 Methods
-
-B supports the following database methods.
-See also L.
-
-All the methods below return 0 to indicate success.
-
-=over 5
-
-=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
-
-Given a key, C<$key>, this method returns the proportion of keys less than 
-C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
-proportion greater than C<$key> in C<$greater>.
-
-The proportion is returned as a double in the range 0.0 to 1.0.
-
-=back
-
-=head2 A Simple Btree Example
-
-The code below is a simple example of using a btree database.
-
-## btreeSimple
-
-Here is the output from the code above. The keys have been sorted using
-Berkeley DB's default sorting algorithm.
-
-    Smith
-    Wall
-    mouse
-
-
-=head2 Changing the sort order
-
-It is possible to supply your own sorting algorithm if the one that Berkeley
-DB used isn't suitable. The code below is identical to the previous example
-except for the case insensitive compare function.
-
-## btreeSortOrder
-
-Here is the output from the code above.
-
-    mouse
-    Smith
-    Wall
-
-There are a few point to bear in mind if you want to change the
-ordering in a BTREE database:
-
-=over 5
-
-=item 1.
-
-The new compare function must be specified when you create the database.
-
-=item 2.
-
-You cannot change the ordering once the database has been created. Thus
-you must use the same compare function every time you access the
-database.
-
-=back 
-
-=head2 Using db_stat
-
-TODO
-
-=head1 BerkeleyDB::Recno
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Recno
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Recno specific
-                [ -Delim           => byte,]
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -Source          => filename,]
-
-and this
-
-    [$db =] tie @arry, 'BerkeleyDB::Recno', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Recno specific
-                [ -Delim           => byte,]
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -Source          => filename,]
-
-=head2 A Recno Example
-
-Here is a simple example that uses RECNO (if you are using a version 
-of Perl earlier than 5.004_57 this example won't work -- see 
-L for a workaround).
-
-## simpleRecno
-
-Here is the output from the script:
-
-    The array contains 5 entries
-    popped black
-    shifted white
-    Element 1 Exists with value blue
-    The last element is green
-    The 2nd last element is yellow
-
-=head1 BerkeleyDB::Queue
-
-Equivalent to calling B followed by Bopen> with
-type B in Berkeley DB 3.x or greater. This database format
-isn't available if you use Berkeley DB 2.x.
-
-Two forms of constructor are supported:
-
-    $db = new BerkeleyDB::Queue
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Queue specific
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-                [ -ExtentSize    => number, ]
-
-and this
-
-    [$db =] tie @arry, 'BerkeleyDB::Queue', 
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-                # BerkeleyDB::Queue specific
-                [ -Len             => number,]
-                [ -Pad             => byte,]
-
-
-=head1 BerkeleyDB::Unknown
-
-This class is used to open an existing database. 
-
-Equivalent to calling B with type B in Berkeley DB 2.x and
-calling B followed by Bopen> with type B in
-Berkeley DB 3.x or greater. 
-
-The constructor looks like this:
-
-    $db = new BerkeleyDB::Unknown
-                [ -Filename      => "filename", ]
-                [ -Subname       => "sub-database name", ]
-                [ -Flags         => flags,]
-                [ -Property      => flags,]
-                [ -Mode          => number,]
-                [ -Cachesize     => number,]
-                [ -Lorder        => number,]
-                [ -Pagesize      => number,]
-                [ -Env           => $env,]
-                [ -Txn           => $txn,]
-                [ -Encrypt       => { Password => "string",
-	                              Flags    => number }, ],
-
-
-=head2 An example 
-
-=head1 COMMON OPTIONS
-
-All database access class constructors support the common set of
-options defined below. All are optional.
-
-=over 5
-
-=item -Filename
-
-The database filename. If no filename is specified, a temporary file will
-be created and removed once the program terminates.
-
-=item -Subname
-
-Specifies the name of the sub-database to open.
-This option is only valid if you are using Berkeley DB 3.x or greater.
-
-=item -Flags
-
-Specify how the database will be opened/created. The valid flags are:
-
-B
-
-Create any underlying files, as necessary. If the files do not already
-exist and the B flag is not specified, the call will fail.
-
-B
-
-Not supported by BerkeleyDB.
-
-B
-
-Opens the database in read-only mode.
-
-B
-
-Not supported by BerkeleyDB.
-
-B
-
-If the database file already exists, remove all the data before
-opening it.
-
-=item -Mode
-
-Determines the file protection when the database is created. Defaults
-to 0666.
-
-=item -Cachesize
-
-=item -Lorder
-
-=item -Pagesize
-
-=item -Env
-
-When working under a Berkeley DB environment, this parameter
-
-Defaults to no environment.
-
-=item -Encrypt
-
-If present, this parameter will enable encryption of  all data before
-it is written to the database. This parameters must be given a hash
-reference. The format is shown below.
-
-    -Encrypt => { -Password => "abc", Flags => DB_ENCRYPT_AES }
-
-Valid values for the Flags are 0 or C.
-
-This option requires Berkeley DB 4.1 or better.
-
-=item -Txn
-
-TODO.
-
-=back
-
-=head1 COMMON DATABASE METHODS
-
-All the database interfaces support the common set of methods defined
-below.
-
-All the methods below return 0 to indicate success.
-
-=head2 $status = $db->db_get($key, $value [, $flags])
-
-Given a key (C<$key>) this method reads the value associated with it
-from the database. If it exists, the value read from the database is
-returned in the C<$value> parameter.
-
-The B<$flags> parameter is optional. If present, it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-When the B flag is specified, B checks for the
-existence of B the C<$key> B C<$value> in the database.
-
-=item B
-
-TODO.
-
-=back
-
-In addition, the following value may be set by bitwise OR'ing it into
-the B<$flags> parameter:
-
-=over 5
-
-=item B
-
-TODO
-
-=back
-
-
-=head2 $status = $db->db_put($key, $value [, $flags])
-
-Stores a key/value pair in the database.
-
-The B<$flags> parameter is optional. If present it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-This flag is only applicable when accessing a B
-database.
-
-TODO.
-
-
-=item B
-
-If this flag is specified and C<$key> already exists in the database,
-the call to B will return B.
-
-=back
-
-=head2 $status = $db->db_del($key [, $flags])
-
-Deletes a key/value pair in the database associated with C<$key>.
-If duplicate keys are enabled in the database, B will delete
-B key/value pairs with key C<$key>.
-
-The B<$flags> parameter is optional and is currently unused.
-
-=head2 $status = $db->db_sync()
-
-If any parts of the database are in memory, write them to the database.
-
-=head2 $cursor = $db->db_cursor([$flags])
-
-Creates a cursor object. This is used to access the contents of the
-database sequentially. See L for details of the methods
-available when working with cursors.
-
-The B<$flags> parameter is optional. If present it must be set to B
-of the following values:
-
-=over 5
-
-=item B
-
-TODO.
-
-=back
-
-=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
-
-TODO
-
-=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
-
-TODO
-
-=head2 $db->byteswapped()
-
-TODO
-
-=head2 $db->type()
-
-Returns the type of the database. The possible return code are B
-for a B database, B for a B
-database and B for a B database. This method
-is typically used when a database has been opened with
-B.
-
-=head2   $bool = $env->cds_enabled();
-
-Returns true if the Berkeley DB environment C<$env> has been opened on
-CDS mode.
-
-=head2   $bool = $db->cds_enabled();
-
-Returns true if the database C<$db> has been opened on CDS mode.
-
-=head2 $lock = $db->cds_lock();
-
-Creates a CDS write lock object C<$lock>.
-
-It is a fatal error to attempt to create a cds_lock if the Berkeley DB
-environment has not been opened in CDS mode.
-
-=head2 $lock->cds_unlock();
-
-Removes a CDS lock. The destruction of the CDS lock object automatically
-calls this method.
-
-Note that if multiple CDS lock objects are created, the underlying write
-lock will not be released until all CDS lock objects are either explictly
-unlocked with this method, or the CDS lock objects have been destroyed.
-
-=head2 $ref = $db->db_stat()
-
-Returns a reference to an associative array containing information about
-the database. The keys of the associative array correspond directly to the
-names of the fields defined in the Berkeley DB documentation. For example,
-in the DB documentation, the field B stores the version of the
-Btree database. Assuming you called B on a Btree database the
-equivalent field would be accessed as follows:
-
-    $version = $ref->{'bt_version'} ;
-
-If you are using Berkeley DB 3.x or better, this method will work will
-all database formats. When DB 2.x is used, it only works with
-B.
-
-=head2 $status = $db->status()
-
-Returns the status of the last C<$db> method called.
-
-=head2 $status = $db->truncate($count)
-
-Truncates the datatabase and returns the number or records deleted
-in C<$count>.
-
-=head1 CURSORS
-
-A cursor is used whenever you want to access the contents of a database
-in sequential order.
-A cursor object is created with the C
-
-A cursor object has the following methods available:
-
-=head2 $newcursor = $cursor->c_dup($flags)
-
-Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
-
-The C<$flags> parameter is optional and can take the following value:
-
-=over 5
-
-=item DB_POSITION
-
-When present this flag will position the new cursor at the same place as the
-existing cursor.
-
-=back
-
-=head2 $status = $cursor->c_get($key, $value, $flags)
-
-Reads a key/value pair from the database, returning the data in C<$key>
-and C<$value>. The key/value pair actually read is controlled by the
-C<$flags> parameter, which can take B of the following values:
-
-=over 5
-
-=item B
-
-Set the cursor to point to the first key/value pair in the
-database. Return the key/value pair in C<$key> and C<$value>.
-
-=item B
-
-Set the cursor to point to the last key/value pair in the database. Return
-the key/value pair in C<$key> and C<$value>.
-
-=item B
-
-If the cursor is already pointing to a key/value pair, it will be
-incremented to point to the next key/value pair and return its contents.
-
-If the cursor isn't initialised, B works just like B.
-
-If the cursor is already positioned at the last key/value pair, B
-will return B.
-
-=item B
-
-This flag is only valid when duplicate keys have been enabled in
-a database.
-If the cursor is already pointing to a key/value pair and the key of
-the next key/value pair is identical, the cursor will be incremented to
-point to it and their contents returned.
-
-=item B
-
-If the cursor is already pointing to a key/value pair, it will be
-decremented to point to the previous key/value pair and return its
-contents.
-
-If the cursor isn't initialised, B works just like B.
-
-If the cursor is already positioned at the first key/value pair, B
-will return B.
-
-=item B
-
-If the cursor has been set to point to a key/value pair, return their
-contents.
-If the key/value pair referenced by the cursor has been deleted, B
-will return B.
-
-=item B
-
-Set the cursor to point to the key/value pair referenced by B<$key>
-and return the value in B<$value>.
-
-=item B
-
-This flag is a variation on the B flag. As well as returning
-the value, it also returns the key, via B<$key>.
-When used with a B database the key matched by B
-will be the shortest key (in length) which is greater than or equal to
-the key supplied, via B<$key>. This allows partial key searches.
-See ??? for an example of how to use this flag.
-
-=item B
-
-Another variation on B. This one returns both the key and
-the value.
-
-=item B
-
-TODO.
-
-=item B
-
-TODO.
-
-=back
-
-In addition, the following value may be set by bitwise OR'ing it into
-the B<$flags> parameter:
-
-=over 5
-
-=item B
-
-TODO.
-
-=back
-
-=head2  $status = $cursor->c_put($key, $value, $flags)
-
-Stores the key/value pair in the database. The position that the data is
-stored in the database is controlled by the C<$flags> parameter, which
-must take B of the following values:
-
-=over 5
-
-=item B
-
-When used with a Btree or Hash database, a duplicate of the key referenced
-by the current cursor position will be created and the contents of
-B<$value> will be associated with it - B<$key> is ignored.
-The new key/value pair will be stored immediately after the current
-cursor position.
-Obviously the database has to have been opened with B.
-
-When used with a Recno ... TODO
-
-
-=item B
-
-When used with a Btree or Hash database, a duplicate of the key referenced
-by the current cursor position will be created and the contents of
-B<$value> will be associated with it - B<$key> is ignored.
-The new key/value pair will be stored immediately before the current
-cursor position.
-Obviously the database has to have been opened with B.
-
-When used with a Recno ... TODO
-
-=item B
-
-If the cursor has been initialised, replace the value of the key/value
-pair stored in the database with the contents of B<$value>.
-
-=item B
-
-Only valid with a Btree or Hash database. This flag is only really
-used when duplicates are enabled in the database and sorted duplicates
-haven't been specified.
-In this case the key/value pair will be inserted as the first entry in
-the duplicates for the particular key.
-
-=item B
-
-Only valid with a Btree or Hash database. This flag is only really
-used when duplicates are enabled in the database and sorted duplicates
-haven't been specified.
-In this case the key/value pair will be inserted as the last entry in
-the duplicates for the particular key.
-
-=back
-
-=head2  $status = $cursor->c_del([$flags])
-
-This method deletes the key/value pair associated with the current cursor
-position. The cursor position will not be changed by this operation, so
-any subsequent cursor operation must first initialise the cursor to
-point to a valid key/value pair.
-
-If the key/value pair associated with the cursor have already been
-deleted, B will return B.
-
-The B<$flags> parameter is not used at present.
-
-=head2 $status = $cursor->c_count($cnt [, $flags])
-
-Stores the number of duplicates at the current cursor position in B<$cnt>.
-
-The B<$flags> parameter is not used at present. This method needs 
-Berkeley DB 3.1 or better.
-
-=head2  $status = $cursor->status()
-
-Returns the status of the last cursor method as a dual type.
-
-=head2  $status = $cursor->c_pget() ;
-
-TODO
-
-=head2  $status = $cursor->c_close()
-
-Closes the cursor B<$cursor>.
-
-=head2 Cursor Examples
-
-TODO
-
-Iterating from first to last, then in reverse.
-
-examples of each of the flags.
-
-=head1 JOIN
-
-Join support for BerkeleyDB is in progress. Watch this space.
-
-TODO
-
-=head1 TRANSACTIONS
-
-TODO.
-
-=head1 CDS Mode
-
-The Berkeley Db Concurrent Data Store is a lightweight locking mechanism
-that is useful in scenarios where transactions are overkill. See the
-accompanying document .. for details of using this module in CDS mode.
-
-=head1 DBM Filters
-
-A DBM Filter is a piece of code that is be used when you I
-want to make the same transformation to all keys and/or values in a DBM
-database. All of the database classes (BerkeleyDB::Hash,
-BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
-
-There are four methods associated with DBM Filters. All work
-identically, and each is used to install (or uninstall) a single DBM
-Filter. Each expects a single parameter, namely a reference to a sub.
-The only difference between them is the place that the filter is
-installed.
-
-To summarise:
-
-=over 5
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a key to a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a value to a DBM database.
-
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a key from a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a value from a DBM database.
-
-=back
-
-You can use any combination of the methods, from none, to all four.
-
-All filter methods return the existing filter, if present, or C
-in not.
-
-To delete a filter pass C to it.
-
-=head2 The Filter
-
-When each filter is called by Perl, a local copy of C<$_> will contain
-the key or value to be filtered. Filtering is achieved by modifying
-the contents of C<$_>. The return code from the filter is ignored.
-
-=head2 An Example -- the NULL termination problem.
-
-Consider the following scenario. You have a DBM database that you need
-to share with a third-party C application. The C application assumes
-that I keys and values are NULL terminated. Unfortunately when
-Perl writes to DBM databases it doesn't use NULL termination, so your
-Perl application will have to manage NULL termination itself. When you
-write to the database you will have to use something like this:
-
-    $hash{"$key\0"} = "$value\0" ;
-
-Similarly the NULL needs to be taken into account when you are considering
-the length of existing keys/values.
-
-It would be much better if you could ignore the NULL terminations issue
-in the main application code and have a mechanism that automatically
-added the terminating NULL to all keys and values whenever you write to
-the database and have them removed when you read from the database. As I'm
-sure you have already guessed, this is a problem that DBM Filters can
-fix very easily.
-
-## nullFilter
-
-Hopefully the contents of each of the filters should be
-self-explanatory. Both "fetch" filters remove the terminating NULL,
-and both "store" filters add a terminating NULL.
-
-
-=head2 Another Example -- Key is a C int.
-
-Here is another real-life example. By default, whenever Perl writes to
-a DBM database it always writes the key and value as strings. So when
-you use this:
-
-    $hash{12345} = "something" ;
-
-the key 12345 will get stored in the DBM database as the 5 byte string
-"12345". If you actually want the key to be stored in the DBM database
-as a C int, you will have to use C when writing, and C
-when reading.
-
-Here is a DBM Filter that does it:
-
-## intFilter
-
-This time only two filters have been used -- we only need to manipulate
-the contents of the key, so it wasn't necessary to install any value
-filters.
-
-=head1 Using BerkeleyDB with MLDBM
-
-Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
-module. The code fragment below shows how to open associate MLDBM with
-BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
-BerkeleyDB::Btree with BerkeleyDB::Hash.
-
-    use strict ;
-    use BerkeleyDB ;
-    use MLDBM qw(BerkeleyDB::Btree) ;
-    use Data::Dumper;
- 
-    my $filename = 'testmldbm' ;
-    my %o ;
-     
-    unlink $filename ;
-    tie %o, 'MLDBM', -Filename => $filename,
-                     -Flags    => DB_CREATE
-                    or die "Cannot open database '$filename: $!\n";
- 
-See the MLDBM documentation for information on how to use the module
-and for details of its limitations.
-
-=head1 EXAMPLES
-
-TODO.
-
-=head1 HINTS & TIPS
-
-=head2 Sharing Databases With C Applications
-
-There is no technical reason why a Berkeley DB database cannot be
-shared by both a Perl and a C application.
-
-The vast majority of problems that are reported in this area boil down
-to the fact that C strings are NULL terminated, whilst Perl strings
-are not. See L in the DBM
-FILTERS section for a generic way to work around this problem.
-
-
-=head2 The untie Gotcha
-
-TODO
-
-=head1 COMMON QUESTIONS
-
-This section attempts to answer some of the more common questions that
-I get asked.
-
-
-=head2 Relationship with DB_File
-
-Before Berkeley DB 2.x was written there was only one Perl module that
-interfaced to Berkeley DB. That module is called B. Although
-B can be build with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only
-provides an interface to the functionality available in Berkeley DB
-1.x. That means that it doesn't support transactions, locking or any of
-the other new features available in DB 2.x or better.
-
-=head2 How do I store Perl data structures with BerkeleyDB?
-
-See L.
-
-=head1 HISTORY
-
-See the Changes file.
-
-=head1 AVAILABILITY
-
-The most recent version of B can always be found
-on CPAN (see L for details), in the directory
-F.
-
-The official web site for Berkeley DB is F.
-
-=head1 COPYRIGHT
-
-Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This program
-is free software; you can redistribute it and/or modify it under the
-same terms as Perl itself.
-
-Although B is covered by the Perl license, the library it
-makes use of, namely Berkeley DB, is not. Berkeley DB has its own
-copyright and its own license. Please take the time to read it.
-
-Here are few words taken from the Berkeley DB FAQ (at
-F) regarding the license:
-
-    Do I have to license DB to use it in Perl scripts?
-
-    No. The Berkeley DB license requires that software that uses
-    Berkeley DB be freely redistributable. In the case of Perl, that
-    software is Perl, and not your scripts. Any Perl scripts that you
-    write are your property, including scripts that make use of Berkeley
-    DB. Neither the Perl license nor the Berkeley DB license
-    place any restriction on what you may do with them.
-
-If you are in any doubt about the license situation, contact either the
-Berkeley DB authors or the author of BerkeleyDB.
-See L<"AUTHOR"> for details.
-
-
-=head1 AUTHOR
-
-Paul Marquess Epmqs@cpan.orgE.
-
-Questions about Berkeley DB may be addressed to Edb@sleepycat.comE.
-
-=head1 SEE ALSO
-
-perl(1), DB_File, Berkeley DB.
-
-=cut
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
deleted file mode 100644
index bd78509f5f2..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
+++ /dev/null
@@ -1,3917 +0,0 @@
-/*
-
- BerkeleyDB.xs -- Perl 5 interface to Berkeley DB version 2, 3 &4
-
- written by Paul Marquess 
-
- All comments/suggestions/problems are welcome
-
-     Copyright (c) 1997-2004 Paul Marquess. All rights reserved.
-     This program is free software; you can redistribute it and/or
-     modify it under the same terms as Perl itself.
-
-     Please refer to the COPYRIGHT section in
-
- Changes:
-        0.01 -  First Alpha Release
-        0.02 -
-
-*/
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define PERL_POLLUTE
-#include "EXTERN.h"
-#include "perl.h"
-#include "XSUB.h"
-#include "ppport.h"
-
-
-/* XSUB.h defines a macro called abort 				*/
-/* This clashes with the txn abort method in Berkeley DB 4.x	*/
-/* This is a problem with ActivePerl (at least)			*/
-
-#ifdef _WIN32
-#  ifdef abort
-#    undef abort
-#  endif
-#  ifdef fopen
-#    undef fopen
-#  endif
-#  ifdef fclose
-#    undef fclose
-#  endif
-#  ifdef rename
-#    undef rename
-#  endif
-#  ifdef open
-#    undef open
-#  endif
-#endif
-
-/* Being the Berkeley DB we prefer the  (which will be
- * shortly #included by the ) __attribute__ to the possibly
- * already defined __attribute__, for example by GNUC or by Perl. */
-
-#undef __attribute__
-
-#ifdef USE_PERLIO
-#    define GetFILEptr(sv) PerlIO_findFILE(IoIFP(sv_2io(sv)))
-#else
-#    define GetFILEptr(sv) IoIFP(sv_2io(sv))
-#endif
-
-#include 
-
-/* Check the version of Berkeley DB */
-
-#ifndef DB_VERSION_MAJOR
-#ifdef HASHMAGIC
-#error db.h is from Berkeley DB 1.x - need at least Berkeley DB 2.6.4
-#else
-#error db.h is not for Berkeley DB at all.
-#endif
-#endif
-
-#if (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6) ||\
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 6 && DB_VERSION_PATCH < 4)
-#  error db.h is from Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
-#endif
-
-
-#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
-#  define IS_DB_3_0_x
-#endif
-
-#if DB_VERSION_MAJOR >= 3
-#  define AT_LEAST_DB_3
-#endif
-
-#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 1)
-#  define AT_LEAST_DB_3_1
-#endif
-
-#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
-#  define AT_LEAST_DB_3_2
-#endif
-
-#if DB_VERSION_MAJOR > 3 || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 2) ||\
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 2 && DB_VERSION_PATCH >= 6)
-#  define AT_LEAST_DB_3_2_6
-#endif
-
-#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
-#  define AT_LEAST_DB_3_3
-#endif
-
-#if DB_VERSION_MAJOR >= 4
-#  define AT_LEAST_DB_4
-#endif
-
-#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
-#  define AT_LEAST_DB_4_1
-#endif
-
-#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 2)
-#  define AT_LEAST_DB_4_2
-#endif
-
-#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3)
-#  define AT_LEAST_DB_4_3
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#define DBM_FILTERING
-#define STRICT_CLOSE
-/* #define ALLOW_RECNO_OFFSET */
-/* #define TRACE */
-
-#if DB_VERSION_MAJOR == 2 && ! defined(DB_LOCK_DEADLOCK)
-#  define DB_LOCK_DEADLOCK	EAGAIN
-#endif /* DB_VERSION_MAJOR == 2 */
-
-#if DB_VERSION_MAJOR == 2
-#  define DB_QUEUE		4
-#endif /* DB_VERSION_MAJOR == 2 */
-
-#if DB_VERSION_MAJOR == 2 
-#  define BackRef	internal
-#else
-#  if DB_VERSION_MAJOR == 3 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0)
-#    define BackRef	cj_internal
-#  else
-#    define BackRef	api_internal
-#  endif
-#endif
-
-#ifdef AT_LEAST_DB_3_2
-#    define DB_callback	DB * db,
-#    define getCurrentDB ((BerkeleyDB)db->BackRef) 
-#    define saveCurrentDB(db) 
-#else
-#    define DB_callback
-#    define getCurrentDB CurrentDB
-#    define saveCurrentDB(db) CurrentDB = db
-#endif
-
-#if DB_VERSION_MAJOR > 2
-typedef struct {
-        int              db_lorder;
-        size_t           db_cachesize;
-        size_t           db_pagesize;
-
-
-        void *(*db_malloc) __P((size_t));
-        int (*dup_compare)
-            __P((DB_callback const DBT *, const DBT *));
-
-        u_int32_t        bt_maxkey;
-        u_int32_t        bt_minkey;
-        int (*bt_compare)
-            __P((DB_callback const DBT *, const DBT *));
-        size_t (*bt_prefix)
-            __P((DB_callback const DBT *, const DBT *));
-
-        u_int32_t        h_ffactor;
-        u_int32_t        h_nelem;
-        u_int32_t      (*h_hash)
-            __P((DB_callback const void *, u_int32_t));
-
-        int              re_pad;
-        int              re_delim;
-        u_int32_t        re_len;
-        char            *re_source;
-
-#define DB_DELIMITER            0x0001
-#define DB_FIXEDLEN             0x0008
-#define DB_PAD                  0x0010
-        u_int32_t        flags;
-        u_int32_t        q_extentsize;
-} DB_INFO ;
-
-#endif /* DB_VERSION_MAJOR > 2 */
-
-typedef struct {
-	int		Status ;
-	/* char		ErrBuff[1000] ; */
-	SV *		ErrPrefix ;
-	SV *		ErrHandle ;
-	DB_ENV *	Env ;
-	int		open_dbs ;
-	int		TxnMgrStatus ;
-	int		active ;
-	bool		txn_enabled ;
-	bool		opened ;
-	bool		cds_enabled;
-	} BerkeleyDB_ENV_type ;
-
-
-typedef struct {
-        DBTYPE  	type ;
-	bool		recno_or_queue ;
-	char *		filename ;
-	BerkeleyDB_ENV_type * parent_env ;
-        DB *    	dbp ;
-        SV *    	compare ;
-        bool    	in_compare ;
-        SV *    	dup_compare ;
-        bool    	in_dup_compare ;
-        SV *    	prefix ;
-        bool    	in_prefix ;
-        SV *   	 	hash ;
-        bool    	in_hash ;
-#ifdef AT_LEAST_DB_3_3
-        SV *   	 	associated ;
-	bool		secondary_db ;
-#endif
-	int		Status ;
-        DB_INFO *	info ;
-        DBC *   	cursor ;
-	DB_TXN *	txn ;
-	int		open_cursors ;
-	u_int32_t	partial ;
-	u_int32_t	dlen ;
-	u_int32_t	doff ;
-	int		active ;
-	bool		cds_enabled;
-#ifdef ALLOW_RECNO_OFFSET
-	int		array_base ;
-#endif
-#ifdef DBM_FILTERING
-        SV *    filter_fetch_key ;
-        SV *    filter_store_key ;
-        SV *    filter_fetch_value ;
-        SV *    filter_store_value ;
-        int     filtering ;
-#endif
-        } BerkeleyDB_type;
-
-
-typedef struct {
-        DBTYPE  	type ;
-	bool		recno_or_queue ;
-	char *		filename ;
-        DB *    	dbp ;
-        SV *    	compare ;
-        SV *    	dup_compare ;
-        SV *    	prefix ;
-        SV *   	 	hash ;
-#ifdef AT_LEAST_DB_3_3
-        SV *   	 	associated ;
-	bool		secondary_db ;
-#endif
-	int		Status ;
-        DB_INFO *	info ;
-        DBC *   	cursor ;
-	DB_TXN *	txn ;
-	BerkeleyDB_type *		parent_db ;
-	u_int32_t	partial ;
-	u_int32_t	dlen ;
-	u_int32_t	doff ;
-	int		active ;
-	bool		cds_enabled;
-#ifdef ALLOW_RECNO_OFFSET
-	int		array_base ;
-#endif
-#ifdef DBM_FILTERING
-        SV *    filter_fetch_key ;
-        SV *    filter_store_key ;
-        SV *    filter_fetch_value ;
-        SV *    filter_store_value ;
-        int     filtering ;
-#endif
-        } BerkeleyDB_Cursor_type;
-
-typedef struct {
-	BerkeleyDB_ENV_type *	env ;
-	} BerkeleyDB_TxnMgr_type ;
-
-#if 1
-typedef struct {
-	int		Status ;
-	DB_TXN *	txn ;
-	int		active ;
-	} BerkeleyDB_Txn_type ;
-#else
-typedef DB_TXN                BerkeleyDB_Txn_type ;
-#endif
-
-typedef BerkeleyDB_ENV_type *	BerkeleyDB__Env ;
-typedef BerkeleyDB_ENV_type *	BerkeleyDB__Env__Raw ;
-typedef BerkeleyDB_ENV_type *	BerkeleyDB__Env__Inner ;
-typedef BerkeleyDB_type * 	BerkeleyDB ;
-typedef void * 			BerkeleyDB__Raw ;
-typedef BerkeleyDB_type *	BerkeleyDB__Common ;
-typedef BerkeleyDB_type *	BerkeleyDB__Common__Raw ;
-typedef BerkeleyDB_type *	BerkeleyDB__Common__Inner ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Hash ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Hash__Raw ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Btree ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Btree__Raw ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Recno ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Recno__Raw ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Queue ;
-typedef BerkeleyDB_type * 	BerkeleyDB__Queue__Raw ;
-typedef BerkeleyDB_Cursor_type   	BerkeleyDB__Cursor_type ;
-typedef BerkeleyDB_Cursor_type * 	BerkeleyDB__Cursor ;
-typedef BerkeleyDB_Cursor_type * 	BerkeleyDB__Cursor__Raw ;
-typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr ;
-typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Raw ;
-typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Inner ;
-typedef BerkeleyDB_Txn_type *	BerkeleyDB__Txn ;
-typedef BerkeleyDB_Txn_type *	BerkeleyDB__Txn__Raw ;
-typedef BerkeleyDB_Txn_type *	BerkeleyDB__Txn__Inner ;
-#if 0
-typedef DB_LOG *      		BerkeleyDB__Log ;
-typedef DB_LOCKTAB *  		BerkeleyDB__Lock ;
-#endif
-typedef DBT 			DBTKEY ;
-typedef DBT 			DBT_OPT ;
-typedef DBT 			DBT_B ;
-typedef DBT 			DBTKEY_B ;
-typedef DBT 			DBTVALUE ;
-typedef void *	      		PV_or_NULL ;
-typedef PerlIO *      		IO_or_NULL ;
-typedef int			DualType ;
-
-static void
-hash_delete(char * hash, char * key);
-
-#ifdef TRACE
-#  define Trace(x)	printf x
-#else
-#  define Trace(x)
-#endif
-
-#ifdef ALLOW_RECNO_OFFSET
-#  define RECNO_BASE	db->array_base
-#else
-#  define RECNO_BASE	1
-#endif
-
-#if DB_VERSION_MAJOR == 2
-#  define flagSet_DB2(i, f) i |= f
-#else
-#  define flagSet_DB2(i, f)
-#endif
-
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
-#  define flagSet(bitmask)        (flags & (bitmask))
-#else
-#  define flagSet(bitmask)	((flags & DB_OPFLAGS_MASK) == (bitmask))
-#endif
-
-
-#define ERR_BUFF "BerkeleyDB::Error"
-
-#define ZMALLOC(to, typ) ((to = (typ *)safemalloc(sizeof(typ))), \
-				Zero(to,1,typ))
-
-#define DBT_clear(x)	Zero(&x, 1, DBT) ;
-
-#if 1
-#define getInnerObject(x) (*av_fetch((AV*)SvRV(x), 0, FALSE))
-#else
-#define getInnerObject(x) ((SV*)SvRV(sv))
-#endif
-
-#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
-
-#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
-				i = SvIV(sv)
-#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
-				i = GetFILEptr(sv)
-#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
-				i = sv
-#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
-				i = (t)SvPV(sv,PL_na)
-#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
-				i = (t)SvPVX(sv)
-#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
-				IV tmp = SvIV(getInnerObject(sv)) ;	\
-				i = INT2PTR(t, tmp) ;			\
-			  }
-
-#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
-				HV * hv = (HV *)GetInternalObject(sv);		\
-				SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
-				IV tmp = SvIV(*svp);			\
-				i = INT2PTR(t, tmp) ;				\
-			  }
-
-#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
-				IV tmp = SvIV(GetInternalObject(sv));\
-				i = INT2PTR(t, tmp) ;				\
-			  }
-
-#define LastDBerror DB_RUNRECOVERY
-
-#define setDUALerrno(var, err)					\
-		sv_setnv(var, (double)err) ;			\
-		sv_setpv(var, ((err) ? db_strerror(err) : "")) ;\
-		SvNOK_on(var);
-
-#define OutputValue(arg, name)                                  \
-        { if (RETVAL == 0) {                                    \
-              my_sv_setpvn(arg, name.data, name.size) ;         \
-              DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ;            \
-          }                                                     \
-        }
-
-#define OutputValue_B(arg, name)                                  \
-        { if (RETVAL == 0) {                                    \
-		if (db->type == DB_BTREE && 			\
-			flagSet(DB_GET_RECNO)){			\
-                    sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
-                }                                               \
-                else {                                          \
-                    my_sv_setpvn(arg, name.data, name.size) ;   \
-                }                                               \
-                DBM_ckFilter(arg, filter_fetch_value, "filter_fetch_value");          \
-          }                                                     \
-        }
-
-#define OutputKey(arg, name)                                    \
-        { if (RETVAL == 0) 					\
-          {                                                     \
-                if (!db->recno_or_queue) {                     	\
-                    my_sv_setpvn(arg, name.data, name.size);    \
-                }                                               \
-                else                                            \
-                    sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE);   \
-                DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ;            \
-          }                                                     \
-        }
-
-#define OutputKey_B(arg, name)                                  \
-        { if (RETVAL == 0) 					\
-          {                                                     \
-                if (db->recno_or_queue ||			\
-			(db->type == DB_BTREE && 		\
-			    flagSet(DB_GET_RECNO))){		\
-                    sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
-                }                                               \
-                else {                                          \
-                    my_sv_setpvn(arg, name.data, name.size);    \
-                }                                               \
-                DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ;            \
-          }                                                     \
-        }
-
-#define SetPartial(data,db) 					\
-	data.flags = db->partial ;				\
-	data.dlen  = db->dlen ;					\
-	data.doff  = db->doff ;
-
-#define ckActive(active, type) 					\
-    {								\
-	if (!active)						\
-	    softCrash("%s is already closed", type) ;		\
-    }
-
-#define ckActive_Environment(a)	ckActive(a, "Environment")
-#define ckActive_TxnMgr(a)	ckActive(a, "Transaction Manager")
-#define ckActive_Transaction(a) ckActive(a, "Transaction")
-#define ckActive_Database(a) 	ckActive(a, "Database")
-#define ckActive_Cursor(a) 	ckActive(a, "Cursor")
-
-#define dieIfEnvOpened(e, m) if (e->opened) softCrash("Cannot call method BerkeleyDB::Env::%s after environment has been opened", m);	
-
-#define isSTDOUT_ERR(f) ((f) == stdout || (f) == stderr)
-
-/* Internal Global Data */
-static db_recno_t Value ;
-static db_recno_t zero = 0 ;
-static BerkeleyDB	CurrentDB ;
-
-static DBTKEY	empty ;
-#if 0
-static char	ErrBuff[1000] ;
-#endif
-
-#ifdef AT_LEAST_DB_3_3
-#    if PERL_REVISION == 5 && PERL_VERSION <= 4
-
-/* saferealloc in perl5.004 will croak if it is given a NULL pointer*/
-void *
-MyRealloc(void * ptr, size_t size)
-{
-    if (ptr == NULL ) 
-        return safemalloc(size) ; 
-    else
-        return saferealloc(ptr, size) ;
-}
-
-#    else
-#        define MyRealloc saferealloc
-#    endif
-#endif
-
-static char *
-my_strdup(const char *s)
-{
-    if (s == NULL)
-        return NULL ;
-
-    {
-        MEM_SIZE l = strlen(s) + 1;
-        char *s1 = (char *)safemalloc(l);
-
-        Copy(s, s1, (MEM_SIZE)l, char);
-        return s1;
-    }
-}
-
-#if DB_VERSION_MAJOR == 2
-static char *
-db_strerror(int err)
-{
-    if (err == 0)
-        return "" ;
-
-    if (err > 0)
-        return Strerror(err) ;
-
-    switch (err) {
-	case DB_INCOMPLETE:
-		return ("DB_INCOMPLETE: Sync was unable to complete");
-	case DB_KEYEMPTY:
-		return ("DB_KEYEMPTY: Non-existent key/data pair");
-	case DB_KEYEXIST:
-		return ("DB_KEYEXIST: Key/data pair already exists");
-	case DB_LOCK_DEADLOCK:
-		return (
-		    "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
-	case DB_LOCK_NOTGRANTED:
-		return ("DB_LOCK_NOTGRANTED: Lock not granted");
-	case DB_LOCK_NOTHELD:
-		return ("DB_LOCK_NOTHELD: Lock not held by locker");
-	case DB_NOTFOUND:
-		return ("DB_NOTFOUND: No matching key/data pair found");
-	case DB_RUNRECOVERY:
-		return ("DB_RUNRECOVERY: Fatal error, run database recovery");
-	default:
-		return "Unknown Error" ;
-
-    }
-}
-#endif 	/* DB_VERSION_MAJOR == 2 */
-
-#ifdef TRACE
-#if DB_VERSION_MAJOR > 2
-static char *
-my_db_strerror(int err)
-{
-    static char buffer[1000] ;
-    SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
-    sprintf(buffer, "%d: %s", err, db_strerror(err)) ;
-    if (err && sv) {
-        strcat(buffer, ", ") ;
-	strcat(buffer, SvPVX(sv)) ;
-    }
-    return buffer;
-}
-#endif
-#endif
-
-static void
-close_everything(void)
-{
-    dTHR;
-    Trace(("close_everything\n")) ;
-    /* Abort All Transactions */
-    {
-	BerkeleyDB__Txn__Raw 	tid ;
-	HE * he ;
-	I32 len ;
-	HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
-	int  all = 0 ;
-	int  closed = 0 ;
-	(void)hv_iterinit(hv) ;
-	Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
-	while ( (he = hv_iternext(hv)) ) {
-	    tid = * (BerkeleyDB__Txn__Raw *) hv_iterkey(he, &len) ;
-	    Trace(("  Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
-	    if (tid->active) {
-#ifdef AT_LEAST_DB_4
-	    tid->txn->abort(tid->txn) ;
-#else
-	        txn_abort(tid->txn);
-#endif
-		++ closed ;
-	    }
-	    tid->active = FALSE ;
-	    ++ all ;
-	}
-	Trace(("End of BerkeleyDB::Term::close_all_txns aborted %d of %d transactios\n",closed, all)) ;
-    }
-
-    /* Close All Cursors */
-    {
-	BerkeleyDB__Cursor db ;
-	HE * he ;
-	I32 len ;
-	HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
-	int  all = 0 ;
-	int  closed = 0 ;
-	(void) hv_iterinit(hv) ;
-	Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
-	while ( (he = hv_iternext(hv)) ) {
-	    db = * (BerkeleyDB__Cursor*) hv_iterkey(he, &len) ;
-	    Trace(("  Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
-	    if (db->active) {
-    	        ((db->cursor)->c_close)(db->cursor) ;
-		++ closed ;
-	    }
-	    db->active = FALSE ;
-	    ++ all ;
-	}
-	Trace(("End of BerkeleyDB::Term::close_all_cursors closed %d of %d cursors\n",closed, all)) ;
-    }
-
-    /* Close All Databases */
-    {
-	BerkeleyDB db ;
-	HE * he ;
-	I32 len ;
-	HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
-	int  all = 0 ;
-	int  closed = 0 ;
-	(void)hv_iterinit(hv) ;
-	Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
-	while ( (he = hv_iternext(hv)) ) {
-	    db = * (BerkeleyDB*) hv_iterkey(he, &len) ;
-	    Trace(("  Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
-	    if (db->active) {
-	        (db->dbp->close)(db->dbp, 0) ;
-		++ closed ;
-	    }
-	    db->active = FALSE ;
-	    ++ all ;
-	}
-	Trace(("End of BerkeleyDB::Term::close_all_dbs closed %d of %d dbs\n",closed, all)) ;
-    }
-
-    /* Close All Environments */
-    {
-	BerkeleyDB__Env env ;
-	HE * he ;
-	I32 len ;
-	HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
-	int  all = 0 ;
-	int  closed = 0 ;
-	(void)hv_iterinit(hv) ;
-	Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
-	while ( (he = hv_iternext(hv)) ) {
-	    env = * (BerkeleyDB__Env*) hv_iterkey(he, &len) ;
-	    Trace(("  Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
-	    if (env->active) {
-#if DB_VERSION_MAJOR == 2
-                db_appexit(env->Env) ;
-#else
-	        (env->Env->close)(env->Env, 0) ;
-#endif
-		++ closed ;
-	    }
-	    env->active = FALSE ;
-	    ++ all ;
-	}
-	Trace(("End of BerkeleyDB::Term::close_all_envs closed %d of %d dbs\n",closed, all)) ;
-    }
-
-    Trace(("end close_everything\n")) ;
-
-}
-
-static void
-destroyDB(BerkeleyDB db)
-{
-    dTHR;
-    if (! PL_dirty && db->active) {
-	if (db->parent_env && db->parent_env->open_dbs)
-	    -- db->parent_env->open_dbs ;
-      	-- db->open_cursors ;
-	((db->dbp)->close)(db->dbp, 0) ;
-    }
-    if (db->hash)
-       	  SvREFCNT_dec(db->hash) ;
-    if (db->compare)
-       	  SvREFCNT_dec(db->compare) ;
-    if (db->dup_compare)
-       	  SvREFCNT_dec(db->dup_compare) ;
-#ifdef AT_LEAST_DB_3_3
-    if (db->associated && !db->secondary_db)
-       	  SvREFCNT_dec(db->associated) ;
-#endif
-    if (db->prefix)
-       	  SvREFCNT_dec(db->prefix) ;
-#ifdef DBM_FILTERING
-    if (db->filter_fetch_key)
-          SvREFCNT_dec(db->filter_fetch_key) ;
-    if (db->filter_store_key)
-          SvREFCNT_dec(db->filter_store_key) ;
-    if (db->filter_fetch_value)
-          SvREFCNT_dec(db->filter_fetch_value) ;
-    if (db->filter_store_value)
-          SvREFCNT_dec(db->filter_store_value) ;
-#endif
-    hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
-    if (db->filename)
-             Safefree(db->filename) ;
-    Safefree(db) ;
-}
-
-static int
-softCrash(const char *pat, ...)
-{
-    char buffer1 [500] ;
-    char buffer2 [500] ;
-    va_list args;
-    va_start(args, pat);
-
-    Trace(("softCrash: %s\n", pat)) ;
-
-#define ABORT_PREFIX "BerkeleyDB Aborting: "
-
-    /* buffer = (char*) safemalloc(strlen(pat) + strlen(ABORT_PREFIX) + 1) ; */
-    strcpy(buffer1, ABORT_PREFIX) ;
-    strcat(buffer1, pat) ;
-
-    vsprintf(buffer2, buffer1, args) ;
-
-    croak(buffer2);
-
-    /* NOTREACHED */
-    va_end(args);
-    return 1 ;
-}
-
-
-static I32
-GetArrayLength(BerkeleyDB db)
-{
-    DBT		key ;
-    DBT		value ;
-    int		RETVAL = 0 ;
-    DBC *   	cursor ;
-
-    DBT_clear(key) ;
-    DBT_clear(value) ;
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
-    if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor) == 0 )
-#else
-    if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor, 0) == 0 )
-#endif
-    {
-        RETVAL = cursor->c_get(cursor, &key, &value, DB_LAST) ;
-        if (RETVAL == 0)
-            RETVAL = *(I32 *)key.data ;
-        else /* No key means empty file */
-            RETVAL = 0 ;
-        cursor->c_close(cursor) ;
-    }
-
-    Trace(("GetArrayLength got %d\n", RETVAL)) ;
-    return ((I32)RETVAL) ;
-}
-
-#if 0
-
-#define GetRecnoKey(db, value)  _GetRecnoKey(db, value)
-
-static db_recno_t
-_GetRecnoKey(BerkeleyDB db, I32 value)
-{
-    Trace(("GetRecnoKey start value = %d\n", value)) ;
-    if (db->recno_or_queue && value < 0) {
-	/* Get the length of the array */
-	I32 length = GetArrayLength(db) ;
-
-	/* check for attempt to write before start of array */
-	if (length + value + RECNO_BASE <= 0)
-	    softCrash("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
-
-	value = length + value + RECNO_BASE ;
-    }
-    else
-        ++ value ;
-
-    Trace(("GetRecnoKey end value = %d\n", value)) ;
-
-    return value ;
-}
-
-#else /* ! 0 */
-
-#if 0
-#ifdef ALLOW_RECNO_OFFSET
-#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
-
-static db_recno_t
-_GetRecnoKey(BerkeleyDB db, I32 value)
-{
-    if (value + RECNO_BASE < 1)
-	softCrash("key value %d < base (%d)", (value), RECNO_BASE?0:1) ;
-    return value + RECNO_BASE ;
-}
-
-#else
-#endif /* ALLOW_RECNO_OFFSET */
-#endif /* 0 */
-
-#define GetRecnoKey(db, value) ((value) + RECNO_BASE )
-
-#endif /* 0 */
-
-#if 0
-static SV *
-GetInternalObject(SV * sv)
-{
-    SV * info = (SV*) NULL ;
-    SV * s ;
-    MAGIC * mg ;
-
-    Trace(("in GetInternalObject %d\n", sv)) ;
-    if (sv == NULL || !SvROK(sv))
-        return NULL ;
-
-    s = SvRV(sv) ;
-    if (SvMAGICAL(s))
-    {
-        if (SvTYPE(s) == SVt_PVHV || SvTYPE(s) == SVt_PVAV)
-            mg = mg_find(s, 'P') ;
-        else
-            mg = mg_find(s, 'q') ;
-
-	 /* all this testing is probably overkill, but till I know more
-	    about global destruction it stays.
-	 */
-        /* if (mg && mg->mg_obj && SvRV(mg->mg_obj) && SvPVX(SvRV(mg->mg_obj))) */
-        if (mg && mg->mg_obj && SvRV(mg->mg_obj) )
-            info = SvRV(mg->mg_obj) ;
-	else
-	    info = s ;
-    }
-
-    Trace(("end of GetInternalObject %d\n", info)) ;
-    return info ;
-}
-#endif
-
-static int
-btree_compare(DB_callback const DBT * key1, const DBT * key2 )
-{
-    dSP ;
-    char * data1, * data2 ;
-    int retval ;
-    int count ;
-    BerkeleyDB	keepDB = CurrentDB ;
-
-    data1 = (char*) key1->data ;
-    data2 = (char*) key2->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C
-       string if the size parameter is 0, make sure that data points to an
-       empty string if the length is 0
-    */
-    if (key1->size == 0)
-        data1 = "" ;
-    if (key2->size == 0)
-        data2 = "" ;
-#endif
-
-    ENTER ;
-    SAVETMPS;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
-    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
-    PUTBACK ;
-
-    count = perl_call_sv(getCurrentDB->compare, G_SCALAR);
-
-    SPAGAIN ;
-
-    if (count != 1)
-        softCrash ("in btree_compare - expected 1 return value from compare sub, got %d", count) ;
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-    CurrentDB = keepDB ;
-    return (retval) ;
-
-}
-
-static int
-dup_compare(DB_callback const DBT * key1, const DBT * key2 )
-{
-    dSP ;
-    char * data1, * data2 ;
-    int retval ;
-    int count ;
-    BerkeleyDB	keepDB = CurrentDB ;
-
-    Trace(("In dup_compare \n")) ;
-    if (!getCurrentDB)
-	softCrash("Internal Error - No CurrentDB in dup_compare") ;
-    if (getCurrentDB->dup_compare == NULL)
-
-
-        softCrash("in dup_compare: no callback specified for database '%s'", getCurrentDB->filename) ;
-
-    data1 = (char*) key1->data ;
-    data2 = (char*) key2->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C
-       string if the size parameter is 0, make sure that data points to an
-       empty string if the length is 0
-    */
-    if (key1->size == 0)
-        data1 = "" ;
-    if (key2->size == 0)
-        data2 = "" ;
-#endif
-
-    ENTER ;
-    SAVETMPS;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
-    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
-    PUTBACK ;
-
-    count = perl_call_sv(getCurrentDB->dup_compare, G_SCALAR);
-
-    SPAGAIN ;
-
-    if (count != 1)
-        softCrash ("dup_compare: expected 1 return value from compare sub, got %d", count) ;
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-    CurrentDB = keepDB ;
-    return (retval) ;
-
-}
-
-static size_t
-btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
-{
-    dSP ;
-    char * data1, * data2 ;
-    int retval ;
-    int count ;
-    BerkeleyDB	keepDB = CurrentDB ;
-
-    data1 = (char*) key1->data ;
-    data2 = (char*) key2->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C
-       string if the size parameter is 0, make sure that data points to an
-       empty string if the length is 0
-    */
-    if (key1->size == 0)
-        data1 = "" ;
-    if (key2->size == 0)
-        data2 = "" ;
-#endif
-
-    ENTER ;
-    SAVETMPS;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
-    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
-    PUTBACK ;
-
-    count = perl_call_sv(getCurrentDB->prefix, G_SCALAR);
-
-    SPAGAIN ;
-
-    if (count != 1)
-        softCrash ("btree_prefix: expected 1 return value from prefix sub, got %d", count) ;
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-    CurrentDB = keepDB ;
-
-    return (retval) ;
-}
-
-static u_int32_t
-hash_cb(DB_callback const void * data, u_int32_t size)
-{
-    dSP ;
-    int retval ;
-    int count ;
-    BerkeleyDB	keepDB = CurrentDB ;
-
-#ifndef newSVpvn
-    if (size == 0)
-        data = "" ;
-#endif
-
-    ENTER ;
-    SAVETMPS;
-
-    PUSHMARK(SP) ;
-
-    XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
-    PUTBACK ;
-
-    count = perl_call_sv(getCurrentDB->hash, G_SCALAR);
-
-    SPAGAIN ;
-
-    if (count != 1)
-        softCrash ("hash_cb: expected 1 return value from hash sub, got %d", count) ;
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-    CurrentDB = keepDB ;
-
-    return (retval) ;
-}
-
-#ifdef AT_LEAST_DB_3_3
-
-static int
-associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey)
-{
-    dSP ;
-    char * pk_dat, * pd_dat ;
-    /* char *sk_dat ; */
-    int retval ;
-    int count ;
-    SV * skey_SV ;
-    STRLEN skey_len;
-    char * skey_ptr ;
-
-    Trace(("In associate_cb \n")) ;
-    if (getCurrentDB->associated == NULL){
-        Trace(("No Callback registered\n")) ;
-        return EINVAL ;
-    }
-
-    skey_SV = newSVpv("",0);
-
-
-    pk_dat = (char*) pkey->data ;
-    pd_dat = (char*) pdata->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C
-       string if the size parameter is 0, make sure that data points to an
-       empty string if the length is 0
-    */
-    if (pkey->size == 0)
-        pk_dat = "" ;
-    if (pdata->size == 0)
-        pd_dat = "" ;
-#endif
-
-    ENTER ;
-    SAVETMPS;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(pk_dat,pkey->size)));
-    PUSHs(sv_2mortal(newSVpvn(pd_dat,pdata->size)));
-    PUSHs(sv_2mortal(skey_SV));
-    PUTBACK ;
-
-    Trace(("calling associated cb\n"));
-    count = perl_call_sv(getCurrentDB->associated, G_SCALAR);
-    Trace(("called associated cb\n"));
-
-    SPAGAIN ;
-
-    if (count != 1)
-        softCrash ("associate: expected 1 return value from prefix sub, got %d", count) ;
-
-    retval = POPi ;
-
-    PUTBACK ;
-    
-    /* retrieve the secondary key */
-    DBT_clear(*skey);
-    skey_ptr = SvPV(skey_SV, skey_len);
-    skey->flags = DB_DBT_APPMALLOC;
-    /* skey->size = SvCUR(skey_SV); */
-    /* skey->data = (char*)safemalloc(skey->size); */
-    skey->size = skey_len;
-    skey->data = (char*)safemalloc(skey_len);
-    memcpy(skey->data, skey_ptr, skey_len);
-    Trace(("key is %d -- %.*s\n", skey->size, skey->size, skey->data));
-
-    FREETMPS ;
-    LEAVE ;
-
-    return (retval) ;
-}
-
-#endif /* AT_LEAST_DB_3_3 */
-
-static void
-#ifdef AT_LEAST_DB_4_3
-db_errcall_cb(const DB_ENV* dbenv, const char * db_errpfx, const char * buffer)
-#else
-db_errcall_cb(const char * db_errpfx, char * buffer)
-#endif
-{
-#if 0
-
-    if (db_errpfx == NULL)
-	db_errpfx = "" ;
-    if (buffer == NULL )
-	buffer = "" ;
-    ErrBuff[0] = '\0';
-    if (strlen(db_errpfx) + strlen(buffer) + 3 <= 1000) {
-	if (*db_errpfx != '\0') {
-	    strcat(ErrBuff, db_errpfx) ;
-	    strcat(ErrBuff, ": ") ;
-	}
-	strcat(ErrBuff, buffer) ;
-    }
-
-#endif
-
-    SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
-    if (sv) {
-        if (db_errpfx)
-	    sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
-        else
-            sv_setpv(sv, buffer) ;
-    }
-}
-
-static SV *
-readHash(HV * hash, char * key)
-{
-    SV **       svp;
-    svp = hv_fetch(hash, key, strlen(key), FALSE);
-    if (svp && SvOK(*svp))
-        return *svp ;
-    return NULL ;
-}
-
-static void
-hash_delete(char * hash, char * key)
-{
-    HV * hv = perl_get_hv(hash, TRUE);
-    (void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
-}
-
-static void
-hash_store_iv(char * hash, char * key, IV value)
-{
-    HV * hv = perl_get_hv(hash, TRUE);
-    (void)hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
-    /* printf("hv_store returned %d\n", ret) ; */
-}
-
-static void
-hv_store_iv(HV * hash, char * key, IV value)
-{
-    hv_store(hash, key, strlen(key), newSViv(value), 0);
-}
-
-static BerkeleyDB
-my_db_open(
-		BerkeleyDB	db ,
-		SV * 		ref,
-		SV *		ref_dbenv ,
-		BerkeleyDB__Env	dbenv ,
-    	    	BerkeleyDB__Txn txn, 
-		const char *	file,
-		const char *	subname,
-		DBTYPE		type,
-		int		flags,
-		int		mode,
-		DB_INFO * 	info,
-		char *		password,
-		int		enc_flags
-	)
-{
-    DB_ENV *	env    = NULL ;
-    BerkeleyDB 	RETVAL = NULL ;
-    DB *	dbp ;
-    int		Status ;
-    DB_TXN* 	txnid = NULL ;
-
-    Trace(("_db_open(dbenv[%p] ref_dbenv [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
-		dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
-
-    CurrentDB = db ;
-    
-    if (dbenv)
-	env = dbenv->Env ;
-
-    if (txn)
-        txnid = txn->txn;
-
-    Trace(("_db_open(dbenv[%p] ref_dbenv [%p] txn [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
-		dbenv, ref_dbenv, txn, file, subname, type, flags, mode)) ;
-
-#if DB_VERSION_MAJOR == 2
-    if (subname)
-        softCrash("Subname needs Berkeley DB 3 or better") ;
-#endif
-
-#ifndef AT_LEAST_DB_4_1
-	    if (password)
-	        softCrash("-Encrypt needs Berkeley DB 4.x or better") ;
-#endif /* ! AT_LEAST_DB_4_1 */
-
-#if DB_VERSION_MAJOR > 2
-    Status = db_create(&dbp, env, 0) ;
-    Trace(("db_create returned %s\n", my_db_strerror(Status))) ;
-    if (Status)
-        return RETVAL ;
-
-#ifdef AT_LEAST_DB_3_2
-	dbp->BackRef = db;
-#endif
-
-#ifdef AT_LEAST_DB_3_3
-    if (! env) {
-	dbp->set_alloc(dbp, safemalloc, MyRealloc, safefree) ;
-	dbp->set_errcall(dbp, db_errcall_cb) ;
-    }
-#endif
-
-#ifdef AT_LEAST_DB_4_1
-    /* set encryption */
-    if (password)
-    {
-        Status = dbp->set_encrypt(dbp, password, enc_flags);
-        Trace(("DB->set_encrypt passwd = %s, flags %d returned %s\n", 
-			      		password, enc_flags,
-  					my_db_strerror(Status))) ;
-         if (Status)
-              return RETVAL ;
-    }
-#endif	  
-
-    if (info->re_source) {
-        Status = dbp->set_re_source(dbp, info->re_source) ;
-	Trace(("set_re_source [%s] returned %s\n",
-		info->re_source, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->db_cachesize) {
-        Status = dbp->set_cachesize(dbp, 0, info->db_cachesize, 0) ;
-	Trace(("set_cachesize [%d] returned %s\n",
-		info->db_cachesize, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->db_lorder) {
-        Status = dbp->set_lorder(dbp, info->db_lorder) ;
-	Trace(("set_lorder [%d] returned %s\n",
-		info->db_lorder, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->db_pagesize) {
-        Status = dbp->set_pagesize(dbp, info->db_pagesize) ;
-	Trace(("set_pagesize [%d] returned %s\n",
-		info->db_pagesize, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->h_ffactor) {
-        Status = dbp->set_h_ffactor(dbp, info->h_ffactor) ;
-	Trace(("set_h_ffactor [%d] returned %s\n",
-		info->h_ffactor, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->h_nelem) {
-        Status = dbp->set_h_nelem(dbp, info->h_nelem) ;
-	Trace(("set_h_nelem [%d] returned %s\n",
-		info->h_nelem, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->bt_minkey) {
-        Status = dbp->set_bt_minkey(dbp, info->bt_minkey) ;
-	Trace(("set_bt_minkey [%d] returned %s\n",
-		info->bt_minkey, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->bt_compare) {
-        Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
-	Trace(("set_bt_compare [%p] returned %s\n",
-		info->bt_compare, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->h_hash) {
-        Status = dbp->set_h_hash(dbp, info->h_hash) ;
-	Trace(("set_h_hash [%d] returned %s\n",
-		info->h_hash, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->dup_compare) {
-        Status = dbp->set_dup_compare(dbp, info->dup_compare) ;
-	Trace(("set_dup_compare [%d] returned %s\n",
-		info->dup_compare, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->bt_prefix) {
-        Status = dbp->set_bt_prefix(dbp, info->bt_prefix) ;
-	Trace(("set_bt_prefix [%d] returned %s\n",
-		info->bt_prefix, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->re_len) {
-        Status = dbp->set_re_len(dbp, info->re_len) ;
-	Trace(("set_re_len [%d] returned %s\n",
-		info->re_len, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->re_delim) {
-        Status = dbp->set_re_delim(dbp, info->re_delim) ;
-	Trace(("set_re_delim [%d] returned %s\n",
-		info->re_delim, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->re_pad) {
-        Status = dbp->set_re_pad(dbp, info->re_pad) ;
-	Trace(("set_re_pad [%d] returned %s\n",
-		info->re_pad, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->flags) {
-        Status = dbp->set_flags(dbp, info->flags) ;
-	Trace(("set_flags [%d] returned %s\n",
-		info->flags, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-    }
-
-    if (info->q_extentsize) {
-#ifdef AT_LEAST_DB_3_2
-        Status = dbp->set_q_extentsize(dbp, info->q_extentsize) ;
-	Trace(("set_q_extentsize [%d] returned %s\n",
-		info->q_extentsize, my_db_strerror(Status)));
-        if (Status)
-            return RETVAL ;
-#else
-        softCrash("-ExtentSize needs at least Berkeley DB 3.2.x") ;
-#endif
-    }
-
-
-#ifdef AT_LEAST_DB_4_1
-    if ((Status = (dbp->open)(dbp, txnid, file, subname, type, flags, mode)) == 0) {
-#else
-    if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
-#endif /* AT_LEAST_DB_4_1 */
-#else /* DB_VERSION_MAJOR == 2 */
-    if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
-#endif /* DB_VERSION_MAJOR == 2 */
-
-	Trace(("db_opened ok\n"));
-	RETVAL = db ;
-	RETVAL->dbp  = dbp ;
-	RETVAL->txn  = txnid ;
-#if DB_VERSION_MAJOR == 2
-    	RETVAL->type = dbp->type ;
-#else /* DB_VERSION_MAJOR > 2 */
-#ifdef AT_LEAST_DB_3_3
-    	dbp->get_type(dbp, &RETVAL->type) ;
-#else /* DB 3.0 -> 3.2 */
-    	RETVAL->type = dbp->get_type(dbp) ;
-#endif
-#endif /* DB_VERSION_MAJOR > 2 */
-    	RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
-	                          RETVAL->type == DB_QUEUE) ;
-	RETVAL->filename = my_strdup(file) ;
-	RETVAL->Status = Status ;
-	RETVAL->active = TRUE ;
-	hash_store_iv("BerkeleyDB::Term::Db", (char *)RETVAL, 1) ;
-	Trace(("  storing %p %p in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
-	if (dbenv) {
-	    RETVAL->cds_enabled = dbenv->cds_enabled ;
-	    RETVAL->parent_env = dbenv ;
-	    dbenv->Status = Status ;
-	    ++ dbenv->open_dbs ;
-	}
-    }
-    else {
-#if DB_VERSION_MAJOR > 2
-	(dbp->close)(dbp, 0) ;
-#endif
-	destroyDB(db) ;
-        Trace(("db open returned %s\n", my_db_strerror(Status))) ;
-    }
-
-    return RETVAL ;
-}
-
-
-#include "constants.h"
-
-MODULE = BerkeleyDB		PACKAGE = BerkeleyDB	PREFIX = env_
-
-INCLUDE: constants.xs
-
-#define env_db_version(maj, min, patch) 	db_version(&maj, &min, &patch)
-char *
-env_db_version(maj, min, patch)
-	int  maj
-	int  min
-	int  patch
-	OUTPUT:
-	  RETVAL
-	  maj
-	  min
-	  patch
-
-int
-db_value_set(value, which)
-	int value
-	int which
-        NOT_IMPLEMENTED_YET
-
-
-DualType
-_db_remove(ref)
-	SV * 		ref
-	CODE:
-	{
-#if DB_VERSION_MAJOR == 2
-	    softCrash("BerkeleyDB::db_remove needs Berkeley DB 3.x or better") ;
-#else
-	    HV *		hash ;
-    	    DB *		dbp ;
-	    SV * 		sv ;
-	    const char *	db = NULL ;
-	    const char *	subdb 	= NULL ;
-	    BerkeleyDB__Env	env 	= NULL ;
-    	    DB_ENV *		dbenv   = NULL ;
-	    u_int32_t		flags	= 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(db,    "Filename", char *) ;
-	    SetValue_pv(subdb, "Subname", char *) ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_ov(env, "Env", BerkeleyDB__Env) ;
-    	    if (env)
-		dbenv = env->Env ;
-            RETVAL = db_create(&dbp, dbenv, 0) ;
-	    if (RETVAL == 0) {
-	        RETVAL = dbp->remove(dbp, db, subdb, flags) ;
-	    }
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-DualType
-_db_verify(ref)
-	SV * 		ref
-	CODE:
-	{
-#ifndef AT_LEAST_DB_3_1
-	    softCrash("BerkeleyDB::db_verify needs Berkeley DB 3.1.x or better") ;
-#else
-	    HV *		hash ;
-    	    DB *		dbp ;
-	    SV * 		sv ;
-	    const char *	db = NULL ;
-	    const char *	subdb 	= NULL ;
-	    const char *	outfile	= NULL ;
-	    FILE *		ofh = NULL;
-	    BerkeleyDB__Env	env 	= NULL ;
-    	    DB_ENV *		dbenv   = NULL ;
-	    u_int32_t		flags	= 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(db,    "Filename", char *) ;
-	    SetValue_pv(subdb, "Subname", char *) ;
-	    SetValue_pv(outfile, "Outfile", char *) ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_ov(env, "Env", BerkeleyDB__Env) ;
-            RETVAL = 0;
-            if (outfile){
-	        ofh = fopen(outfile, "w");
-                if (! ofh)
-                    RETVAL = errno;
-            }
-            if (! RETVAL) {
-    	        if (env)
-		    dbenv = env->Env ;
-                RETVAL = db_create(&dbp, dbenv, 0) ;
-	        if (RETVAL == 0) {
-	            RETVAL = dbp->verify(dbp, db, subdb, ofh, flags) ;
-	        }
-	        if (outfile) 
-                    fclose(ofh);
-            }
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-DualType
-_db_rename(ref)
-	SV * 		ref
-	CODE:
-	{
-#ifndef AT_LEAST_DB_3_1
-	    softCrash("BerkeleyDB::db_rename needs Berkeley DB 3.1.x or better") ;
-#else
-	    HV *		hash ;
-    	    DB *		dbp ;
-	    SV * 		sv ;
-	    const char *	db = NULL ;
-	    const char *	subdb 	= NULL ;
-	    const char *	newname	= NULL ;
-	    BerkeleyDB__Env	env 	= NULL ;
-    	    DB_ENV *		dbenv   = NULL ;
-	    u_int32_t		flags	= 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(db,    "Filename", char *) ;
-	    SetValue_pv(subdb, "Subname", char *) ;
-	    SetValue_pv(newname, "Newname", char *) ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_ov(env, "Env", BerkeleyDB__Env) ;
-    	    if (env)
-		dbenv = env->Env ;
-            RETVAL = db_create(&dbp, dbenv, 0) ;
-	    if (RETVAL == 0) {
-	        RETVAL = (dbp->rename)(dbp, db, subdb, newname, flags) ;
-	    }
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-MODULE = BerkeleyDB::Env		PACKAGE = BerkeleyDB::Env PREFIX = env_
-
-BerkeleyDB::Env::Raw
-create(flags=0)
-	u_int32_t flags
-	CODE:
-	{
-#ifndef AT_LEAST_DB_4_1
-	    softCrash("$env->create needs Berkeley DB 4.1 or better") ;
-#else
-	    DB_ENV *	env ;
-	    int    status;
-	    RETVAL = NULL;
-	    Trace(("in BerkeleyDB::Env::create flags=%d\n",  flags)) ;
-	    status = db_env_create(&env, flags) ;
-	    Trace(("db_env_create returned %s\n", my_db_strerror(status))) ;
-	    if (status == 0) {
-	        ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
-		RETVAL->Env = env ;
-	        RETVAL->active = TRUE ;
-	        RETVAL->opened = FALSE;
-	        env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
-	        env->set_errcall(env, db_errcall_cb) ;
-	    }
-#endif	    
-	}
-	OUTPUT:
-	    RETVAL
-
-int
-open(env, db_home=NULL, flags=0, mode=0777)
-	BerkeleyDB::Env env
-	char * db_home
-	u_int32_t flags
-	int mode
-    CODE:
-#ifndef AT_LEAST_DB_4_1
-	    softCrash("$env->create needs Berkeley DB 4.1 or better") ;
-#else
-        RETVAL = env->Env->open(env->Env, db_home, flags, mode);
-	env->opened = TRUE;
-#endif
-    OUTPUT:
-        RETVAL
-
-bool
-cds_enabled(env)
-	BerkeleyDB::Env env
-	CODE:
-	    RETVAL = env->cds_enabled ;
-	OUTPUT:
-	    RETVAL
-
-
-int
-set_encrypt(env, passwd, flags)
-	BerkeleyDB::Env env
-	const char * passwd
-	u_int32_t flags
-    CODE:
-#ifndef AT_LEAST_DB_4_1
-	    softCrash("$env->set_encrypt needs Berkeley DB 4.1 or better") ;
-#else
-        dieIfEnvOpened(env, "set_encrypt");
-        RETVAL = env->Env->set_encrypt(env->Env, passwd, flags);
-	env->opened = TRUE;
-#endif
-    OUTPUT:
-        RETVAL
-
-
-
-
-BerkeleyDB::Env::Raw
-_db_appinit(self, ref, errfile=NULL)
-	char *		self
-	SV * 		ref
-	SV * 		errfile 
-	CODE:
-	{
-	    HV *	hash ;
-	    SV *	sv ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-	    char *	home = NULL ;
-	    char * 	server = NULL ;
-	    char **	config = NULL ;
-	    int		flags = 0 ;
-	    int		setflags = 0 ;
-	    int		cachesize = 0 ;
-	    int		lk_detect = 0 ;
-	    long	shm_key = 0 ;
-	    SV *	errprefix = NULL;
-	    DB_ENV *	env ;
-	    int status ;
-
-	    Trace(("in _db_appinit [%s] %d\n", self, ref)) ;
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(home,      "Home", char *) ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-	    SetValue_pv(config,    "Config", char **) ;
-	    SetValue_sv(errprefix, "ErrPrefix") ;
-	    SetValue_iv(flags,     "Flags") ;
-	    SetValue_iv(setflags,  "SetFlags") ;
-	    SetValue_pv(server,    "Server", char *) ;
-	    SetValue_iv(cachesize, "Cachesize") ;
-	    SetValue_iv(lk_detect, "LockDetect") ;
-	    SetValue_iv(shm_key,   "SharedMemKey") ;
-#ifndef AT_LEAST_DB_3_2
-	    if (setflags)
-	        softCrash("-SetFlags needs Berkeley DB 3.x or better") ;
-#endif /* ! AT_LEAST_DB_3 */
-#ifndef AT_LEAST_DB_3_1
-	    if (shm_key)
-	        softCrash("-SharedMemKey needs Berkeley DB 3.1 or better") ;
-	    if (server)
-	        softCrash("-Server needs Berkeley DB 3.1 or better") ;
-#endif /* ! AT_LEAST_DB_3_1 */
-#ifndef AT_LEAST_DB_4_1
-	    if (enc_passwd)
-	        softCrash("-Encrypt needs Berkeley DB 4.x or better") ;
-#endif /* ! AT_LEAST_DB_4_1 */
-	    Trace(("_db_appinit(config=[%d], home=[%s],errprefix=[%s],flags=[%d]\n",
-			config, home, errprefix, flags)) ;
-#ifdef TRACE
-	    if (config) {
-	       int i ;
-	      for (i = 0 ; i < 10 ; ++ i) {
-		if (config[i] == NULL) {
-		    printf("    End\n") ;
-		    break ;
-		}
-	        printf("    config = [%s]\n", config[i]) ;
-	      }
-	    }
-#endif /* TRACE */
-	    ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
-	    if (flags & DB_INIT_TXN)
-	        RETVAL->txn_enabled = TRUE ;
-#if DB_VERSION_MAJOR == 2
-	  ZMALLOC(RETVAL->Env, DB_ENV) ;
-	  env = RETVAL->Env ;
-	  {
-	    /* Take a copy of the error prefix */
-	    if (errprefix) {
-	        Trace(("copying errprefix\n" )) ;
-		RETVAL->ErrPrefix = newSVsv(errprefix) ;
-		SvPOK_only(RETVAL->ErrPrefix) ;
-	    } 
-	    if (RETVAL->ErrPrefix)
-	        RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
-
-	    if (SvGMAGICAL(errfile))
-		    mg_get(errfile);
-	    if (SvOK(errfile)) {
-	        FILE * ef = GetFILEptr(errfile) ;
-	    	if (! ef)
-		    croak("Cannot open file ErrFile", Strerror(errno));
-		RETVAL->ErrHandle = newSVsv(errfile) ;
-	    	env->db_errfile = ef;
-	    }
-	    SetValue_iv(env->db_verbose, "Verbose") ;
-	    env->db_errcall = db_errcall_cb ;
-	    RETVAL->active = TRUE ;
-	    RETVAL->opened = TRUE;
-	    RETVAL->cds_enabled = ((flags & DB_INIT_CDB) != 0 ? TRUE : FALSE) ;
-	    status = db_appinit(home, config, env, flags) ;
-	    printf("  status = %d errno %d \n", status, errno) ;
-	    Trace(("  status = %d env %d Env %d\n", status, RETVAL, env)) ;
-	    if (status == 0)
-	        hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
-	    else {
-
-                if (RETVAL->ErrHandle)
-                    SvREFCNT_dec(RETVAL->ErrHandle) ;
-                if (RETVAL->ErrPrefix)
-                    SvREFCNT_dec(RETVAL->ErrPrefix) ;
-                Safefree(RETVAL->Env) ;
-                Safefree(RETVAL) ;
-		RETVAL = NULL ;
-	    }
-	  }
-#else /* DB_VERSION_MAJOR > 2 */
-#ifndef AT_LEAST_DB_3_1
-#    define DB_CLIENT	0
-#endif
-#ifdef AT_LEAST_DB_4_2
-#    define DB_CLIENT	DB_RPCCLIENT
-#endif
-	  status = db_env_create(&RETVAL->Env, server ? DB_CLIENT : 0) ;
-	  Trace(("db_env_create flags = %d returned %s\n", flags,
-	  					my_db_strerror(status))) ;
-	  env = RETVAL->Env ;
-#ifdef AT_LEAST_DB_3_3
-	  env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
-#endif
-#ifdef AT_LEAST_DB_3_1
-	  if (status == 0 && shm_key) {
-	      status = env->set_shm_key(env, shm_key) ;
-	      Trace(("set_shm_key [%d] returned %s\n", shm_key,
-			my_db_strerror(status)));
-	  }
-#endif	  
-	  if (status == 0 && cachesize) {
-	      status = env->set_cachesize(env, 0, cachesize, 0) ;
-	      Trace(("set_cachesize [%d] returned %s\n",
-			cachesize, my_db_strerror(status)));
-	  }
-	
-	  if (status == 0 && lk_detect) {
-	      status = env->set_lk_detect(env, lk_detect) ;
-	      Trace(("set_lk_detect [%d] returned %s\n",
-	              lk_detect, my_db_strerror(status)));
-	  }
-#ifdef AT_LEAST_DB_4_1
-	  /* set encryption */
-	  if (enc_passwd && status == 0)
-	  {
-	      status = env->set_encrypt(env, enc_passwd, enc_flags);
-	      Trace(("ENV->set_encrypt passwd = %s, flags %d returned %s\n", 
-				      		enc_passwd, enc_flags,
-	  					my_db_strerror(status))) ;
-	  }
-#endif	  
-#ifdef AT_LEAST_DB_4
-	  /* set the server */
-	  if (server && status == 0)
-	  {
-	      status = env->set_rpc_server(env, NULL, server, 0, 0, 0);
-	      Trace(("ENV->set_rpc_server server = %s returned %s\n", server,
-	  					my_db_strerror(status))) ;
-	  }
-#else
-#  if defined(AT_LEAST_DB_3_1) && ! defined(AT_LEAST_DB_4)
-	  /* set the server */
-	  if (server && status == 0)
-	  {
-	      status = env->set_server(env, server, 0, 0, 0);
-	      Trace(("ENV->set_server server = %s returned %s\n", server,
-	  					my_db_strerror(status))) ;
-	  }
-#  endif
-#endif
-#ifdef AT_LEAST_DB_3_2
-	  if (setflags && status == 0)
-	  {
-	      status = env->set_flags(env, setflags, 1);
-	      Trace(("ENV->set_flags value = %d returned %s\n", setflags,
-	  					my_db_strerror(status))) ;
-	  }
-#endif
-	  if (status == 0)
-	  {
-	    int		mode = 0 ;
-	    /* Take a copy of the error prefix */
-	    if (errprefix) {
-	        Trace(("copying errprefix\n" )) ;
-		RETVAL->ErrPrefix = newSVsv(errprefix) ;
-		SvPOK_only(RETVAL->ErrPrefix) ;
-	    }
-	    if (RETVAL->ErrPrefix)
-	        env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
-
-	    if (SvGMAGICAL(errfile))
-		    mg_get(errfile);
-	    if (SvOK(errfile)) {
-	        FILE * ef = GetFILEptr(errfile);
-	    	if (! ef)
-		    croak("Cannot open file ErrFile", Strerror(errno));
-		RETVAL->ErrHandle = newSVsv(errfile) ;
-	    	env->set_errfile(env, ef) ;
-
-	    }
-
-	    SetValue_iv(mode, "Mode") ;
-	    env->set_errcall(env, db_errcall_cb) ;
-	    RETVAL->active = TRUE ;
-	    RETVAL->cds_enabled = ((flags & DB_INIT_CDB) != 0 ? TRUE : FALSE) ; 
-#ifdef IS_DB_3_0_x
-	    status = (env->open)(env, home, config, flags, mode) ;
-#else /* > 3.0 */
-	    status = (env->open)(env, home, flags, mode) ;
-#endif
-	    Trace(("ENV->open(env=%s,home=%s,flags=%d,mode=%d)\n",env,home,flags,mode)) ;
-	    Trace(("ENV->open returned %s\n", my_db_strerror(status))) ;
-	  }
-
-	  if (status == 0)
-	      hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
-	  else {
-	      (env->close)(env, 0) ;
-              if (RETVAL->ErrHandle)
-                  SvREFCNT_dec(RETVAL->ErrHandle) ;
-              if (RETVAL->ErrPrefix)
-                  SvREFCNT_dec(RETVAL->ErrPrefix) ;
-              Safefree(RETVAL) ;
-	      RETVAL = NULL ;
-	  }
-#endif /* DB_VERSION_MAJOR > 2 */
-	  {
-	      SV * sv_err = perl_get_sv(ERR_BUFF, FALSE);
-	      sv_setpv(sv_err, db_strerror(status));
-	  }
-	}
-	OUTPUT:
-	    RETVAL
-
-DB_ENV*
-DB_ENV(env)
-	BerkeleyDB::Env		env
-	CODE:
-	    if (env->active)
-	        RETVAL = env->Env ;
-	    else
-	        RETVAL = NULL;
-
-
-void
-log_archive(env, flags=0)
-	u_int32_t		flags
-	BerkeleyDB::Env		env
-	PPCODE:
-	{
-	  char ** list;
-	  char ** file;
-	  AV    * av;
-#ifndef AT_LEAST_DB_3
-          softCrash("log_archive needs at least Berkeley DB 3.x.x");
-#else
-#  ifdef AT_LEAST_DB_4
-	  env->Status = env->Env->log_archive(env->Env, &list, flags) ;
-#  else
-#    ifdef AT_LEAST_DB_3_3
-	  env->Status = log_archive(env->Env, &list, flags) ;
-#    else
-	  env->Status = log_archive(env->Env, &list, flags, safemalloc) ;
-#    endif
-#  endif
-	  if (env->Status == 0 && list != NULL)
-          {
-	      for (file = list; *file != NULL; ++file)
-	      {
-	        XPUSHs(sv_2mortal(newSVpv(*file, 0))) ;
-	      }
-	      safefree(list);
-	  }
-#endif
-	}
-
-BerkeleyDB::Txn::Raw
-_txn_begin(env, pid=NULL, flags=0)
-	u_int32_t		flags
-	BerkeleyDB::Env		env
-	BerkeleyDB::Txn		pid
-	CODE:
-	{
-	    DB_TXN *txn ;
-	    DB_TXN *p_id = NULL ;
-	    Trace(("txn_begin pid %d, flags %d\n", pid, flags)) ;
-#if DB_VERSION_MAJOR == 2
-	    if (env->Env->tx_info == NULL)
-		softCrash("Transaction Manager not enabled") ;
-#endif
-	    if (!env->txn_enabled)
-		softCrash("Transaction Manager not enabled") ;
-	    if (pid)
-		p_id = pid->txn ;
-	    env->TxnMgrStatus =
-#if DB_VERSION_MAJOR == 2
-	    	txn_begin(env->Env->tx_info, p_id, &txn) ;
-#else
-#  ifdef AT_LEAST_DB_4
-	    	env->Env->txn_begin(env->Env, p_id, &txn, flags) ;
-#  else
-	    	txn_begin(env->Env, p_id, &txn, flags) ;
-#  endif
-#endif
-	    if (env->TxnMgrStatus == 0) {
-	      ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
-	      RETVAL->txn  = txn ;
-	      RETVAL->active = TRUE ;
-	      Trace(("_txn_begin created txn [%p] in [%p]\n", txn, RETVAL));
-	      hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
-	    }
-	    else
-		RETVAL = NULL ;
-	}
-	OUTPUT:
-	    RETVAL
-
-
-#if DB_VERSION_MAJOR == 2
-#  define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env->tx_info, k, m)
-#else /* DB 3.0 or better */
-#  ifdef AT_LEAST_DB_4 
-#    define env_txn_checkpoint(e,k,m,f) e->Env->txn_checkpoint(e->Env, k, m, f)
-#  else
-#    ifdef AT_LEAST_DB_3_1
-#      define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m, 0)
-#    else
-#      define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m)
-#    endif
-#  endif
-#endif
-DualType
-env_txn_checkpoint(env, kbyte, min, flags=0)
-	BerkeleyDB::Env		env
-	long			kbyte
-	long			min
-	u_int32_t		flags
-
-HV *
-txn_stat(env)
-	BerkeleyDB::Env		env
-	HV *			RETVAL = NULL ;
-	CODE:
-	{
-	    DB_TXN_STAT *	stat ;
-#ifdef AT_LEAST_DB_4
-	    if(env->Env->txn_stat(env->Env, &stat, 0) == 0) {
-#else
-#  ifdef AT_LEAST_DB_3_3
-	    if(txn_stat(env->Env, &stat) == 0) {
-#  else
-#    if DB_VERSION_MAJOR == 2
-	    if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
-#    else
-	    if(txn_stat(env->Env, &stat, safemalloc) == 0) {
-#    endif
-#  endif
-#endif
-	    	RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
-		hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
-		hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
-		hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
-		hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
-		hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
-		hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
-		hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
-#if DB_VERSION_MAJOR > 2
-		hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
-		hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
-		hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
-		hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
-#endif
-		safefree(stat) ;
-	    }
-	}
-	OUTPUT:
-	    RETVAL
-
-#define EnDis(x)	((x) ? "Enabled" : "Disabled")
-void
-printEnv(env)
-        BerkeleyDB::Env  env
-	INIT:
-	    ckActive_Environment(env->active) ;
-	CODE:
-#if 0
-	  printf("env             [0x%X]\n", env) ;
-	  printf("  ErrPrefix     [%s]\n", env->ErrPrefix
-				           ? SvPVX(env->ErrPrefix) : 0) ;
-	  printf("  DB_ENV\n") ;
-	  printf("    db_lorder   [%d]\n", env->Env.db_lorder) ;
-	  printf("    db_home     [%s]\n", env->Env.db_home) ;
-	  printf("    db_data_dir [%s]\n", env->Env.db_data_dir) ;
-	  printf("    db_log_dir  [%s]\n", env->Env.db_log_dir) ;
-	  printf("    db_tmp_dir  [%s]\n", env->Env.db_tmp_dir) ;
-	  printf("    lk_info     [%s]\n", EnDis(env->Env.lk_info)) ;
-	  printf("    lk_max      [%d]\n", env->Env.lk_max) ;
-	  printf("    lg_info     [%s]\n", EnDis(env->Env.lg_info)) ;
-	  printf("    lg_max      [%d]\n", env->Env.lg_max) ;
-	  printf("    mp_info     [%s]\n", EnDis(env->Env.mp_info)) ;
-	  printf("    mp_size     [%d]\n", env->Env.mp_size) ;
-	  printf("    tx_info     [%s]\n", EnDis(env->Env.tx_info)) ;
-	  printf("    tx_max      [%d]\n", env->Env.tx_max) ;
-	  printf("    flags       [%d]\n", env->Env.flags) ;
-	  printf("\n") ;
-#endif
-
-SV *
-errPrefix(env, prefix)
-        BerkeleyDB::Env  env
-	SV * 		 prefix
-	INIT:
-	    ckActive_Environment(env->active) ;
-	CODE:
-	  if (env->ErrPrefix) {
-	      RETVAL = newSVsv(env->ErrPrefix) ;
-              SvPOK_only(RETVAL) ;
-	      sv_setsv(env->ErrPrefix, prefix) ;
-	  }
-	  else {
-	      RETVAL = NULL ;
-	      env->ErrPrefix = newSVsv(prefix) ;
-	  }
-	  SvPOK_only(env->ErrPrefix) ;
-#if DB_VERSION_MAJOR == 2
-	  env->Env->db_errpfx = SvPVX(env->ErrPrefix) ;
-#else
-	  env->Env->set_errpfx(env->Env, SvPVX(env->ErrPrefix)) ;
-#endif
-	OUTPUT:
-	  RETVAL
-
-DualType
-status(env)
-        BerkeleyDB::Env 	env
-	CODE:
-	    RETVAL =  env->Status ;
-	OUTPUT:
-	    RETVAL
-
-
-
-DualType
-db_appexit(env)
-        BerkeleyDB::Env 	env
-	ALIAS:	close =1
-	INIT:
-	    ckActive_Environment(env->active) ;
-	CODE:
-#ifdef STRICT_CLOSE
-	    if (env->open_dbs)
-		softCrash("attempted to close an environment with %d open database(s)",
-			env->open_dbs) ;
-#endif /* STRICT_CLOSE */
-#if DB_VERSION_MAJOR == 2
-	    RETVAL = db_appexit(env->Env) ;
-#else
-	    RETVAL = (env->Env->close)(env->Env, 0) ;
-#endif
-	    env->active = FALSE ;
-	    hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
-	OUTPUT:
-	    RETVAL
-
-
-void
-_DESTROY(env)
-        BerkeleyDB::Env  env
-	int RETVAL = 0 ;
-	CODE:
-	  Trace(("In BerkeleyDB::Env::DESTROY\n"));
-	  Trace(("    env %ld Env %ld dirty %d\n", env, &env->Env, PL_dirty)) ;
-	  if (env->active)
-#if DB_VERSION_MAJOR == 2
-              db_appexit(env->Env) ;
-#else
-	      (env->Env->close)(env->Env, 0) ;
-#endif
-          if (env->ErrHandle)
-              SvREFCNT_dec(env->ErrHandle) ;
-          if (env->ErrPrefix)
-              SvREFCNT_dec(env->ErrPrefix) ;
-#if DB_VERSION_MAJOR == 2
-          Safefree(env->Env) ;
-#endif
-          Safefree(env) ;
-	  hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
-	  Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
-
-BerkeleyDB::TxnMgr::Raw
-_TxnMgr(env)
-        BerkeleyDB::Env  env
-	INIT:
-	    ckActive_Environment(env->active) ;
-	    if (!env->txn_enabled)
-		softCrash("Transaction Manager not enabled") ;
-	CODE:
-	    ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
-	    RETVAL->env  = env ;
-	    /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (char *)txn, 1) ; */
-	OUTPUT:
-	    RETVAL
-
-int
-get_shm_key(env, id)
-        BerkeleyDB::Env  env
-	long  		 id = NO_INIT
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_4_2
-	    softCrash("$env->get_shm_key needs Berkeley DB 4.2 or better") ;
-#else
-	    RETVAL = env->Env->get_shm_key(env->Env, &id);
-#endif	    
-	OUTPUT:
-	    RETVAL
-	    id
-
-
-int
-set_lg_dir(env, dir)
-        BerkeleyDB::Env  env
-	char *		 dir
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_1
-	    softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_lg_bsize(env, bsize)
-        BerkeleyDB::Env  env
-	u_int32_t	 bsize
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3
-	    softCrash("$env->set_lg_bsize needs Berkeley DB 3.0.55 or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_lg_bsize(env->Env, bsize);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_lg_max(env, lg_max)
-        BerkeleyDB::Env  env
-	u_int32_t	 lg_max
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3
-	    softCrash("$env->set_lg_max needs Berkeley DB 3.0.55 or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_lg_max(env->Env, lg_max);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_data_dir(env, dir)
-        BerkeleyDB::Env  env
-	char *		 dir
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_1
-	    softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
-#else
-            dieIfEnvOpened(env, "set_data_dir");
-	    RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_tmp_dir(env, dir)
-        BerkeleyDB::Env  env
-	char *		 dir
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_1
-	    softCrash("$env->set_tmp_dir needs Berkeley DB 3.1 or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_tmp_dir(env->Env, dir);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_mutexlocks(env, do_lock)
-        BerkeleyDB::Env  env
-	int 		 do_lock
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3
-	    softCrash("$env->set_setmutexlocks needs Berkeley DB 3.0 or better") ;
-#else
-#  ifdef AT_LEAST_DB_4
-	    RETVAL = env->Status = env->Env->set_flags(env->Env, DB_NOLOCKING, do_lock);
-#  else
-#    if defined(AT_LEAST_DB_3_2_6) || defined(IS_DB_3_0_x)
-	    RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
-#    else /* DB 3.1 or 3.2.3 */
-	    RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
-#    endif
-#  endif
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_verbose(env, which, onoff)
-        BerkeleyDB::Env  env
-	u_int32_t	 which
-	int	 	 onoff
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3
-	    softCrash("$env->set_verbose needs Berkeley DB 3.x or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_verbose(env->Env, which, onoff);
-#endif
-	OUTPUT:
-	    RETVAL
-
-int
-set_flags(env, flags, onoff)
-        BerkeleyDB::Env  env
-	u_int32_t	 flags
-	int	 	 onoff
-	INIT:
-	  ckActive_Database(env->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_2
-	    softCrash("$env->set_flags needs Berkeley DB 3.2.x or better") ;
-#else
-	    RETVAL = env->Status = env->Env->set_flags(env->Env, flags, onoff);
-#endif
-	OUTPUT:
-	    RETVAL
-
-
-MODULE = BerkeleyDB::Term		PACKAGE = BerkeleyDB::Term
-
-void
-close_everything()
-
-#define safeCroak(string)	softCrash(string)
-void
-safeCroak(string)
-	char * string
-
-MODULE = BerkeleyDB::Hash	PACKAGE = BerkeleyDB::Hash	PREFIX = hash_
-
-BerkeleyDB::Hash::Raw
-_db_open_hash(self, ref)
-	char *		self
-	SV * 		ref
-	CODE:
-	{
-	    HV *		hash ;
-	    SV * 		sv ;
-	    DB_INFO 		info ;
-	    BerkeleyDB__Env	dbenv = NULL;
-	    SV *		ref_dbenv = NULL;
-	    const char *	file = NULL ;
-	    const char *	subname = NULL ;
-	    int			flags = 0 ;
-	    int			mode = 0 ;
-    	    BerkeleyDB 		db ;
-    	    BerkeleyDB__Txn 	txn = NULL ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-
-    	    Trace(("_db_open_hash start\n")) ;
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(file, "Filename", char *) ;
-	    SetValue_pv(subname, "Subname", char *) ;
-	    SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
-	    SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
-	    ref_dbenv = sv ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_iv(mode, "Mode") ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-
-       	    Zero(&info, 1, DB_INFO) ;
-	    SetValue_iv(info.db_cachesize, "Cachesize") ;
-	    SetValue_iv(info.db_lorder, "Lorder") ;
-	    SetValue_iv(info.db_pagesize, "Pagesize") ;
-	    SetValue_iv(info.h_ffactor, "Ffactor") ;
-	    SetValue_iv(info.h_nelem, "Nelem") ;
-	    SetValue_iv(info.flags, "Property") ;
-	    ZMALLOC(db, BerkeleyDB_type) ;
-	    if ((sv = readHash(hash, "Hash")) && sv != &PL_sv_undef) {
-		info.h_hash = hash_cb ;
-		db->hash = newSVsv(sv) ;
-	    }
-	    /* DB_DUPSORT was introduced in DB 2.5.9 */
-	    if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
-#ifdef DB_DUPSORT
-		info.dup_compare = dup_compare ;
-		db->dup_compare = newSVsv(sv) ;
-		info.flags |= DB_DUP|DB_DUPSORT ;
-#else
-	        croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
-#endif
-	    }
-	    RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_HASH, flags, mode, &info, enc_passwd, enc_flags) ;
-    	    Trace(("_db_open_hash end\n")) ;
-	}
-	OUTPUT:
-	    RETVAL
-
-
-HV *
-db_stat(db, flags=0)
-	int			flags
-	BerkeleyDB::Common	db
-	HV *			RETVAL = NULL ;
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-	{
-#if DB_VERSION_MAJOR == 2
-	    softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
-#else
-	    DB_HASH_STAT *	stat ;
-#ifdef AT_LEAST_DB_4_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ;
-#else        
-#ifdef AT_LEAST_DB_3_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
-#else
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
-#endif
-#endif
-	    if (db->Status == 0) {
-	    	RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
-		hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
-		hv_store_iv(RETVAL, "hash_version", stat->hash_version);
-		hv_store_iv(RETVAL, "hash_pagesize", stat->hash_pagesize);
-#ifdef AT_LEAST_DB_3_1
-		hv_store_iv(RETVAL, "hash_nkeys", stat->hash_nkeys);
-		hv_store_iv(RETVAL, "hash_ndata", stat->hash_ndata);
-#else
-		hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
-#endif
-#ifndef AT_LEAST_DB_3_1
-		hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
-#endif
-		hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
-		hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
-		hv_store_iv(RETVAL, "hash_free", stat->hash_free);
-		hv_store_iv(RETVAL, "hash_bfree", stat->hash_bfree);
-		hv_store_iv(RETVAL, "hash_bigpages", stat->hash_bigpages);
-		hv_store_iv(RETVAL, "hash_big_bfree", stat->hash_big_bfree);
-		hv_store_iv(RETVAL, "hash_overflows", stat->hash_overflows);
-		hv_store_iv(RETVAL, "hash_ovfl_free", stat->hash_ovfl_free);
-		hv_store_iv(RETVAL, "hash_dup", stat->hash_dup);
-		hv_store_iv(RETVAL, "hash_dup_free", stat->hash_dup_free);
-#if DB_VERSION_MAJOR >= 3
-		hv_store_iv(RETVAL, "hash_metaflags", stat->hash_metaflags);
-#endif
-		safefree(stat) ;
-	    }
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-
-MODULE = BerkeleyDB::Unknown	PACKAGE = BerkeleyDB::Unknown	PREFIX = hash_
-
-void
-_db_open_unknown(ref)
-	SV * 		ref
-	PPCODE:
-	{
-	    HV *		hash ;
-	    SV * 		sv ;
-	    DB_INFO 		info ;
-	    BerkeleyDB__Env	dbenv = NULL;
-	    SV *		ref_dbenv = NULL;
-	    const char *	file = NULL ;
-	    const char *	subname = NULL ;
-	    int			flags = 0 ;
-	    int			mode = 0 ;
-    	    BerkeleyDB 		db ;
-	    BerkeleyDB		RETVAL ;
-    	    BerkeleyDB__Txn 	txn = NULL ;
-	    static char * 		Names[] = {"", "Btree", "Hash", "Recno"} ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(file, "Filename", char *) ;
-	    SetValue_pv(subname, "Subname", char *) ;
-	    SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
-	    SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
-	    ref_dbenv = sv ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_iv(mode, "Mode") ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-
-       	    Zero(&info, 1, DB_INFO) ;
-	    SetValue_iv(info.db_cachesize, "Cachesize") ;
-	    SetValue_iv(info.db_lorder, "Lorder") ;
-	    SetValue_iv(info.db_pagesize, "Pagesize") ;
-	    SetValue_iv(info.h_ffactor, "Ffactor") ;
-	    SetValue_iv(info.h_nelem, "Nelem") ;
-	    SetValue_iv(info.flags, "Property") ;
-	    ZMALLOC(db, BerkeleyDB_type) ;
-
-	    RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_UNKNOWN, flags, mode, &info, enc_passwd, enc_flags) ;
-	    XPUSHs(sv_2mortal(newSViv(PTR2IV(RETVAL))));
-	    if (RETVAL)
-	        XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
-	    else
-	        XPUSHs(sv_2mortal(newSViv((IV)NULL)));
-	}
-
-
-
-MODULE = BerkeleyDB::Btree	PACKAGE = BerkeleyDB::Btree	PREFIX = btree_
-
-BerkeleyDB::Btree::Raw
-_db_open_btree(self, ref)
-	char *		self
-	SV * 		ref
-	CODE:
-	{
-	    HV *		hash ;
-	    SV * 		sv ;
-	    DB_INFO 		info ;
-	    BerkeleyDB__Env	dbenv = NULL;
-	    SV *		ref_dbenv = NULL;
-	    const char *	file = NULL ;
-	    const char *	subname = NULL ;
-	    int			flags = 0 ;
-	    int			mode = 0 ;
-    	    BerkeleyDB  	db ;
-    	    BerkeleyDB__Txn 	txn = NULL ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-
-	    Trace(("In _db_open_btree\n"));
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(file, "Filename", char*) ;
-	    SetValue_pv(subname, "Subname", char *) ;
-	    SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
-	    SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
-	    ref_dbenv = sv ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_iv(mode, "Mode") ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-
-       	    Zero(&info, 1, DB_INFO) ;
-	    SetValue_iv(info.db_cachesize, "Cachesize") ;
-	    SetValue_iv(info.db_lorder, "Lorder") ;
-	    SetValue_iv(info.db_pagesize, "Pagesize") ;
-	    SetValue_iv(info.bt_minkey, "Minkey") ;
-	    SetValue_iv(info.flags, "Property") ;
-	    ZMALLOC(db, BerkeleyDB_type) ;
-	    if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
-		Trace(("    Parsed Compare callback\n"));
-		info.bt_compare = btree_compare ;
-		db->compare = newSVsv(sv) ;
-	    }
-	    /* DB_DUPSORT was introduced in DB 2.5.9 */
-	    if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
-#ifdef DB_DUPSORT
-		Trace(("    Parsed DupCompare callback\n"));
-		info.dup_compare = dup_compare ;
-		db->dup_compare = newSVsv(sv) ;
-		info.flags |= DB_DUP|DB_DUPSORT ;
-#else
-	        softCrash("DupCompare needs Berkeley DB 2.5.9 or later") ;
-#endif
-	    }
-	    if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
-		Trace(("    Parsed Prefix callback\n"));
-		info.bt_prefix = btree_prefix ;
-		db->prefix = newSVsv(sv) ;
-	    }
-
-	    RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_BTREE, flags, mode, &info, enc_passwd, enc_flags) ;
-	}
-	OUTPUT:
-	    RETVAL
-
-
-HV *
-db_stat(db, flags=0)
-	int			flags
-	BerkeleyDB::Common	db
-	HV *			RETVAL = NULL ;
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-	{
-	    DB_BTREE_STAT *	stat ;
-#ifdef AT_LEAST_DB_4_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ;
-#else        
-#ifdef AT_LEAST_DB_3_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
-#else
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
-#endif
-#endif
-	    if (db->Status == 0) {
-	    	RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
-		hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
-		hv_store_iv(RETVAL, "bt_version", stat->bt_version);
-#if DB_VERSION_MAJOR > 2
-		hv_store_iv(RETVAL, "bt_metaflags", stat->bt_metaflags) ;
-		hv_store_iv(RETVAL, "bt_flags", stat->bt_metaflags) ;
-#else
-		hv_store_iv(RETVAL, "bt_flags", stat->bt_flags) ;
-#endif
-		hv_store_iv(RETVAL, "bt_maxkey", stat->bt_maxkey) ;
-		hv_store_iv(RETVAL, "bt_minkey", stat->bt_minkey);
-		hv_store_iv(RETVAL, "bt_re_len", stat->bt_re_len);
-		hv_store_iv(RETVAL, "bt_re_pad", stat->bt_re_pad);
-		hv_store_iv(RETVAL, "bt_pagesize", stat->bt_pagesize);
-		hv_store_iv(RETVAL, "bt_levels", stat->bt_levels);
-#ifdef AT_LEAST_DB_3_1
-		hv_store_iv(RETVAL, "bt_nkeys", stat->bt_nkeys);
-		hv_store_iv(RETVAL, "bt_ndata", stat->bt_ndata);
-#else
-		hv_store_iv(RETVAL, "bt_nrecs", stat->bt_nrecs);
-#endif
-		hv_store_iv(RETVAL, "bt_int_pg", stat->bt_int_pg);
-		hv_store_iv(RETVAL, "bt_leaf_pg", stat->bt_leaf_pg);
-		hv_store_iv(RETVAL, "bt_dup_pg", stat->bt_dup_pg);
-		hv_store_iv(RETVAL, "bt_over_pg", stat->bt_over_pg);
-		hv_store_iv(RETVAL, "bt_free", stat->bt_free);
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
-		hv_store_iv(RETVAL, "bt_freed", stat->bt_freed);
-		hv_store_iv(RETVAL, "bt_pfxsaved", stat->bt_pfxsaved);
-		hv_store_iv(RETVAL, "bt_split", stat->bt_split);
-		hv_store_iv(RETVAL, "bt_rootsplit", stat->bt_rootsplit);
-		hv_store_iv(RETVAL, "bt_fastsplit", stat->bt_fastsplit);
-		hv_store_iv(RETVAL, "bt_added", stat->bt_added);
-		hv_store_iv(RETVAL, "bt_deleted", stat->bt_deleted);
-		hv_store_iv(RETVAL, "bt_get", stat->bt_get);
-		hv_store_iv(RETVAL, "bt_cache_hit", stat->bt_cache_hit);
-		hv_store_iv(RETVAL, "bt_cache_miss", stat->bt_cache_miss);
-#endif
-		hv_store_iv(RETVAL, "bt_int_pgfree", stat->bt_int_pgfree);
-		hv_store_iv(RETVAL, "bt_leaf_pgfree", stat->bt_leaf_pgfree);
-		hv_store_iv(RETVAL, "bt_dup_pgfree", stat->bt_dup_pgfree);
-		hv_store_iv(RETVAL, "bt_over_pgfree", stat->bt_over_pgfree);
-		safefree(stat) ;
-	    }
-	}
-	OUTPUT:
-	    RETVAL
-
-
-MODULE = BerkeleyDB::Recno	PACKAGE = BerkeleyDB::Recno	PREFIX = recno_
-
-BerkeleyDB::Recno::Raw
-_db_open_recno(self, ref)
-	char *		self
-	SV * 		ref
-	CODE:
-	{
-	    HV *		hash ;
-	    SV * 		sv ;
-	    DB_INFO 		info ;
-	    BerkeleyDB__Env	dbenv = NULL;
-	    SV *		ref_dbenv = NULL;
-	    const char *	file = NULL ;
-	    const char *	subname = NULL ;
-	    int			flags = 0 ;
-	    int			mode = 0 ;
-    	    BerkeleyDB 		db ;
-    	    BerkeleyDB__Txn 	txn = NULL ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(file, "Fname", char*) ;
-	    SetValue_pv(subname, "Subname", char *) ;
-	    SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
-	    ref_dbenv = sv ;
-	    SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_iv(mode, "Mode") ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-
-       	    Zero(&info, 1, DB_INFO) ;
-	    SetValue_iv(info.db_cachesize, "Cachesize") ;
-	    SetValue_iv(info.db_lorder, "Lorder") ;
-	    SetValue_iv(info.db_pagesize, "Pagesize") ;
-	    SetValue_iv(info.bt_minkey, "Minkey") ;
-
-	    SetValue_iv(info.flags, "Property") ;
-	    SetValue_pv(info.re_source, "Source", char*) ;
-	    if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
-		info.re_len = SvIV(sv) ; ;
-		flagSet_DB2(info.flags, DB_FIXEDLEN) ;
-	    }
-	    if ((sv = readHash(hash, "Delim")) && sv != &PL_sv_undef) {
-		info.re_delim = SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
-		flagSet_DB2(info.flags, DB_DELIMITER) ;
-	    }
-	    if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
-		info.re_pad = (u_int32_t)SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
-		flagSet_DB2(info.flags, DB_PAD) ;
-	    }
-	    ZMALLOC(db, BerkeleyDB_type) ;
-#ifdef ALLOW_RECNO_OFFSET
-	    SetValue_iv(db->array_base, "ArrayBase") ;
-	    db->array_base = (db->array_base == 0 ? 1 : 0) ;
-#endif /* ALLOW_RECNO_OFFSET */
-
-	    RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_RECNO, flags, mode, &info, enc_passwd, enc_flags) ;
-	}
-	OUTPUT:
-	    RETVAL
-
-
-MODULE = BerkeleyDB::Queue	PACKAGE = BerkeleyDB::Queue	PREFIX = recno_
-
-BerkeleyDB::Queue::Raw
-_db_open_queue(self, ref)
-	char *		self
-	SV * 		ref
-	CODE:
-	{
-#ifndef AT_LEAST_DB_3
-            softCrash("BerkeleyDB::Queue needs Berkeley DB 3.0.x or better");
-#else
-	    HV *		hash ;
-	    SV * 		sv ;
-	    DB_INFO 		info ;
-	    BerkeleyDB__Env	dbenv = NULL;
-	    SV *		ref_dbenv = NULL;
-	    const char *	file = NULL ;
-	    const char *	subname = NULL ;
-	    int			flags = 0 ;
-	    int			mode = 0 ;
-    	    BerkeleyDB 		db ;
-    	    BerkeleyDB__Txn 	txn = NULL ;
-	    char *	enc_passwd = NULL ;
-	    int		enc_flags = 0 ;
-
-	    hash = (HV*) SvRV(ref) ;
-	    SetValue_pv(file, "Fname", char*) ;
-	    SetValue_pv(subname, "Subname", char *) ;
-	    SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
-	    ref_dbenv = sv ;
-	    SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
-	    SetValue_iv(flags, "Flags") ;
-	    SetValue_iv(mode, "Mode") ;
-	    SetValue_pv(enc_passwd,"Enc_Passwd", char *) ;
-	    SetValue_iv(enc_flags, "Enc_Flags") ;
-
-       	    Zero(&info, 1, DB_INFO) ;
-	    SetValue_iv(info.db_cachesize, "Cachesize") ;
-	    SetValue_iv(info.db_lorder, "Lorder") ;
-	    SetValue_iv(info.db_pagesize, "Pagesize") ;
-	    SetValue_iv(info.bt_minkey, "Minkey") ;
-    	    SetValue_iv(info.q_extentsize, "ExtentSize") ;
-
-
-	    SetValue_iv(info.flags, "Property") ;
-	    if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
-		info.re_len = SvIV(sv) ; ;
-		flagSet_DB2(info.flags, DB_FIXEDLEN) ;
-	    }
-	    if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
-		info.re_pad = (u_int32_t)SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
-		flagSet_DB2(info.flags, DB_PAD) ;
-	    }
-	    ZMALLOC(db, BerkeleyDB_type) ;
-#ifdef ALLOW_RECNO_OFFSET
-	    SetValue_iv(db->array_base, "ArrayBase") ;
-	    db->array_base = (db->array_base == 0 ? 1 : 0) ;
-#endif /* ALLOW_RECNO_OFFSET */
-
-	    RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_QUEUE, flags, mode, &info, enc_passwd, enc_flags) ;
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-HV *
-db_stat(db, flags=0)
-	int			flags
-	BerkeleyDB::Common	db
-	HV *			RETVAL = NULL ;
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-	{
-#if DB_VERSION_MAJOR == 2
-	    softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
-#else /* Berkeley DB 3, or better */
-	    DB_QUEUE_STAT *	stat ;
-#ifdef AT_LEAST_DB_4_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, db->txn, &stat, flags) ;
-#else        
-#ifdef AT_LEAST_DB_3_3
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
-#else
-	    db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
-#endif
-#endif
-	    if (db->Status == 0) {
-	    	RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
-		hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
-		hv_store_iv(RETVAL, "qs_version", stat->qs_version);
-#ifdef AT_LEAST_DB_3_1
-		hv_store_iv(RETVAL, "qs_nkeys", stat->qs_nkeys);
-		hv_store_iv(RETVAL, "qs_ndata", stat->qs_ndata);
-#else
-		hv_store_iv(RETVAL, "qs_nrecs", stat->qs_nrecs);
-#endif
-		hv_store_iv(RETVAL, "qs_pages", stat->qs_pages);
-		hv_store_iv(RETVAL, "qs_pagesize", stat->qs_pagesize);
-		hv_store_iv(RETVAL, "qs_pgfree", stat->qs_pgfree);
-		hv_store_iv(RETVAL, "qs_re_len", stat->qs_re_len);
-		hv_store_iv(RETVAL, "qs_re_pad", stat->qs_re_pad);
-#ifdef AT_LEAST_DB_3_2
-#else
-		hv_store_iv(RETVAL, "qs_start", stat->qs_start);
-#endif
-		hv_store_iv(RETVAL, "qs_first_recno", stat->qs_first_recno);
-		hv_store_iv(RETVAL, "qs_cur_recno", stat->qs_cur_recno);
-#if DB_VERSION_MAJOR >= 3
-		hv_store_iv(RETVAL, "qs_metaflags", stat->qs_metaflags);
-#endif
-		safefree(stat) ;
-	    }
-#endif
-	}
-	OUTPUT:
-	    RETVAL
-
-
-MODULE = BerkeleyDB::Common  PACKAGE = BerkeleyDB::Common	PREFIX = dab_
-
-
-DualType
-db_close(db,flags=0)
-	int 			flags
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	    saveCurrentDB(db) ;
-	CODE:
-	    Trace(("BerkeleyDB::Common::db_close %d\n", db));
-#ifdef STRICT_CLOSE
-	    if (db->txn)
-		softCrash("attempted to close a database while a transaction was still open") ;
-	    if (db->open_cursors)
-		softCrash("attempted to close a database with %d open cursor(s)",
-				db->open_cursors) ;
-#endif /* STRICT_CLOSE */
-	    RETVAL =  db->Status = ((db->dbp)->close)(db->dbp, flags) ;
-	    if (db->parent_env && db->parent_env->open_dbs)
-		-- db->parent_env->open_dbs ;
-	    db->active = FALSE ;
-	    hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
-	    -- db->open_cursors ;
-	    Trace(("end of BerkeleyDB::Common::db_close\n"));
-	OUTPUT:
-	    RETVAL
-
-void
-dab__DESTROY(db)
-	BerkeleyDB::Common	db
-	CODE:
-	  saveCurrentDB(db) ;
-	  Trace(("In BerkeleyDB::Common::_DESTROY db %d dirty=%d\n", db, PL_dirty)) ;
-	  destroyDB(db) ;
-	  Trace(("End of BerkeleyDB::Common::DESTROY \n")) ;
-
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
-#define db_cursor(db, txn, cur,flags)  ((db->dbp)->cursor)(db->dbp, txn, cur)
-#else
-#define db_cursor(db, txn, cur,flags)  ((db->dbp)->cursor)(db->dbp, txn, cur,flags)
-#endif
-BerkeleyDB::Cursor::Raw
-_db_cursor(db, flags=0)
-	u_int32_t		flags
-        BerkeleyDB::Common 	db
-        BerkeleyDB::Cursor 	RETVAL = NULL ;
-	ALIAS: __db_write_cursor = 1
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-	{
-	  DBC *	cursor ;
-	  saveCurrentDB(db) ;
-	  if (ix == 1 && db->cds_enabled) {
-#ifdef AT_LEAST_DB_3
-	      flags |= DB_WRITECURSOR;
-#else	      
-	      flags |= DB_RMW;
-#endif	      
-	  }
-	  if ((db->Status = db_cursor(db, db->txn, &cursor, flags)) == 0){
-	      ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
-	      db->open_cursors ++ ;
-	      RETVAL->parent_db  = db ;
-	      RETVAL->cursor  = cursor ;
-	      RETVAL->dbp     = db->dbp ;
-	      RETVAL->txn     = db->txn ;
-              RETVAL->type    = db->type ;
-              RETVAL->recno_or_queue    = db->recno_or_queue ;
-              RETVAL->cds_enabled    = db->cds_enabled ;
-              RETVAL->filename    = my_strdup(db->filename) ;
-              RETVAL->compare = db->compare ;
-              RETVAL->dup_compare = db->dup_compare ;
-#ifdef AT_LEAST_DB_3_3
-              RETVAL->associated = db->associated ;
-	      RETVAL->secondary_db  = db->secondary_db;
-#endif
-              RETVAL->prefix  = db->prefix ;
-              RETVAL->hash    = db->hash ;
-	      RETVAL->partial = db->partial ;
-	      RETVAL->doff    = db->doff ;
-	      RETVAL->dlen    = db->dlen ;
-	      RETVAL->active  = TRUE ;
-#ifdef ALLOW_RECNO_OFFSET
-	      RETVAL->array_base  = db->array_base ;
-#endif /* ALLOW_RECNO_OFFSET */
-#ifdef DBM_FILTERING
-	      RETVAL->filtering   = FALSE ;
-	      RETVAL->filter_fetch_key    = db->filter_fetch_key ;
-	      RETVAL->filter_store_key    = db->filter_store_key ;
-	      RETVAL->filter_fetch_value  = db->filter_fetch_value ;
-	      RETVAL->filter_store_value  = db->filter_store_value ;
-#endif
-              /* RETVAL->info ; */
-	      hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
-	  }
-	}
-	OUTPUT:
-	  RETVAL
-
-BerkeleyDB::Cursor::Raw
-_db_join(db, cursors, flags=0)
-	u_int32_t		flags
-        BerkeleyDB::Common 	db
-	AV *			cursors
-        BerkeleyDB::Cursor 	RETVAL = NULL ;
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-	{
-#if DB_VERSION_MAJOR == 2 && (DB_VERSION_MINOR < 5 || (DB_VERSION_MINOR == 5 && DB_VERSION_PATCH < 2))
-	    softCrash("join needs Berkeley DB 2.5.2 or later") ;
-#else /* Berkeley DB >= 2.5.2 */
-	  DBC *		join_cursor ;
-	  DBC **	cursor_list ;
-	  I32		count = av_len(cursors) + 1 ;
-	  int		i ;
-	  saveCurrentDB(db) ;
-	  if (count < 1 )
-	      softCrash("db_join: No cursors in parameter list") ;
-	  cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
-	  for (i = 0 ; i < count ; ++i) {
-	      SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
-	      IV tmp = SvIV(getInnerObject(obj)) ;
-	      BerkeleyDB__Cursor cur = INT2PTR(BerkeleyDB__Cursor, tmp);
-	      if (cur->dbp == db->dbp)
-	          softCrash("attempted to do a self-join");
-	      cursor_list[i] = cur->cursor ;
-	  }
-	  cursor_list[i] = NULL ;
-#if DB_VERSION_MAJOR == 2
-	  if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, flags, &join_cursor)) == 0){
-#else
-	  if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, &join_cursor, flags)) == 0){
-#endif
-	      ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
-	      db->open_cursors ++ ;
-	      RETVAL->parent_db  = db ;
-	      RETVAL->cursor  = join_cursor ;
-	      RETVAL->dbp     = db->dbp ;
-              RETVAL->type    = db->type ;
-              RETVAL->filename    = my_strdup(db->filename) ;
-              RETVAL->compare = db->compare ;
-              RETVAL->dup_compare = db->dup_compare ;
-#ifdef AT_LEAST_DB_3_3
-              RETVAL->associated = db->associated ;
-	      RETVAL->secondary_db  = db->secondary_db;
-#endif
-              RETVAL->prefix  = db->prefix ;
-              RETVAL->hash    = db->hash ;
-	      RETVAL->partial = db->partial ;
-	      RETVAL->doff    = db->doff ;
-	      RETVAL->dlen    = db->dlen ;
-	      RETVAL->active  = TRUE ;
-#ifdef ALLOW_RECNO_OFFSET
-	      RETVAL->array_base  = db->array_base ;
-#endif /* ALLOW_RECNO_OFFSET */
-#ifdef DBM_FILTERING
-	      RETVAL->filtering   = FALSE ;
-	      RETVAL->filter_fetch_key    = db->filter_fetch_key ;
-	      RETVAL->filter_store_key    = db->filter_store_key ;
-	      RETVAL->filter_fetch_value  = db->filter_fetch_value ;
-	      RETVAL->filter_store_value  = db->filter_store_value ;
-#endif
-              /* RETVAL->info ; */
-	      hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
-	  }
-	  safefree(cursor_list) ;
-#endif /* Berkeley DB >= 2.5.2 */
-	}
-	OUTPUT:
-	  RETVAL
-
-int
-ArrayOffset(db)
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-#ifdef ALLOW_RECNO_OFFSET
-	    RETVAL = db->array_base ? 0 : 1 ;
-#else
-	    RETVAL = 0 ;
-#endif /* ALLOW_RECNO_OFFSET */
-	OUTPUT:
-	    RETVAL
-
-
-bool
-cds_enabled(db)
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-	    RETVAL = db->cds_enabled ;
-	OUTPUT:
-	    RETVAL
-
-
-
-int
-type(db)
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-	    RETVAL = db->type ;
-	OUTPUT:
-	    RETVAL
-
-int
-byteswapped(db)
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	CODE:
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
-	    softCrash("byteswapped needs Berkeley DB 2.5 or later") ;
-#else
-#if DB_VERSION_MAJOR == 2
-	    RETVAL = db->dbp->byteswapped ;
-#else
-#ifdef AT_LEAST_DB_3_3
-	    db->dbp->get_byteswapped(db->dbp, &RETVAL) ;
-#else
-	    RETVAL = db->dbp->get_byteswapped(db->dbp) ;
-#endif
-#endif
-#endif
-	OUTPUT:
-	    RETVAL
-
-DualType
-status(db)
-        BerkeleyDB::Common 	db
-	CODE:
-	    RETVAL =  db->Status ;
-	OUTPUT:
-	    RETVAL
-
-#ifdef DBM_FILTERING
-
-#define setFilter(ftype)				\
-	{						\
-	    if (db->ftype)				\
-	        RETVAL = sv_mortalcopy(db->ftype) ;	\
-	    ST(0) = RETVAL ;				\
-	    if (db->ftype && (code == &PL_sv_undef)) {	\
-                SvREFCNT_dec(db->ftype) ;		\
-	        db->ftype = NULL ;			\
-	    }						\
-	    else if (code) {				\
-	        if (db->ftype)				\
-	            sv_setsv(db->ftype, code) ;		\
-	        else					\
-	            db->ftype = newSVsv(code) ;		\
-	    }	    					\
-	}
-
-
-SV *
-filter_fetch_key(db, code)
-	BerkeleyDB::Common		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_fetch_key, code) ;
-
-SV *
-filter_store_key(db, code)
-	BerkeleyDB::Common		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_store_key, code) ;
-
-SV *
-filter_fetch_value(db, code)
-	BerkeleyDB::Common		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_fetch_value, code) ;
-
-SV *
-filter_store_value(db, code)
-	BerkeleyDB::Common		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_store_value, code) ;
-
-#endif /* DBM_FILTERING */
-
-void
-partial_set(db, offset, length)
-        BerkeleyDB::Common 	db
-	u_int32_t		offset
-	u_int32_t		length
-	INIT:
-	    ckActive_Database(db->active) ;
-	PPCODE:
-	    if (GIMME == G_ARRAY) {
-		XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
-		XPUSHs(sv_2mortal(newSViv(db->doff))) ;
-		XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
-	    }
-	    db->partial = DB_DBT_PARTIAL ;
-	    db->doff    = offset ;
-	    db->dlen    = length ;
-
-
-void
-partial_clear(db)
-        BerkeleyDB::Common 	db
-	INIT:
-	    ckActive_Database(db->active) ;
-	PPCODE:
-	    if (GIMME == G_ARRAY) {
-		XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
-		XPUSHs(sv_2mortal(newSViv(db->doff))) ;
-		XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
-	    }
-	    db->partial =
-	    db->doff    =
-	    db->dlen    = 0 ;
-
-
-#define db_del(db, key, flags)  \
-	(db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
-DualType
-db_del(db, key, flags=0)
-	u_int		flags
-	BerkeleyDB::Common	db
-	DBTKEY		key
-	INIT:
-	    Trace(("db_del db[%p] in [%p] txn[%p] key[%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
-	    ckActive_Database(db->active) ;
-	    saveCurrentDB(db) ;
-
-
-#ifdef AT_LEAST_DB_3
-#  ifdef AT_LEAST_DB_3_2
-#    define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_CONSUME_WAIT)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
-#  else
-#    define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
-#  endif
-#else
-#define writeToKey() (flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
-#endif
-#define db_get(db, key, data, flags)   \
-	(db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
-DualType
-db_get(db, key, data, flags=0)
-	u_int		flags
-	BerkeleyDB::Common	db
-	DBTKEY_B	key
-	DBT_OPT		data
-	CODE:
-	  ckActive_Database(db->active) ;
-	  saveCurrentDB(db) ;
-	  SetPartial(data,db) ;
-	  Trace(("db_get db[%p] in [%p] txn[%p] key [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
-	  RETVAL = db_get(db, key, data, flags);
-	  Trace(("  RETVAL %d\n", RETVAL));
-	OUTPUT:
-	  RETVAL
-	  key	if (writeToKey()) OutputKey(ST(1), key) ;
-	  data
-
-#define db_pget(db, key, pkey, data, flags)   \
-	(db->Status = ((db->dbp)->pget)(db->dbp, db->txn, &key, &pkey, &data, flags))
-DualType
-db_pget(db, key, pkey, data, flags=0)
-	u_int		flags
-	BerkeleyDB::Common	db
-	DBTKEY_B	key
-	DBTKEY_B	pkey = NO_INIT
-	DBT_OPT		data
-	CODE:
-#ifndef AT_LEAST_DB_3_3
-          softCrash("db_pget needs at least Berkeley DB 3.3");
-#else
-	  Trace(("db_pget db [%p] in [%p] txn [%p] flags [%d]\n", db->dbp, db, db->txn, flags)) ;
-	  ckActive_Database(db->active) ;
-	  saveCurrentDB(db) ;
-	  SetPartial(data,db) ;
-	  DBT_clear(pkey);
-	  RETVAL = db_pget(db, key, pkey, data, flags);
-	  Trace(("  RETVAL %d\n", RETVAL));
-#endif
-	OUTPUT:
-	  RETVAL
-	  key	if (writeToKey()) OutputKey(ST(1), key) ;
-	  pkey
-	  data
-
-#define db_put(db,key,data,flag)	\
-		(db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
-DualType
-db_put(db, key, data, flags=0)
-	u_int			flags
-	BerkeleyDB::Common	db
-	DBTKEY			key
-	DBT			data
-	CODE:
-	  ckActive_Database(db->active) ;
-	  saveCurrentDB(db) ;
-	  /* SetPartial(data,db) ; */
-	  Trace(("db_put db[%p] in [%p] txn[%p] key[%.*s] data [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, data.size, data.data, flags)) ;
-	  RETVAL = db_put(db, key, data, flags);
-	  Trace(("  RETVAL %d\n", RETVAL));
-	OUTPUT:
-	  RETVAL
-	  key	if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
-
-#define db_key_range(db, key, range, flags)   \
-	(db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
-DualType
-db_key_range(db, key, less, equal, greater, flags=0)
-	u_int32_t	flags
-	BerkeleyDB::Common	db
-	DBTKEY_B	key
-	double          less = 0.0 ;
-	double          equal = 0.0 ;
-	double          greater = 0.0 ;
-	CODE:
-	{
-#ifndef AT_LEAST_DB_3_1
-          softCrash("key_range needs Berkeley DB 3.1.x or later") ;
-#else
-          DB_KEY_RANGE range ;
-          range.less = range.equal = range.greater = 0.0 ;
-	  ckActive_Database(db->active) ;
-	  saveCurrentDB(db) ;
-	  RETVAL = db_key_range(db, key, range, flags);
-	  if (RETVAL == 0) {
-	        less = range.less ;
-	        equal = range.equal;
-	        greater = range.greater;
-	  }
-#endif
-	}
-	OUTPUT:
-	  RETVAL
-	  less
-	  equal
-	  greater
-
-
-#define db_fd(d, x)	(db->Status = (db->dbp->fd)(db->dbp, &x))
-int
-db_fd(db)
-	BerkeleyDB::Common	db
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-	  saveCurrentDB(db) ;
-	  db_fd(db, RETVAL) ;
-	OUTPUT:
-	  RETVAL
-
-
-#define db_sync(db, fl)	(db->Status = (db->dbp->sync)(db->dbp, fl))
-DualType
-db_sync(db, flags=0)
-	u_int			flags
-	BerkeleyDB::Common	db
-	INIT:
-	  ckActive_Database(db->active) ;
-	  saveCurrentDB(db) ;
-
-void
-_Txn(db, txn=NULL)
-        BerkeleyDB::Common      db
-        BerkeleyDB::Txn         txn
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-	   if (txn) {
-	       Trace(("_Txn[%p] in[%p] active [%d]\n", txn->txn, txn, txn->active));
-	       ckActive_Transaction(txn->active) ;
-	       db->txn = txn->txn ;
-	   }
-	   else {
-	       Trace(("_Txn[undef] \n"));
-	       db->txn = NULL ;
-	   }
-
-
-#define db_truncate(db, countp, flags)  \
-	(db->Status = ((db->dbp)->truncate)(db->dbp, db->txn, &countp, flags))
-DualType
-truncate(db, countp, flags=0)
-	BerkeleyDB::Common	db
-	u_int32_t		countp
-	u_int32_t		flags
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_3
-          softCrash("truncate needs Berkeley DB 3.3 or later") ;
-#else
-	  saveCurrentDB(db) ;
-	  RETVAL = db_truncate(db, countp, flags);
-#endif
-	OUTPUT:
-	  RETVAL
-	  countp
-
-#ifdef AT_LEAST_DB_4_1
-#  define db_associate(db, sec, cb, flags)\
-	(db->Status = ((db->dbp)->associate)(db->dbp, NULL, sec->dbp, &cb, flags))
-#else
-#  define db_associate(db, sec, cb, flags)\
-	(db->Status = ((db->dbp)->associate)(db->dbp, sec->dbp, &cb, flags))
-#endif
-DualType
-associate(db, secondary, callback, flags=0)
-	BerkeleyDB::Common	db
-	BerkeleyDB::Common	secondary
-	SV*			callback
-	u_int32_t		flags
-	INIT:
-	  ckActive_Database(db->active) ;
-	CODE:
-#ifndef AT_LEAST_DB_3_3
-          softCrash("associate needs Berkeley DB 3.3 or later") ;
-#else
-	  saveCurrentDB(db) ;
-	  /* db->associated = newSVsv(callback) ; */
-	  secondary->associated = newSVsv(callback) ;
-	  /* secondary->dbp->app_private = secondary->associated ; */
-	  secondary->secondary_db = TRUE;
-	  RETVAL = db_associate(db, secondary, associate_cb, flags);
-#endif
-	OUTPUT:
-	  RETVAL
-
-
-MODULE = BerkeleyDB::Cursor              PACKAGE = BerkeleyDB::Cursor	PREFIX = cu_
-
-BerkeleyDB::Cursor::Raw
-_c_dup(db, flags=0)
-	u_int32_t		flags
-    	BerkeleyDB::Cursor	db
-        BerkeleyDB::Cursor 	RETVAL = NULL ;
-	INIT:
-	    saveCurrentDB(db->parent_db);
-	    ckActive_Database(db->active) ;
-	CODE:
-	{
-#ifndef AT_LEAST_DB_3
-          softCrash("c_dup needs at least Berkeley DB 3.0.x");
-#else
-	  DBC *		newcursor ;
-	  db->Status = ((db->cursor)->c_dup)(db->cursor, &newcursor, flags) ;
-	  if (db->Status == 0){
-	      ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
-	      db->parent_db->open_cursors ++ ;
-	      RETVAL->parent_db  = db->parent_db ;
-	      RETVAL->cursor  = newcursor ;
-	      RETVAL->dbp     = db->dbp ;
-              RETVAL->type    = db->type ;
-              RETVAL->recno_or_queue    = db->recno_or_queue ;
-              RETVAL->cds_enabled    = db->cds_enabled ;
-              RETVAL->filename    = my_strdup(db->filename) ;
-              RETVAL->compare = db->compare ;
-              RETVAL->dup_compare = db->dup_compare ;
-#ifdef AT_LEAST_DB_3_3
-              RETVAL->associated = db->associated ;
-#endif
-              RETVAL->prefix  = db->prefix ;
-              RETVAL->hash    = db->hash ;
-	      RETVAL->partial = db->partial ;
-	      RETVAL->doff    = db->doff ;
-	      RETVAL->dlen    = db->dlen ;
-	      RETVAL->active  = TRUE ;
-#ifdef ALLOW_RECNO_OFFSET
-	      RETVAL->array_base  = db->array_base ;
-#endif /* ALLOW_RECNO_OFFSET */
-#ifdef DBM_FILTERING
-	      RETVAL->filtering   = FALSE ;
-	      RETVAL->filter_fetch_key    = db->filter_fetch_key ;
-	      RETVAL->filter_store_key    = db->filter_store_key ;
-	      RETVAL->filter_fetch_value  = db->filter_fetch_value ;
-	      RETVAL->filter_store_value  = db->filter_store_value ;
-#endif /* DBM_FILTERING */
-              /* RETVAL->info ; */
-	      hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
-	  }
-#endif	
-	}
-	OUTPUT:
-	  RETVAL
-
-DualType
-_c_close(db)
-    BerkeleyDB::Cursor	db
-	INIT:
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	  hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
-	CODE:
-	  RETVAL =  db->Status =
-    	          ((db->cursor)->c_close)(db->cursor) ;
-	  db->active = FALSE ;
-	  if (db->parent_db->open_cursors)
-	      -- db->parent_db->open_cursors ;
-	OUTPUT:
-	  RETVAL
-
-void
-_DESTROY(db)
-    BerkeleyDB::Cursor	db
-	CODE:
-	  saveCurrentDB(db->parent_db);
-	  Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
-	  hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
-	  if (db->active)
-    	      ((db->cursor)->c_close)(db->cursor) ;
-	  if (db->parent_db->open_cursors)
-	      -- db->parent_db->open_cursors ;
-          Safefree(db->filename) ;
-          Safefree(db) ;
-	  Trace(("End of BerkeleyDB::Cursor::_DESTROY\n")) ;
-
-DualType
-status(db)
-        BerkeleyDB::Cursor 	db
-	CODE:
-	    RETVAL =  db->Status ;
-	OUTPUT:
-	    RETVAL
-
-
-#define cu_c_del(c,f)	(c->Status = ((c->cursor)->c_del)(c->cursor,f))
-DualType
-cu_c_del(db, flags=0)
-    int			flags
-    BerkeleyDB::Cursor	db
-	INIT:
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	OUTPUT:
-	  RETVAL
-
-
-#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
-DualType
-cu_c_get(db, key, data, flags=0)
-    int			flags
-    BerkeleyDB::Cursor	db
-    DBTKEY_B		key 
-    DBT_B		data 
-	INIT:
-	  Trace(("c_get db [%p] in [%p] flags [%d]\n", db->dbp, db, flags)) ;
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	  /* DBT_clear(key); */
-	  /* DBT_clear(data); */
-	  SetPartial(data,db) ;
-	  Trace(("c_get end\n")) ;
-	OUTPUT:
-	  RETVAL
-	  key
-	  data		if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
-
-#define cu_c_pget(c,k,p,d,f) (c->Status = (c->secondary_db ? (c->cursor->c_pget)(c->cursor,&k,&p,&d,f) : EINVAL))
-DualType
-cu_c_pget(db, key, pkey, data, flags=0)
-    int			flags
-    BerkeleyDB::Cursor	db
-    DBTKEY_B		key
-    DBTKEY_B		pkey = NO_INIT
-    DBT_B		data
-	CODE:
-#ifndef AT_LEAST_DB_3_3
-          softCrash("db_c_pget needs at least Berkeley DB 3.3");
-#else
-	  Trace(("c_pget db [%d] flags [%d]\n", db, flags)) ;
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	  SetPartial(data,db) ;
-	  DBT_clear(pkey);
-	  RETVAL = cu_c_pget(db, key, pkey, data, flags);
-	  Trace(("c_pget end\n")) ;
-#endif
-	OUTPUT:
-	  RETVAL
-	  key
-	  pkey
-	  data		
-
-
-
-#define cu_c_put(c,k,d,f)  (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
-DualType
-cu_c_put(db, key, data, flags=0)
-    int			flags
-    BerkeleyDB::Cursor	db
-    DBTKEY		key
-    DBT			data
-	INIT:
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	  /* SetPartial(data,db) ; */
-	OUTPUT:
-	  RETVAL
-
-#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
-DualType
-cu_c_count(db, count, flags=0)
-    int			flags
-    BerkeleyDB::Cursor	db
-    u_int32_t           count = NO_INIT
-	CODE:
-#ifndef AT_LEAST_DB_3_1
-          softCrash("c_count needs at least Berkeley DB 3.1.x");
-#else
-	  Trace(("c_get count [%d] flags [%d]\n", db, flags)) ;
-	  saveCurrentDB(db->parent_db);
-	  ckActive_Cursor(db->active) ;
-	  RETVAL = cu_c_count(db, count, flags) ;
-	  Trace(("    c_count got %d duplicates\n", count)) ;
-#endif
-	OUTPUT:
-	  RETVAL
-	  count
-
-MODULE = BerkeleyDB::TxnMgr           PACKAGE = BerkeleyDB::TxnMgr	PREFIX = xx_
-
-BerkeleyDB::Txn::Raw
-_txn_begin(txnmgr, pid=NULL, flags=0)
-	u_int32_t		flags
-	BerkeleyDB::TxnMgr	txnmgr
-	BerkeleyDB::Txn		pid
-	CODE:
-	{
-	    DB_TXN *txn ;
-	    DB_TXN *p_id = NULL ;
-#if DB_VERSION_MAJOR == 2
-	    if (txnmgr->env->Env->tx_info == NULL)
-		softCrash("Transaction Manager not enabled") ;
-#endif
-	    if (pid)
-		p_id = pid->txn ;
-	    txnmgr->env->TxnMgrStatus =
-#if DB_VERSION_MAJOR == 2
-	    	txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
-#else
-#  ifdef AT_LEAST_DB_4
-	    	txnmgr->env->Env->txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
-#  else
-	    	txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
-#  endif
-#endif
-	    if (txnmgr->env->TxnMgrStatus == 0) {
-	      ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
-	      RETVAL->txn  = txn ;
-	      RETVAL->active = TRUE ;
-	      Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
-	      hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
-	    }
-	    else
-		RETVAL = NULL ;
-	}
-	OUTPUT:
-	    RETVAL
-
-
-DualType
-status(mgr)
-        BerkeleyDB::TxnMgr 	mgr
-	CODE:
-	    RETVAL =  mgr->env->TxnMgrStatus ;
-	OUTPUT:
-	    RETVAL
-
-
-void
-_DESTROY(mgr)
-    BerkeleyDB::TxnMgr	mgr
-	CODE:
-	  Trace(("In BerkeleyDB::TxnMgr::DESTROY dirty=%d\n", PL_dirty)) ;
-          Safefree(mgr) ;
-	  Trace(("End of BerkeleyDB::TxnMgr::DESTROY\n")) ;
-
-DualType
-txn_close(txnp)
-	BerkeleyDB::TxnMgr	txnp
-        NOT_IMPLEMENTED_YET
-
-
-#if DB_VERSION_MAJOR == 2
-#  define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env->tx_info, k, m)
-#else
-#  ifdef AT_LEAST_DB_4 
-#    define xx_txn_checkpoint(e,k,m,f) e->env->Env->txn_checkpoint(e->env->Env, k, m, f)
-#  else
-#    ifdef AT_LEAST_DB_3_1
-#      define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m, 0)
-#    else
-#      define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m)
-#    endif
-#  endif
-#endif
-DualType
-xx_txn_checkpoint(txnp, kbyte, min, flags=0)
-	BerkeleyDB::TxnMgr	txnp
-	long			kbyte
-	long			min
-	u_int32_t		flags
-
-HV *
-txn_stat(txnp)
-	BerkeleyDB::TxnMgr	txnp
-	HV *			RETVAL = NULL ;
-	CODE:
-	{
-	    DB_TXN_STAT *	stat ;
-#ifdef AT_LEAST_DB_4
-	    if(txnp->env->Env->txn_stat(txnp->env->Env, &stat, 0) == 0) {
-#else
-#  ifdef AT_LEAST_DB_3_3
-	    if(txn_stat(txnp->env->Env, &stat) == 0) {
-#  else
-#    if DB_VERSION_MAJOR == 2
-	    if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
-#    else
-	    if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
-#    endif
-#  endif
-#endif
-	    	RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
-		hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
-		hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
-		hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
-		hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
-		hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
-		hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
-		hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
-#if DB_VERSION_MAJOR > 2
-		hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
-		hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
-		hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
-		hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
-#endif
-		safefree(stat) ;
-	    }
-	}
-	OUTPUT:
-	    RETVAL
-
-
-BerkeleyDB::TxnMgr
-txn_open(dir, flags, mode, dbenv)
-    int 		flags
-    const char *	dir
-    int 		mode
-    BerkeleyDB::Env 	dbenv
-        NOT_IMPLEMENTED_YET
-
-
-MODULE = BerkeleyDB::Txn              PACKAGE = BerkeleyDB::Txn		PREFIX = xx_
-
-DualType
-status(tid)
-        BerkeleyDB::Txn 	tid
-	CODE:
-	    RETVAL =  tid->Status ;
-	OUTPUT:
-	    RETVAL
-
-int
-_DESTROY(tid)
-    BerkeleyDB::Txn	tid
-	CODE:
-	  Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
-	  if (tid->active)
-#ifdef AT_LEAST_DB_4
-	    tid->txn->abort(tid->txn) ;
-#else
-	    txn_abort(tid->txn) ;
-#endif
-          RETVAL = (int)tid ;
-	  hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
-          Safefree(tid) ;
-	  Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
-	OUTPUT:
-	  RETVAL
-
-#define xx_txn_unlink(d,f,e)	txn_unlink(d,f,&(e->Env))
-DualType
-xx_txn_unlink(dir, force, dbenv)
-    const char *	dir
-    int 		force
-    BerkeleyDB::Env 	dbenv
-        NOT_IMPLEMENTED_YET
-
-#ifdef AT_LEAST_DB_4
-#  define xx_txn_prepare(t) (t->Status = t->txn->prepare(t->txn, 0))
-#else
-#  ifdef AT_LEAST_DB_3_3
-#    define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn, 0))
-#  else
-#    define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
-#  endif
-#endif
-DualType
-xx_txn_prepare(tid)
-	BerkeleyDB::Txn	tid
-	INIT:
-	    ckActive_Transaction(tid->active) ;
-
-#ifdef AT_LEAST_DB_4
-#  define _txn_commit(t,flags) (t->Status = t->txn->commit(t->txn, flags))
-#else
-#  if DB_VERSION_MAJOR == 2
-#    define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
-#  else
-#    define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
-#  endif
-#endif
-DualType
-_txn_commit(tid, flags=0)
-	u_int32_t	flags
-	BerkeleyDB::Txn	tid
-	INIT:
-	    ckActive_Transaction(tid->active) ;
-	    hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
-	    tid->active = FALSE ;
-
-#ifdef AT_LEAST_DB_4
-#  define _txn_abort(t) (t->Status = t->txn->abort(t->txn))
-#else
-#  define _txn_abort(t) (t->Status = txn_abort(t->txn))
-#endif
-DualType
-_txn_abort(tid)
-	BerkeleyDB::Txn	tid
-	INIT:
-	    ckActive_Transaction(tid->active) ;
-	    hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
-	    tid->active = FALSE ;
-
-#ifdef AT_LEAST_DB_4
-#  define _txn_discard(t,f) (t->Status = t->txn->discard(t->txn, f))
-#else
-#  ifdef AT_LEAST_DB_3_3_4
-#    define _txn_discard(t,f) (t->Status = txn_discard(t->txn, f))
-#  else
-#    define _txn_discard(t,f) (int)softCrash("txn_discard needs Berkeley DB 3.3.4 or better") ;
-#  endif
-#endif
-DualType
-_txn_discard(tid, flags=0)
-	BerkeleyDB::Txn	tid
-	u_int32_t       flags
-	INIT:
-	    ckActive_Transaction(tid->active) ;
-	    hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
-	    tid->active = FALSE ;
-
-#ifdef AT_LEAST_DB_4
-#  define xx_txn_id(t) t->txn->id(t->txn)
-#else
-#  define xx_txn_id(t) txn_id(t->txn)
-#endif
-u_int32_t
-xx_txn_id(tid)
-	BerkeleyDB::Txn	tid
-
-MODULE = BerkeleyDB::_tiedHash        PACKAGE = BerkeleyDB::_tiedHash
-
-int
-FIRSTKEY(db)
-        BerkeleyDB::Common         db
-        CODE:
-        {
-            DBTKEY      key ;
-            DBT         value ;
-	    DBC *	cursor ;
-
-	    /*
-		TODO!
-		set partial value to 0 - to eliminate the retrieval of
-		the value need to store any existing partial settings &
-		restore at the end.
-
-	     */
-            saveCurrentDB(db) ;
-	    DBT_clear(key) ;
-	    DBT_clear(value) ;
-	    /* If necessary create a cursor for FIRSTKEY/NEXTKEY use */
-	    if (!db->cursor &&
-		(db->Status = db_cursor(db, db->txn, &cursor, 0)) == 0 )
-	            db->cursor  = cursor ;
-
-	    if (db->cursor)
-	        RETVAL = (db->Status) =
-		    ((db->cursor)->c_get)(db->cursor, &key, &value, DB_FIRST);
-	    else
-		RETVAL = db->Status ;
-	    /* check for end of cursor */
-	    if (RETVAL == DB_NOTFOUND) {
-	      ((db->cursor)->c_close)(db->cursor) ;
-	      db->cursor = NULL ;
-	    }
-            ST(0) = sv_newmortal();
-	    OutputKey(ST(0), key)
-        }
-
-
-
-int
-NEXTKEY(db, key)
-        BerkeleyDB::Common  db
-        DBTKEY              key = NO_INIT
-        CODE:
-        {
-            DBT         value ;
-
-            saveCurrentDB(db) ;
-	    DBT_clear(key) ;
-	    DBT_clear(value) ;
-	    key.flags = 0 ;
-	    RETVAL = (db->Status) =
-		((db->cursor)->c_get)(db->cursor, &key, &value, DB_NEXT);
-
-	    /* check for end of cursor */
-	    if (RETVAL == DB_NOTFOUND) {
-	      ((db->cursor)->c_close)(db->cursor) ;
-	      db->cursor = NULL ;
-	    }
-            ST(0) = sv_newmortal();
-	    OutputKey(ST(0), key)
-        }
-
-MODULE = BerkeleyDB::_tiedArray        PACKAGE = BerkeleyDB::_tiedArray
-
-I32
-FETCHSIZE(db)
-        BerkeleyDB::Common         db
-        CODE:
-            saveCurrentDB(db) ;
-            RETVAL = GetArrayLength(db) ;
-        OUTPUT:
-            RETVAL
-
-
-MODULE = BerkeleyDB        PACKAGE = BerkeleyDB
-
-BOOT:
-  {
-    SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
-    SV * version_sv = perl_get_sv("BerkeleyDB::db_version", GV_ADD|GV_ADDMULTI) ;
-    SV * ver_sv = perl_get_sv("BerkeleyDB::db_ver", GV_ADD|GV_ADDMULTI) ;
-    int Major, Minor, Patch ;
-    (void)db_version(&Major, &Minor, &Patch) ;
-    /* Check that the versions of db.h and libdb.a are the same */
-    if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
-                || Patch != DB_VERSION_PATCH)
-        croak("\nBerkeleyDB needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
-                DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
-                Major, Minor, Patch) ;
-
-    if (Major < 2 || (Major == 2 && Minor < 6))
-    {
-        croak("BerkeleyDB needs Berkeley DB 2.6 or greater. This is %d.%d.%d\n",
-		Major, Minor, Patch) ;
-    }
-    sv_setpvf(version_sv, "%d.%d", Major, Minor) ;
-    sv_setpvf(ver_sv, "%d.%03d%03d", Major, Minor, Patch) ;
-    sv_setpv(sv_err, "");
-
-    DBT_clear(empty) ;
-    empty.data  = &zero ;
-    empty.size  =  sizeof(db_recno_t) ;
-    empty.flags = 0 ;
-
-  }
-
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
deleted file mode 100644
index ba9a9c0085d..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
+++ /dev/null
@@ -1,8 +0,0 @@
-
-package BerkeleyDB::Btree ;
-
-# This file is only used for MLDBM
-
-use BerkeleyDB ;
-
-1 ;
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
deleted file mode 100644
index 8e7bc7e78c7..00000000000
--- a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
+++ /dev/null
@@ -1,8 +0,0 @@
-
-package BerkeleyDB::Hash ;
-
-# This file is only used for MLDBM
-
-use BerkeleyDB ;
-
-1 ;
diff --git a/storage/bdb/perl/BerkeleyDB/Changes b/storage/bdb/perl/BerkeleyDB/Changes
deleted file mode 100644
index 8f3718a7f2b..00000000000
--- a/storage/bdb/perl/BerkeleyDB/Changes
+++ /dev/null
@@ -1,249 +0,0 @@
-Revision history for Perl extension BerkeleyDB.
-
-0.26  10th October 2004
-
-        * Changed to allow Building with Berkeley DB 4.3
-
-        * added cds_lock and associated methods as a convenience to allow
-          safe updaing of database records when using Berkeley DB CDS mode.
-
-        * added t/cds.t and t/pod.t
-
-        * Modified the test suite to use "-ErrFile => *STDOUT" where 
-          possible. This will make it easier to diagnose build issues.
-
-	* -Errfile will now accept a filehandle as well as a filename
-          This means that -ErrFile => *STDOUT will get all extended error
-          messages displayed directly on screen.
-
-	* Added support for set_shm_key & get_shm_key.
-
-	* Patch from Mark Jason Dominus to add a better error message
-	  when an odd number of parameters are passed to ParseParameters.
-
-	* fixed off-by-one error in my_strdup
-
-	* Fixed a problem with push, pop, shift & unshift with Queue &
-	  Recno when used in CDS mode. These methods were not using
-	  a write cursor behind the scenes. 
-	  Problem reported by Pavel Hlavnicka.
-
-0.25  1st November 2003
-
-	* Minor update to dbinfo
-
-	* Fixed a bug in the test harnesses that is only apparent in
-	  perl 5.8.2.  Original patch courtesy of Michael Schwern.
-
-0.24 27th September 2003
-
-        * Mentioned comp.databases.berkeley-db in README
-
-        * Builds with Berkeley DB 4.2
-
-        * The return type for db->db_fd was wrongly set at DualType -
-          should be int.
-
-0.23 15th June 2003
-
-        * Fixed problem where a secondary index would use the same
-          compare callback as the primary key, regardless of what was
-          defined for the secondary index. 
-	  Problem spotted by Dave Tallman.
-
-        * Also fixed a problem with the associate callback. If the value
-          for the secondary key was not a string, the secondary key was
-          being set incorrectly. This is now fixed.
-
-        * When built with Berkeley DB 3.2 or better, all callbacks now use
-          the BackRef pointer instead of the global CurrentDB. This was
-          done partially to fix the secondary index problem, above.
-
-        * The test harness was failing under cygwin. Now fixed. 
-
-        * Previous release broke TRACE. Fixed.
-
-0.22 17th May 2003
-
-        * win32 problem with open macro fixed.
-
-0.21 12th May 2003
-
-        * adding support for env->set_flags 
-	* adding recursion detection
-	* win32 problem with rename fixed.
-	* problem with sub-database name in Recno & Queue fixed.
-	* fixed the mldbm.t test harness to work with perl 5.8.0
-	* added a note about not using a network drive when running the
-	  test harness.
-	* fixed c_pget
-	* added BerkeleyDB::Env::DB_ENV method
-	* added support for encryption
-	* the dbinfo script will now indicate if the database is encrypted
-	* The CLEAR method is now CDB safe.
-
-0.20 2nd September 2002
-
-        * More support for building with Berkeley DB 4.1.x
-        * db->get & db->pget used the wrong output macro for DBM filters 
-          bug spotted by Aaron Ross.
-	* db_join didn't keep a reference to the cursors it was joining.
-          Spotted by Winton Davies.
-
-0.19 5th June 2002
-        * Removed the targets that used mkconsts from Makefile.PL. They relied
-          on a module that is not available in all versions of Perl.
-        * added support for env->set_verbose
-        * added support for db->truncate
-        * added support for db->rename via BerkeleyDB::db_rename
-        * added support for db->verify via BerkeleyDB::db_verify
-        * added support for db->associate, db->pget & cursor->c_pget
-        * Builds with Berkeley DB 4.1.x
- 
-
-0.18 6th January 2002
-        * Dropped support for ErrFile as a file handle. It was proving too
-          difficult to get at the underlying FILE * in XS.
-          Reported by Jonas Smedegaard (Debian powerpc) & Kenneth Olwing (Win32)
-        * Fixed problem with abort macro in XSUB.h clashing with txn abort
-          method in Berkeley DB 4.x -- patch supplied by Kenneth Olwing.
-        * DB->set_alloc was getting called too late in BerkeleyDB.xs. 
-          This was causing problems with ActivePerl -- problem reported
-          by Kenneth Olwing.
-        * When opening a queue, the Len proprty set the DB_PAD flag. 
-          Should have been DB_FIXEDLEN. Fix provided by Kenneth Olwing.
-        * Test harness fixes from Kenneth Olwing.
-
-0.17 23 September 2001
-        * Fixed a bug in BerkeleyDB::Recno - reported by Niklas Paulsson. 
-        * Added log_archive - patch supplied by Benjamin Holzman
-        * Added txn_discard
-        * Builds with Berkeley DB 4.0.x
-
-0.16 1 August 2001
-        * added support for Berkeley DB 3.3.x (but no support for any of the
-          new features just yet)
-
-0.15 26 April 2001
-        * Fixed a bug in the processing of the flags options in
-          db_key_range.
-        * added support for set_lg_max & set_lg_bsize
-        * allow DB_TMP_DIR and DB_TEMP_DIR
-        * the -Filename parameter to BerkeleyDB::Queue didn't work.
-        * added symbol DB_CONSUME_WAIT
-
-0.14 21st January 2001
-        * Silenced the warnings when build with a 64-bit Perl.
-        * Can now build with DB 3.2.3h (part of MySQL). The test harness
-          takes an age to do the queue test, but it does eventually pass.
-        * Mentioned the problems that occur when perl is built with sfio.
-
-0.13 15th January 2001
-        * Added support to allow this module to build with Berkeley DB 3.2  
-        * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
-          changes.  
-        * Documented the Solaris 2.7 core dump problem in README.
-        * Tidied up the test harness to fix a problem on Solaris where the
-          "fred" directory wasn't being deleted when it should have been.
-        * two calls to "open" clashed with a win32 macro.
-        * size argument for hash_cb is different for Berkeley DB 3.x 
-        * Documented the issue of building on Linux.
-        * Added -Server, -CacheSize & -LockDetect options 
-          [original patch supplied by Graham Barr]
-        * Added support for set_mutexlocks, c_count, set_q_extentsize,
-          key_range, c_dup
-	* Dropped the "attempted to close a Cursor with an open transaction"
-	  error in c_close. The correct behaviour is that the cursor
-	  should be closed before committing/aborting the transaction.
-
-0.12  2nd August 2000
-	* Serious bug with get fixed. Spotted by Sleepycat.
-	* Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
-
-0.11  4th June 2000
-	* When built with Berkeley Db 3.x there can be a clash with the close
-	  macro.
-	* Typo in the definition of DB_WRITECURSOR
-	* The flags parameter wasn't getting sent to db_cursor
-	* Plugged small memory leak in db_cursor (DESTROY wasn't freeing
-	  memory)
-	* Can be built with Berkeley DB 3.1
-   
-0.10  8th December 1999
-	* The DESTROY method was missing for BerkeleyDB::Env. This resulted in
-	  a memory leak. Fixed.
-	* If opening an environment or database failed, there was a small 
-	  memory leak. This has been fixed. 
-	* A thread-enabled Perl it could core when a database was closed. 
-	  Problem traced to the strdup function.
-
-0.09  29th November 1999
-        * the queue.t & subdb.t test harnesses were outputting a few
-          spurious warnings. This has been fixed.
-
-0.08  28nd November 1999
-	* More documentation updates
-	* Changed reference to files in /tmp in examples.t
-	* Fixed a typo in softCrash that caused problems when building
-	  with a thread-enabled Perl.
-	* BerkeleyDB::Error wasn't initialised properly.
-	* ANSI-ified all the static C functions in BerkeleyDB.xs
-	* Added support for the following DB 3.x features:
-	    + The Queue database type
-	    + db_remove
-	    + subdatabases 
-	    + db_stat for Hash & Queue
-
-0.07  21st September 1999
-	* Numerous small bug fixes.
-	* Added support for sorting duplicate values DB_DUPSORT.
-	* Added support for DB_GET_BOTH & DB_NEXT_DUP.
-	* Added get_dup (from DB_File).
-	* beefed up the documentation.
-	* Forgot to add the DB_INIT_CDB in BerkeleyDB.pm in previous release.
-	* Merged the DBM Filter code from DB_File into BerkeleyDB.
-	* Fixed a nasty bug where a closed transaction was still used with
-	  with dp_put, db_get etc.
-	* Added logic to gracefully close everything whenever a fatal error
-	  happens. Previously the plug was just pulled.
-	* It is now a fatal error to explicitly close an environment if there
-	  is still an open database; a database when there are open cursors or
-	  an open transaction; and a cursor if there is an open transaction.
-	  Using object destruction doesn't have this issue, as object
-	  references will ensure everything gets closed in the correct order.
-	* The BOOT code now checks that the version of db.h & libdb are the
-	  same - this seems to be a common problem on Linux.
-	* MLDBM support added.  
-	* Support for the new join cursor added.
-	* Builds with Berkeley DB 3.x
-   	* Updated dbinfo for Berkeley DB 3.x file formats.
-	* Deprecated the TxnMgr class. As with Berkeley DB version 3,
-	  txn_begin etc are now accessed via the environment object.
-	
-0.06  19 December 1998
-	* Minor modifications to get the module to build with DB 2.6.x
-	* Added support for DB 2.6.x's  Concurrent Access Method, DB_INIT_CDB.
-
-0.05  9 November 1998
-	* Added a note to README about how to build Berkeley DB 2.x
-	  when using HP-UX.
-	* Minor modifications to get the module to build with DB 2.5.x
-
-0.04  19 May 1998
-	* Define DEFSV & SAVE_DEFSV if not already defined. This allows
-	  the module to be built with Perl 5.004_04.
-
-0.03  5 May 1998
-	* fixed db_get with DB_SET_RECNO
-	* fixed c_get with DB_SET_RECNO and DB_GET_RECNO
-	* implemented BerkeleyDB::Unknown
-	* implemented BerkeleyDB::Recno, including push, pop etc
-	  modified the txn support. 
-
-0.02  30 October 1997
-	* renamed module to BerkeleyDB	
-	* fixed a few bugs & added more tests
-
-0.01  23 October 1997
-	* first alpha release as BerkDB.
-
diff --git a/storage/bdb/perl/BerkeleyDB/MANIFEST b/storage/bdb/perl/BerkeleyDB/MANIFEST
deleted file mode 100644
index 7c090a17584..00000000000
--- a/storage/bdb/perl/BerkeleyDB/MANIFEST
+++ /dev/null
@@ -1,60 +0,0 @@
-BerkeleyDB.pm
-BerkeleyDB.pod
-BerkeleyDB.pod.P
-BerkeleyDB.xs
-BerkeleyDB/Btree.pm
-BerkeleyDB/Hash.pm
-Changes
-config.in
-constants.h
-constants.xs
-dbinfo
-hints/dec_osf.pl
-hints/solaris.pl
-hints/irix_6_5.pl
-Makefile.PL
-MANIFEST
-mkconsts
-mkpod
-ppport.h
-README
-t/btree.t
-t/cds.t
-t/db-3.0.t
-t/db-3.1.t
-t/db-3.2.t
-t/db-3.3.t
-t/destroy.t
-t/encrypt.t
-t/env.t
-t/examples.t
-t/examples.t.T
-t/examples3.t
-t/examples3.t.T
-t/filter.t
-t/hash.t
-t/join.t
-t/mldbm.t
-t/pod.t
-t/queue.t
-t/recno.t
-t/strict.t
-t/subdb.t
-t/txn.t
-t/unknown.t
-t/util.pm
-Todo
-typemap
-patches/5.004
-patches/5.004_01
-patches/5.004_02
-patches/5.004_03
-patches/5.004_04
-patches/5.004_05
-patches/5.005
-patches/5.005_01
-patches/5.005_02
-patches/5.005_03
-patches/5.6.0
-scan
-META.yml                                 Module meta-data (added by MakeMaker)
diff --git a/storage/bdb/perl/BerkeleyDB/META.yml b/storage/bdb/perl/BerkeleyDB/META.yml
deleted file mode 100644
index 3b205c9ce4e..00000000000
--- a/storage/bdb/perl/BerkeleyDB/META.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# http://module-build.sourceforge.net/META-spec.html
-#XXXXXXX This is a prototype!!!  It will change in the future!!! XXXXX#
-name:         BerkeleyDB
-version:      0.25
-version_from: BerkeleyDB.pm
-installdirs:  site
-requires:
-
-distribution_type: module
-generated_by: ExtUtils::MakeMaker version 6.17
diff --git a/storage/bdb/perl/BerkeleyDB/Makefile.PL b/storage/bdb/perl/BerkeleyDB/Makefile.PL
deleted file mode 100644
index 0c926a394ba..00000000000
--- a/storage/bdb/perl/BerkeleyDB/Makefile.PL
+++ /dev/null
@@ -1,134 +0,0 @@
-#! perl -w
-
-# It should not be necessary to edit this file. The configuration for
-# BerkeleyDB is controlled from the file config.in
-
-
-BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
-
-use strict ;
-use ExtUtils::MakeMaker ;
-use Config ;
-
-# Check for the presence of sfio
-if ($Config{'d_sfio'}) {
-   print < 'BerkeleyDB',
-	LIBS 		=> ["-L${LIB_DIR} $LIBS"],
-	#MAN3PODS        => {},         # Pods will be built by installman. 
-	INC		=> "-I$INC_DIR",
-	VERSION_FROM	=> 'BerkeleyDB.pm',
-	XSPROTOARG	=> '-noprototypes',
-	DEFINE		=> "$OS2 $WALL",
-	#'macro'		=> { INSTALLDIRS => 'perl' },
-        'dist'          => {COMPRESS=>'gzip', SUFFIX=>'gz'},    
-	($] >= 5.005
-	    ? (ABSTRACT_FROM	=> 'BerkeleyDB.pod',
-	       AUTHOR  	=> 'Paul Marquess ')
-	    : ()
-	),
-	);
-
-
-sub MY::postamble {
-	'
-$(NAME).pod:	$(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
-	perl ./mkpod
-
-$(NAME).xs:	typemap
-	$(TOUCH) $(NAME).xs
-
-Makefile:	config.in 
-
-
-' ;
-}
-
-sub ParseCONFIG
-{
-    my ($k, $v) ;
-    my @badkey = () ;
-    my %Info = () ;
-    my @Options = qw( INCLUDE LIB DBNAME ) ;
-    my %ValidOption = map {$_, 1} @Options ;
-    my %Parsed = %ValidOption ;
-    my $CONFIG = 'config.in' ;
-
-    print "Parsing $CONFIG...\n" ;
-
-    # DBNAME is optional, so pretend it has been parsed.
-    delete $Parsed{'DBNAME'} ;
-
-    open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
-    while () {
-	s/^\s*|\s*$//g ;
-	next if /^\s*$/ or /^\s*#/ ;
-	s/\s*#\s*$// ;
-
-	($k, $v) = split(/\s+=\s+/, $_, 2) ;
-	$k = uc $k ;
-	if ($ValidOption{$k}) {
-	    delete $Parsed{$k} ;
-	    $Info{$k} = $v ;
-	}
-	else {
-	    push(@badkey, $k) ;
-	}
-    }
-    close F ;
-
-    print "Unknown keys in $CONFIG ignored [@badkey]\n"
-	if @badkey ;
-
-    # check parsed values
-    my @missing = () ;
-    die "The following keys are missing from $CONFIG file: [@missing]\n" 
-        if @missing = keys %Parsed ;
-
-    $INC_DIR =  $ENV{'BERKELEYDB_INCLUDE'} || $Info{'INCLUDE'} ;
-    $LIB_DIR =  $ENV{'BERKELEYDB_LIB'} || $Info{'LIB'} ;
-    $DB_NAME = $ENV{BERKELEYDB_NAME} || $Info{'DBNAME'} ;
-    #$DB_NAME =  $ENV{} || $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
-
-    print "Looks Good.\n" ;
-
-}
-
-# end of file Makefile.PL
diff --git a/storage/bdb/perl/BerkeleyDB/README b/storage/bdb/perl/BerkeleyDB/README
deleted file mode 100644
index 3c08d2c822e..00000000000
--- a/storage/bdb/perl/BerkeleyDB/README
+++ /dev/null
@@ -1,640 +0,0 @@
-                                   BerkeleyDB
-
-                                  Version 0.26
-
-                                 10th Oct 2004
-
-	Copyright (c) 1997-2004 Paul Marquess. All rights reserved. This
-	program is free software; you can redistribute it and/or modify
-	it under the same terms as Perl itself.
-
-
-DESCRIPTION
------------
-
-BerkeleyDB is a module which allows Perl programs to make use of the
-facilities provided by Berkeley DB version 2 or greater. (Note: if
-you want to use version 1 of Berkeley DB with Perl you need the DB_File
-module).
-
-Berkeley DB is a C library which provides a consistent interface to a
-number of database formats. BerkeleyDB provides an interface to all
-four of the database types (hash, btree, queue and recno) currently
-supported by Berkeley DB.
-
-For further details see the documentation in the file BerkeleyDB.pod.
-
-PREREQUISITES
--------------
-
-Before you can build BerkeleyDB you need to have the following
-installed on your system:
-
-    * To run the test harness for this module, you must make sure that the
-      directory where you have untarred this module is NOT a network
-      drive, e.g. NFS or AFS.
-
-    * Perl 5.004_04 or greater.
-
-    * Berkeley DB Version 2.6.4 or greater
-
-      The official web site for Berkeley DB is http://www.sleepycat.com
-
-      The latest version of Berkeley DB is always available there. It
-      is recommended that you use the most recent version available at
-      the Sleepycat site.
-
-      The one exception to this advice is where you want to use BerkeleyDB
-      to access database files created by a third-party application,
-      like Sendmail. In these cases you must build BerkeleyDB with a
-      compatible version of Berkeley DB.
-
-
-BUILDING THE MODULE
--------------------
-
-Assuming you have met all the prerequisites, building the module should
-be relatively straightforward.
-
-Step 1 : If you are running Solaris 2.5, 2.7 or HP-UX 10 read either
-         the Solaris Notes or HP-UX Notes sections below.
-	 If you are running Linux please read the Linux Notes section
-         before proceeding.
-
-Step 2 : Edit the file config.in to suit you local installation.
-         Instructions are given in the file.
-
-Step 3 : Build and test the module using this sequence of commands:
-
-             perl Makefile.PL
-             make
-             make test
-
-INSTALLATION
-------------
-
-    make install
-
-TROUBLESHOOTING
-===============
-
-Here are some of the problems that people encounter when building BerkeleyDB.
-
-Missing db.h or libdb.a
------------------------
-
-If you get an error like this:
-
-  cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
-  -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
-  -I/usr/local/lib/perl5/5.00503/i586-linux/CORE  BerkeleyDB.c
-  BerkeleyDB.xs:52: db.h: No such file or directory
-
-or this:
-
-  cc -c -I./libraries/2.7.5 -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
-  -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
-  -I/usr/local/lib/perl5/5.00503/i586-linux/CORE  BerkeleyDB.c
-  LD_RUN_PATH="/lib" cc -o blib/arch/auto/BerkeleyDB/BerkeleyDB.so  -shared
-  -L/usr/local/lib BerkeleyDB.o
-  -L/home/paul/perl/ext/BerkDB/BerkeleyDB/libraries -ldb
-  ld: cannot open -ldb: No such file or directory
-
-This symptom can imply:
-
- 1. You don't have Berkeley DB installed on your system at all.
-    Solution: get & install Berkeley DB.
-
- 2. You do have Berkeley DB installed, but it isn't in a standard place.
-    Solution: Edit config.in and set the LIB and INCLUDE variables to point
-              to the directories where libdb.a and db.h are installed.
-
-#error db.h is not for Berkeley DB at all.
-------------------------------------------
-
-If you get the error above when building this module it means that there
-is a file called "db.h" on your system that isn't the one that comes
-with Berkeley DB.
-
-Options:
-
- 1. You don't have Berkeley DB installed on your system at all.
-    Solution: get & install Berkeley DB.
-
- 2. Edit config.in and make sure the INCLUDE variable points to the
-    directory where the Berkeley DB file db.h is installed.
-
- 3. If option 2 doesn't work, try tempoarily renaming the db.h file
-    that is causing the error.
-
-#error db.h is for Berkeley DB 1.x - need at least Berkeley DB 2.6.4
---------------------------------------------------------------------
-
-The error above will occur if there is a copy of the Berkeley DB 1.x
-file db.h on your system.
-
-This error will happen when 
-
-  1. you only have Berkeley DB version 1 on your system.
-     Solution: get & install a newer version of Berkeley DB.
-
-  2. you have both version 1 and a later version of Berkeley DB
-     installed on your system. When building BerkeleyDB it attempts to
-     use the db.h for Berkeley DB version 1.
-     Solution: Edit config.in and set the LIB and INCLUDE variables
-               to point to the directories where libdb.a and db.h are
-               installed.
-
-
-#error db.h is for Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
-------------------------------------------------------------------------
-
-The error above will occur if there is a copy of the the file db.h for
-Berkeley DB 2.0 to 2.5 on your system.
-
-This symptom can imply:
-
- 1. You don't have a new enough version of Berkeley DB.
-    Solution: get & install a newer version of Berkeley DB.
-
- 2. You have the correct version of Berkeley DB installed, but it isn't
-    in a standard place.
-    Solution: Edit config.in and set the LIB and INCLUDE variables
-              to point to the directories where libdb.a and db.h are
-              installed.
-
-Undefined Symbol: txn_stat 
---------------------------
-
-BerkeleyDB seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test
-  PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
-  -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
-  -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
-  $verbose=0; runtests @ARGV;' t/*.t
-  t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
-  module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
-  undefined symbol: txn_stat
-  at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
-  ...
-
-This error usually happens when you have both version 1 and a newer version
-of Berkeley DB installed on your system. BerkeleyDB attempts
-to build using the db.h for Berkeley DB version 2/3/4 and the version 1
-library. Unfortunately the two versions aren't compatible with each
-other. BerkeleyDB can only be built with Berkeley DB version 2, 3 or 4.
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want BerkeleyDB to use.
-
-Undefined Symbol: db_appinit 
-----------------------------
-
-BerkeleyDB seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test
-  PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch 
-  -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux 
-  -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness 
-  qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
-  t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for 
-  module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so: 
-  undefined symbol: db_appinit 
-  at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm 
-  ...
-
-
-This error usually happens when you have both version 2 and version
-3 of Berkeley DB installed on your system and BerkeleyDB attempts
-to build using the db.h for Berkeley DB version 2 and the version 3
-library. Unfortunately the two versions aren't compatible with each
-other. 
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want BerkeleyDB to use.
-
-Undefined Symbol: db_create
----------------------------
-
-BerkeleyDB seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test   
-  PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch 
-  -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux 
-  -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness 
-  qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
-  t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for 
-  module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so: 
-  undefined symbol: db_create 
-  at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm 
-  ...
-
-This error usually happens when you have both version 2 and version
-3 of Berkeley DB installed on your system and BerkeleyDB attempts
-to build using the db.h for Berkeley DB version 3 and the version 2
-library. Unfortunately the two versions aren't compatible with each
-other. 
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want BerkeleyDB to use.
-
-
-Incompatible versions of db.h and libdb
----------------------------------------
-
-BerkeleyDB seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test
-  PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
-  -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
-  -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
-  $verbose=0; runtests @ARGV;' t/*.t
-  t/btree.............
-  BerkeleyDB needs compatible versions of libdb & db.h
-          you have db.h version 2.6.4 and libdb version 2.7.5
-  BEGIN failed--compilation aborted at t/btree.t line 25.
-  dubious
-        Test returned status 255 (wstat 65280, 0xff00)
-  ...	
-
-Another variation on the theme of having two versions of Berkeley DB on
-your system. 
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want BerkeleyDB to use.
-	  If you are running Linux, please read the Linux Notes section below.
-
-
-
-Solaris build fails with "language optional software package not installed"
----------------------------------------------------------------------------
-
-If you are trying to build this module under Solaris and you get an
-error message like this
-
-    /usr/ucb/cc: language optional software package not installed
-
-it means that Perl cannot find the C compiler on your system. The cryptic
-message is just Sun's way of telling you that you haven't bought their
-C compiler.
-
-When you build a Perl module that needs a C compiler, the Perl build
-system tries to use the same C compiler that was used to build perl
-itself. In this case your Perl binary was built with a C compiler that
-lived in /usr/ucb.
-
-To continue with building this module, you need to get a C compiler,
-or tell Perl where your C compiler is, if you already have one.
-
-Assuming you have now got a C compiler, what you do next will be dependant
-on what C compiler you have installed. If you have just installed Sun's
-C compiler, you shouldn't have to do anything. Just try rebuilding
-this module.
-
-If you have installed another C compiler, say gcc, you have to tell perl
-how to use it instead of /usr/ucb/cc.
-
-This set of options seems to work if you want to use gcc. Your mileage
-may vary.
-
-    perl Makefile.PL CC=gcc CCCDLFLAGS=-fPIC OPTIMIZE=" "
-    make test
-
-If that doesn't work for you, it's time to make changes to the Makefile
-by hand. Good luck!
-
-
-
-Solaris build fails with "gcc: unrecognized option `-KPIC'"
------------------------------------------------------------
-
-You are running Solaris and you get an error like this when you try to
-build this Perl module
-
-    gcc: unrecognized option `-KPIC'
-
-This symptom usually means that you are using a Perl binary that has been
-built with the Sun C compiler, but you are using gcc to build this module.
-
-When Perl builds modules that need a C compiler, it will attempt to use
-the same C compiler and command line options that was used to build perl
-itself. In this case "-KPIC" is a valid option for the Sun C compiler,
-but not for gcc. The equivalent option for gcc is "-fPIC".
-
-The solution is either:
-
-    1. Build both Perl and this module with the same C compiler, either
-       by using the Sun C compiler for both or gcc for both.
-
-    2. Try generating the Makefile for this module like this perl
-
-           perl Makefile.PL CC=gcc CCCDLFLAGS=-fPIC OPTIMIZE=" " LD=gcc
-           make test
-
-       This second option seems to work when mixing a Perl binary built
-       with the Sun C compiler and this module built with gcc. Your
-       mileage may vary.
-
-
-
-Network Drive
--------------
-
-BerkeleyDB seems to have built correctly, but you get a series of errors
-like this when you run the test harness:
-
-
-t/btree........NOK 178Can't call method "txn_begin" on an undefined value at t/btree.t line 637.
-t/btree........dubious                                                       
-        Test returned status 11 (wstat 2816, 0xb00)
-DIED. FAILED tests 28, 178-244
-        Failed 68/244 tests, 72.13% okay
-t/db-3.0.......NOK 2Can't call method "set_mutexlocks" on an undefined value at t/db-3.0.t line 39.
-t/db-3.0.......dubious                                                       
-        Test returned status 11 (wstat 2816, 0xb00)
-DIED. FAILED tests 2-14
-        Failed 13/14 tests, 7.14% okay
-t/db-3.1.......ok                                                            
-t/db-3.2.......NOK 5Can't call method "set_flags" on an undefined value at t/db-3.2.t line 62.
-t/db-3.2.......dubious                                                       
-        Test returned status 11 (wstat 2816, 0xb00)
-DIED. FAILED tests 3, 5-6
-        Failed 3/6 tests, 50.00% okay
-t/db-3.3.......ok                  
-
-This pattern of errors happens if you have built the module in a directory
-that is network mounted (e.g. NFS ar AFS).
-
-The solution is to use a local drive. Berkeley DB doesn't support
-network drives.
-
-
-Berkeley DB library configured to support only DB_PRIVATE environments
-----------------------------------------------------------------------
-
-BerkeleyDB seems to have built correctly, but you get a series of errors
-like this when you run the test harness:
-
-  t/btree........ok 27/244
-  # : Berkeley DB library configured to support only DB_PRIVATE environments
-  t/btree........ok 177/244
-  # : Berkeley DB library configured to support only DB_PRIVATE environments
-  t/btree........NOK 178Can't call method "txn_begin" on an undefined value at t/btree.t line 638.
-  t/btree........dubious
-          Test returned status 2 (wstat 512, 0x200)
-  Scalar found where operator expected at (eval 153) line 1, near "'int'  $__val"
-        (Missing operator before   $__val?)
-  DIED. FAILED tests 28, 178-244
-          Failed 68/244 tests, 72.13% okay
-
-
-Some versions of Redhat Linux, and possibly some other Linux
-distributions, include a seriously restricted build of the
-Berkeley DB library that is incompatible with this module. See
-https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=91933 for an
-exhaustive discussion on the reasons for this.
-
-
-Solution:
-
-You will have to build a private copy of the Berkeley DB library and
-use it when building this Perl module.
-
-
-
-Linux Notes
------------
-
-Some versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
-that has version 2.x of Berkeley DB linked into it. This makes it
-difficult to build this module with anything other than the version of
-Berkeley DB that shipped with your Linux release. If you do try to use
-a different version of Berkeley DB you will most likely get the error
-described in the "Incompatible versions of db.h and libdb" section of
-this file.
-
-To make matters worse, prior to Perl 5.6.1, the perl binary itself
-*always* included the Berkeley DB library.
-
-If you want to use a newer version of Berkeley DB with this module, the
-easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
-(or better).
-
-There are two approaches you can use to get older versions of Perl to
-work with specific versions of Berkeley DB. Both have their advantages
-and disadvantages.
-
-The first approach will only work when you want to build a version of
-Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
-Berkeley DB 2.x, you must use the next approach. This approach involves
-rebuilding your existing version of Perl after applying an unofficial
-patch. The "patches" directory in the this module's source distribution
-contains a number of patch files. There is one patch file for every
-stable version of Perl since 5.004. Apply the appropriate patch to your
-Perl source tree before re-building and installing Perl from scratch.
-For example, assuming you are in the top-level source directory for
-Perl 5.6.0, the command below will apply the necessary patch. Remember
-to replace the path shown below with one that points to this module's
-patches directory.
-
-    patch -p1 -N 
-
diff --git a/storage/bdb/perl/BerkeleyDB/Todo b/storage/bdb/perl/BerkeleyDB/Todo
deleted file mode 100644
index 12d53bcf91c..00000000000
--- a/storage/bdb/perl/BerkeleyDB/Todo
+++ /dev/null
@@ -1,57 +0,0 @@
-
-  * Proper documentation.
-
-  * address or document the "close all cursors if you encounter an error"
-
-  * Change the $BerkeleyDB::Error to store the info in the db object,
-    if possible.
-
-  * $BerkeleyDB::db_version is documented. &db_version isn't.
-
-  * migrate perl code into the .xs file where necessary
-
-  * convert as many of the DB examples files to BerkeleyDB format.
-
-  * add a method to the DB object to allow access to the environment (if there
-    actually is one).
-
-
-Possibles
-
-  * use '~' magic to store the inner data.
-
-  * for the get stuff zap the value to undef if it doesn't find the
-    key. This may be more intuitive for those folks who are used with
-    the $hash{key} interface.
-
-  * Text interface? This can be done as via Recno
-
-  * allow recno to allow base offset for arrays to be either 0 or 1.
-
-  * when duplicate keys are enabled, allow db_put($key, [$val1, $val2,...]) 
-
-
-2.x -> 3.x Upgrade
-==================
-
-Environment Verbose
-Env->open mode
-DB cache size extra parameter
-DB->open subdatabases	Done
-An empty environment causes DB->open to fail
-where is __db.001 coming from? db_remove seems to create it. Bug in 3.0.55
-Change db_strerror for 0 to ""? Done
-Queue	Done
-db_stat for Hash & Queue	Done
-No TxnMgr
-DB->remove
-ENV->remove
-ENV->set_verbose
-upgrade
-
-    $env = BerkeleyDB::Env::Create
-    $env = create BerkeleyDB::Env
-    $status = $env->open()
-
-    $db = BerkeleyDB::Hash::Create
-    $status = $db->open()
diff --git a/storage/bdb/perl/BerkeleyDB/config.in b/storage/bdb/perl/BerkeleyDB/config.in
deleted file mode 100644
index 3c37ea937a9..00000000000
--- a/storage/bdb/perl/BerkeleyDB/config.in
+++ /dev/null
@@ -1,45 +0,0 @@
-# Filename:	config.in
-#
-# written by Paul Marquess 
-
-# 1. Where is the file db.h?
-#
-#    Change the path below to point to the directory where db.h is
-#    installed on your system.
-
-#INCLUDE	= /usr/local/include
-#INCLUDE	= ../..
-INCLUDE	= /usr/local/BerkeleyDB/include
-
-# 2. Where is libdb?
-#
-#    Change the path below to point to the directory where libdb is
-#    installed on your system.
-
-#LIB	= /usr/local/lib
-#LIB	= ../..
-LIB	= /usr/local/BerkeleyDB/lib
-
-# 3. Is the library called libdb?
-#
-#    If you have copies of both 1.x and 2.x Berkeley DB installed on
-#    your system it can sometimes be tricky to make sure you are using
-#    the correct one. Renaming one (or creating a symbolic link) to
-#    include the version number of the library can help.
-#
-#    For example, if you have Berkeley DB 2.6.4  you could rename the
-#    Berkeley DB library from libdb.a to libdb-2.6.4.a  and change the
-#    DBNAME line below to look like this:
-#
-#        DBNAME = -ldb-2.6.4 
-#
-#    Note: If you are building this module with Win32, -llibdb will be
-#    used by default.
-#
-#    If you have changed the name of the library, uncomment the line
-#    below (by removing the leading #) and edit the line to use the name
-#    you have picked.
-
-#DBNAME = -ldb-3.0
-
-# end of file config.in
diff --git a/storage/bdb/perl/BerkeleyDB/constants.h b/storage/bdb/perl/BerkeleyDB/constants.h
deleted file mode 100644
index 98dc5eb1621..00000000000
--- a/storage/bdb/perl/BerkeleyDB/constants.h
+++ /dev/null
@@ -1,4530 +0,0 @@
-#define PERL_constant_NOTFOUND	1
-#define PERL_constant_NOTDEF	2
-#define PERL_constant_ISIV	3
-#define PERL_constant_ISNO	4
-#define PERL_constant_ISNV	5
-#define PERL_constant_ISPV	6
-#define PERL_constant_ISPVN	7
-#define PERL_constant_ISSV	8
-#define PERL_constant_ISUNDEF	9
-#define PERL_constant_ISUV	10
-#define PERL_constant_ISYES	11
-
-#ifndef NVTYPE
-typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it.  */
-#endif
-#ifndef aTHX_
-#define aTHX_ /* 5.6 or later define this for threading support.  */
-#endif
-#ifndef pTHX_
-#define pTHX_ /* 5.6 or later define this for threading support.  */
-#endif
-
-static int
-constant_6 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_DUP DB_PAD DB_RMW DB_SET */
-  /* Offset 3 gives the best switch position.  */
-  switch (name[3]) {
-  case 'D':
-    if (memEQ(name, "DB_DUP", 6)) {
-    /*                  ^        */
-#ifdef DB_DUP
-      *iv_return = DB_DUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_PAD", 6)) {
-    /*                  ^        */
-#ifdef DB_PAD
-      *iv_return = DB_PAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_RMW", 6)) {
-    /*                  ^        */
-#ifdef DB_RMW
-      *iv_return = DB_RMW;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_SET", 6)) {
-    /*                  ^        */
-#ifdef DB_SET
-      *iv_return = DB_SET;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_7 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_EXCL DB_HASH DB_LAST DB_NEXT DB_PREV */
-  /* Offset 3 gives the best switch position.  */
-  switch (name[3]) {
-  case 'E':
-    if (memEQ(name, "DB_EXCL", 7)) {
-    /*                  ^         */
-#ifdef DB_EXCL
-      *iv_return = DB_EXCL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'H':
-    if (memEQ(name, "DB_HASH", 7)) {
-    /*                  ^         */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_HASH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_LAST", 7)) {
-    /*                  ^         */
-#ifdef DB_LAST
-      *iv_return = DB_LAST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_NEXT", 7)) {
-    /*                  ^         */
-#ifdef DB_NEXT
-      *iv_return = DB_NEXT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_PREV", 7)) {
-    /*                  ^         */
-#ifdef DB_PREV
-      *iv_return = DB_PREV;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_8 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO DB_UNREF */
-  /* Offset 4 gives the best switch position.  */
-  switch (name[4]) {
-  case 'E':
-    if (memEQ(name, "DB_RECNO", 8)) {
-    /*                   ^         */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_RECNO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'F':
-    if (memEQ(name, "DB_AFTER", 8)) {
-    /*                   ^         */
-#ifdef DB_AFTER
-      *iv_return = DB_AFTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_FIRST", 8)) {
-    /*                   ^         */
-#ifdef DB_FIRST
-      *iv_return = DB_FIRST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_FLUSH", 8)) {
-    /*                   ^         */
-#ifdef DB_FLUSH
-      *iv_return = DB_FLUSH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_UNREF", 8)) {
-    /*                   ^         */
-#ifdef DB_UNREF
-      *iv_return = DB_UNREF;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_FORCE", 8)) {
-    /*                   ^         */
-#ifdef DB_FORCE
-      *iv_return = DB_FORCE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_BTREE", 8)) {
-    /*                   ^         */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_BTREE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_QUEUE", 8)) {
-    /*                   ^         */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 55)
-      *iv_return = DB_QUEUE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_9 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_APPEND DB_BEFORE DB_CHKSUM DB_CLIENT DB_COMMIT DB_CREATE DB_CURLSN
-     DB_DIRECT DB_EXTENT DB_GETREC DB_NOCOPY DB_NOMMAP DB_NOSYNC DB_RDONLY
-     DB_RECNUM DB_THREAD DB_VERIFY */
-  /* Offset 7 gives the best switch position.  */
-  switch (name[7]) {
-  case 'A':
-    if (memEQ(name, "DB_NOMMAP", 9)) {
-    /*                      ^       */
-#ifdef DB_NOMMAP
-      *iv_return = DB_NOMMAP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_THREAD", 9)) {
-    /*                      ^       */
-#ifdef DB_THREAD
-      *iv_return = DB_THREAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_DIRECT", 9)) {
-    /*                      ^       */
-#ifdef DB_DIRECT
-      *iv_return = DB_DIRECT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_GETREC", 9)) {
-    /*                      ^       */
-#ifdef DB_GETREC
-      *iv_return = DB_GETREC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'F':
-    if (memEQ(name, "DB_VERIFY", 9)) {
-    /*                      ^       */
-#ifdef DB_VERIFY
-      *iv_return = DB_VERIFY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_COMMIT", 9)) {
-    /*                      ^       */
-#ifdef DB_COMMIT
-      *iv_return = DB_COMMIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_RDONLY", 9)) {
-    /*                      ^       */
-#ifdef DB_RDONLY
-      *iv_return = DB_RDONLY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_APPEND", 9)) {
-    /*                      ^       */
-#ifdef DB_APPEND
-      *iv_return = DB_APPEND;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_CLIENT", 9)) {
-    /*                      ^       */
-#ifdef DB_CLIENT
-      *iv_return = DB_CLIENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_EXTENT", 9)) {
-    /*                      ^       */
-#ifdef DB_EXTENT
-      *iv_return = DB_EXTENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOSYNC", 9)) {
-    /*                      ^       */
-#ifdef DB_NOSYNC
-      *iv_return = DB_NOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_NOCOPY", 9)) {
-    /*                      ^       */
-#ifdef DB_NOCOPY
-      *iv_return = DB_NOCOPY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_BEFORE", 9)) {
-    /*                      ^       */
-#ifdef DB_BEFORE
-      *iv_return = DB_BEFORE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_CURLSN", 9)) {
-    /*                      ^       */
-#ifdef DB_CURLSN
-      *iv_return = DB_CURLSN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_CREATE", 9)) {
-    /*                      ^       */
-#ifdef DB_CREATE
-      *iv_return = DB_CREATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_CHKSUM", 9)) {
-    /*                      ^       */
-#ifdef DB_CHKSUM
-      *iv_return = DB_CHKSUM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RECNUM", 9)) {
-    /*                      ^       */
-#ifdef DB_RECNUM
-      *iv_return = DB_RECNUM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_10 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_CONSUME DB_CURRENT DB_DELETED DB_DUPSORT DB_ENCRYPT DB_ENV_CDB
-     DB_ENV_TXN DB_INORDER DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH
-     DB_PRIVATE DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_SEQ_DEC DB_SEQ_INC
-     DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN DB_UPGRADE */
-  /* Offset 5 gives the best switch position.  */
-  switch (name[5]) {
-  case 'C':
-    if (memEQ(name, "DB_ENCRYPT", 10)) {
-    /*                    ^           */
-#ifdef DB_ENCRYPT
-      *iv_return = DB_ENCRYPT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RECOVER", 10)) {
-    /*                    ^           */
-#ifdef DB_RECOVER
-      *iv_return = DB_RECOVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_UPGRADE", 10)) {
-    /*                    ^           */
-#ifdef DB_UPGRADE
-      *iv_return = DB_UPGRADE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_JOINENV", 10)) {
-    /*                    ^           */
-#ifdef DB_JOINENV
-      *iv_return = DB_JOINENV;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PRIVATE", 10)) {
-    /*                    ^           */
-#ifdef DB_PRIVATE
-      *iv_return = DB_PRIVATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'K':
-    if (memEQ(name, "DB_UNKNOWN", 10)) {
-    /*                    ^           */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_UNKNOWN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_DELETED", 10)) {
-    /*                    ^           */
-#ifdef DB_DELETED
-      *iv_return = DB_DELETED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SALVAGE", 10)) {
-    /*                    ^           */
-#ifdef DB_SALVAGE
-      *iv_return = DB_SALVAGE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_TIMEOUT", 10)) {
-    /*                    ^           */
-#ifdef DB_TIMEOUT
-      *iv_return = DB_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_CONSUME", 10)) {
-    /*                    ^           */
-#ifdef DB_CONSUME
-      *iv_return = DB_CONSUME;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_CKP", 10)) {
-    /*                    ^           */
-#ifdef DB_TXN_CKP
-      *iv_return = DB_TXN_CKP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_INORDER", 10)) {
-    /*                    ^           */
-#ifdef DB_INORDER
-      *iv_return = DB_INORDER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_DUPSORT", 10)) {
-    /*                    ^           */
-#ifdef DB_DUPSORT
-      *iv_return = DB_DUPSORT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOPANIC", 10)) {
-    /*                    ^           */
-#ifdef DB_NOPANIC
-      *iv_return = DB_NOPANIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Q':
-    if (memEQ(name, "DB_SEQ_DEC", 10)) {
-    /*                    ^           */
-#ifdef DB_SEQ_DEC
-      *iv_return = DB_SEQ_DEC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SEQ_INC", 10)) {
-    /*                    ^           */
-#ifdef DB_SEQ_INC
-      *iv_return = DB_SEQ_INC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_CURRENT", 10)) {
-    /*                    ^           */
-#ifdef DB_CURRENT
-      *iv_return = DB_CURRENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_ENV_CDB", 10)) {
-    /*                    ^           */
-#ifdef DB_ENV_CDB
-      *iv_return = DB_ENV_CDB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_TXN", 10)) {
-    /*                    ^           */
-#ifdef DB_ENV_TXN
-      *iv_return = DB_ENV_TXN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_KEYLAST", 10)) {
-    /*                    ^           */
-#ifdef DB_KEYLAST
-      *iv_return = DB_KEYLAST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_OK_HASH", 10)) {
-    /*                    ^           */
-#ifdef DB_OK_HASH
-      *iv_return = DB_OK_HASH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PR_PAGE", 10)) {
-    /*                    ^           */
-#ifdef DB_PR_PAGE
-      *iv_return = DB_PR_PAGE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_11 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_DEGREE_2 DB_FILEOPEN DB_FIXEDLEN
-     DB_GET_BOTH DB_INIT_CDB DB_INIT_LOG DB_INIT_REP DB_INIT_TXN DB_KEYEMPTY
-     DB_KEYEXIST DB_KEYFIRST DB_LOCKDOWN DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC
-     DB_LOG_DISK DB_LOG_PERM DB_MULTIPLE DB_NEXT_DUP DB_NOSERVER DB_NOTFOUND
-     DB_OK_BTREE DB_OK_QUEUE DB_OK_RECNO DB_POSITION DB_QAMMAGIC DB_RENUMBER
-     DB_SEQ_WRAP DB_SNAPSHOT DB_STAT_ALL DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK
-     DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO DB_WRNOSYNC DB_YIELDCPU */
-  /* Offset 8 gives the best switch position.  */
-  switch (name[8]) {
-  case 'A':
-    if (memEQ(name, "DB_ARCH_ABS", 11)) {
-    /*                       ^         */
-#ifdef DB_ARCH_ABS
-      *iv_return = DB_ARCH_ABS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_STAT_ALL", 11)) {
-    /*                       ^         */
-#ifdef DB_STAT_ALL
-      *iv_return = DB_STAT_ALL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TRUNCATE", 11)) {
-    /*                       ^         */
-#ifdef DB_TRUNCATE
-      *iv_return = DB_TRUNCATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'B':
-    if (memEQ(name, "DB_RENUMBER", 11)) {
-    /*                       ^         */
-#ifdef DB_RENUMBER
-      *iv_return = DB_RENUMBER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_INIT_CDB", 11)) {
-    /*                       ^         */
-#ifdef DB_INIT_CDB
-      *iv_return = DB_INIT_CDB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OK_RECNO", 11)) {
-    /*                       ^         */
-#ifdef DB_OK_RECNO
-      *iv_return = DB_OK_RECNO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_YIELDCPU", 11)) {
-    /*                       ^         */
-#ifdef DB_YIELDCPU
-      *iv_return = DB_YIELDCPU;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'D':
-    if (memEQ(name, "DB_NEXT_DUP", 11)) {
-    /*                       ^         */
-#ifdef DB_NEXT_DUP
-      *iv_return = DB_NEXT_DUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_DEGREE_2", 11)) {
-    /*                       ^         */
-#ifdef DB_DEGREE_2
-      *iv_return = DB_DEGREE_2;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_PERM", 11)) {
-    /*                       ^         */
-#ifdef DB_LOG_PERM
-      *iv_return = DB_LOG_PERM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OK_QUEUE", 11)) {
-    /*                       ^         */
-#ifdef DB_OK_QUEUE
-      *iv_return = DB_OK_QUEUE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_REDO", 11)) {
-    /*                       ^         */
-#ifdef DB_TXN_REDO
-      *iv_return = DB_TXN_REDO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_LOCK_GET", 11)) {
-    /*                       ^         */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_LOCK_GET;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOGMAGIC", 11)) {
-    /*                       ^         */
-#ifdef DB_LOGMAGIC
-      *iv_return = DB_LOGMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_QAMMAGIC", 11)) {
-    /*                       ^         */
-#ifdef DB_QAMMAGIC
-      *iv_return = DB_QAMMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXNMAGIC", 11)) {
-    /*                       ^         */
-#ifdef DB_TXNMAGIC
-      *iv_return = DB_TXNMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'H':
-    if (memEQ(name, "DB_SNAPSHOT", 11)) {
-    /*                       ^         */
-#ifdef DB_SNAPSHOT
-      *iv_return = DB_SNAPSHOT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_KEYEXIST", 11)) {
-    /*                       ^         */
-#ifdef DB_KEYEXIST
-      *iv_return = DB_KEYEXIST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_DISK", 11)) {
-    /*                       ^         */
-#ifdef DB_LOG_DISK
-      *iv_return = DB_LOG_DISK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_POSITION", 11)) {
-    /*                       ^         */
-#ifdef DB_POSITION
-      *iv_return = DB_POSITION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_ARCH_LOG", 11)) {
-    /*                       ^         */
-#ifdef DB_ARCH_LOG
-      *iv_return = DB_ARCH_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_FIXEDLEN", 11)) {
-    /*                       ^         */
-#ifdef DB_FIXEDLEN
-      *iv_return = DB_FIXEDLEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_INIT_LOG", 11)) {
-    /*                       ^         */
-#ifdef DB_INIT_LOG
-      *iv_return = DB_INIT_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_APP_INIT", 11)) {
-    /*                       ^         */
-#ifdef DB_APP_INIT
-      *iv_return = DB_APP_INIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_UNDO", 11)) {
-    /*                       ^         */
-#ifdef DB_TXN_UNDO
-      *iv_return = DB_TXN_UNDO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_GET_BOTH", 11)) {
-    /*                       ^         */
-#ifdef DB_GET_BOTH
-      *iv_return = DB_GET_BOTH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCKDOWN", 11)) {
-    /*                       ^         */
-#ifdef DB_LOCKDOWN
-      *iv_return = DB_LOCKDOWN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOCK", 11)) {
-    /*                       ^         */
-#ifdef DB_TXN_LOCK
-      *iv_return = DB_TXN_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_FILEOPEN", 11)) {
-    /*                       ^         */
-#ifdef DB_FILEOPEN
-      *iv_return = DB_FILEOPEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_KEYEMPTY", 11)) {
-    /*                       ^         */
-#ifdef DB_KEYEMPTY
-      *iv_return = DB_KEYEMPTY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_PUT", 11)) {
-    /*                       ^         */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_LOCK_PUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MULTIPLE", 11)) {
-    /*                       ^         */
-#ifdef DB_MULTIPLE
-      *iv_return = DB_MULTIPLE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_INIT_REP", 11)) {
-    /*                       ^         */
-#ifdef DB_INIT_REP
-      *iv_return = DB_INIT_REP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_KEYFIRST", 11)) {
-    /*                       ^         */
-#ifdef DB_KEYFIRST
-      *iv_return = DB_KEYFIRST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OK_BTREE", 11)) {
-    /*                       ^         */
-#ifdef DB_OK_BTREE
-      *iv_return = DB_OK_BTREE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SEQ_WRAP", 11)) {
-    /*                       ^         */
-#ifdef DB_SEQ_WRAP
-      *iv_return = DB_SEQ_WRAP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_INIT_TXN", 11)) {
-    /*                       ^         */
-#ifdef DB_INIT_TXN
-      *iv_return = DB_INIT_TXN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_NOTFOUND", 11)) {
-    /*                       ^         */
-#ifdef DB_NOTFOUND
-      *iv_return = DB_NOTFOUND;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_NOSERVER", 11)) {
-    /*                       ^         */
-#ifdef DB_NOSERVER
-      *iv_return = DB_NOSERVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_TXN_SYNC", 11)) {
-    /*                       ^         */
-#ifdef DB_TXN_SYNC
-      *iv_return = DB_TXN_SYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_WRNOSYNC", 11)) {
-    /*                       ^         */
-#ifdef DB_WRNOSYNC
-      *iv_return = DB_WRNOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_12 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ARCH_DATA DB_CDB_ALLDB DB_CL_WRITER DB_DELIMITER DB_DIRECT_DB
-     DB_DSYNC_LOG DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC
-     DB_GET_RECNO DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC
-     DB_LOCK_DUMP DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW
-     DB_NEEDSPLIT DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE
-     DB_PAGEYIELD DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE
-     DB_QAMOLDVER DB_RPCCLIENT DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES
-     DB_TEMPORARY DB_TXN_ABORT DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK
-     DB_WRITEOPEN DB_XA_CREATE */
-  /* Offset 3 gives the best switch position.  */
-  switch (name[3]) {
-  case 'A':
-    if (memEQ(name, "DB_ARCH_DATA", 12)) {
-    /*                  ^               */
-#ifdef DB_ARCH_DATA
-      *iv_return = DB_ARCH_DATA;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_CDB_ALLDB", 12)) {
-    /*                  ^               */
-#ifdef DB_CDB_ALLDB
-      *iv_return = DB_CDB_ALLDB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_CL_WRITER", 12)) {
-    /*                  ^               */
-#ifdef DB_CL_WRITER
-      *iv_return = DB_CL_WRITER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'D':
-    if (memEQ(name, "DB_DELIMITER", 12)) {
-    /*                  ^               */
-#ifdef DB_DELIMITER
-      *iv_return = DB_DELIMITER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_DIRECT_DB", 12)) {
-    /*                  ^               */
-#ifdef DB_DIRECT_DB
-      *iv_return = DB_DIRECT_DB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_DSYNC_LOG", 12)) {
-    /*                  ^               */
-#ifdef DB_DSYNC_LOG
-      *iv_return = DB_DSYNC_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_DUPCURSOR", 12)) {
-    /*                  ^               */
-#ifdef DB_DUPCURSOR
-      *iv_return = DB_DUPCURSOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_ENV_FATAL", 12)) {
-    /*                  ^               */
-#ifdef DB_ENV_FATAL
-      *iv_return = DB_ENV_FATAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'F':
-    if (memEQ(name, "DB_FAST_STAT", 12)) {
-    /*                  ^               */
-#ifdef DB_FAST_STAT
-      *iv_return = DB_FAST_STAT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_GET_BOTHC", 12)) {
-    /*                  ^               */
-#ifdef DB_GET_BOTHC
-      *iv_return = DB_GET_BOTHC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_GET_RECNO", 12)) {
-    /*                  ^               */
-#ifdef DB_GET_RECNO
-      *iv_return = DB_GET_RECNO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'H':
-    if (memEQ(name, "DB_HASHMAGIC", 12)) {
-    /*                  ^               */
-#ifdef DB_HASHMAGIC
-      *iv_return = DB_HASHMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_INIT_LOCK", 12)) {
-    /*                  ^               */
-#ifdef DB_INIT_LOCK
-      *iv_return = DB_INIT_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'J':
-    if (memEQ(name, "DB_JOIN_ITEM", 12)) {
-    /*                  ^               */
-#ifdef DB_JOIN_ITEM
-      *iv_return = DB_JOIN_ITEM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_LOCKMAGIC", 12)) {
-    /*                  ^               */
-#ifdef DB_LOCKMAGIC
-      *iv_return = DB_LOCKMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_DUMP", 12)) {
-    /*                  ^               */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_LOCK_DUMP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_RW_N", 12)) {
-    /*                  ^               */
-#ifdef DB_LOCK_RW_N
-      *iv_return = DB_LOCK_RW_N;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOGOLDVER", 12)) {
-    /*                  ^               */
-#ifdef DB_LOGOLDVER
-      *iv_return = DB_LOGOLDVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_MAX_PAGES", 12)) {
-    /*                  ^               */
-#ifdef DB_MAX_PAGES
-      *iv_return = DB_MAX_PAGES;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_NEW", 12)) {
-    /*                  ^               */
-#ifdef DB_MPOOL_NEW
-      *iv_return = DB_MPOOL_NEW;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_NEEDSPLIT", 12)) {
-    /*                  ^               */
-#ifdef DB_NEEDSPLIT
-      *iv_return = DB_NEEDSPLIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NODUPDATA", 12)) {
-    /*                  ^               */
-#ifdef DB_NODUPDATA
-      *iv_return = DB_NODUPDATA;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOLOCKING", 12)) {
-    /*                  ^               */
-#ifdef DB_NOLOCKING
-      *iv_return = DB_NOLOCKING;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NORECURSE", 12)) {
-    /*                  ^               */
-#ifdef DB_NORECURSE
-      *iv_return = DB_NORECURSE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_OVERWRITE", 12)) {
-    /*                  ^               */
-#ifdef DB_OVERWRITE
-      *iv_return = DB_OVERWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_PAGEYIELD", 12)) {
-    /*                  ^               */
-#ifdef DB_PAGEYIELD
-      *iv_return = DB_PAGEYIELD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PAGE_LOCK", 12)) {
-    /*                  ^               */
-#ifdef DB_PAGE_LOCK
-      *iv_return = DB_PAGE_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PERMANENT", 12)) {
-    /*                  ^               */
-#ifdef DB_PERMANENT
-      *iv_return = DB_PERMANENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_POSITIONI", 12)) {
-    /*                  ^               */
-#ifdef DB_POSITIONI
-      *iv_return = DB_POSITIONI;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PRINTABLE", 12)) {
-    /*                  ^               */
-#ifdef DB_PRINTABLE
-      *iv_return = DB_PRINTABLE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Q':
-    if (memEQ(name, "DB_QAMOLDVER", 12)) {
-    /*                  ^               */
-#ifdef DB_QAMOLDVER
-      *iv_return = DB_QAMOLDVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_RPCCLIENT", 12)) {
-    /*                  ^               */
-#ifdef DB_RPCCLIENT
-      *iv_return = DB_RPCCLIENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_SET_RANGE", 12)) {
-    /*                  ^               */
-#ifdef DB_SET_RANGE
-      *iv_return = DB_SET_RANGE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SET_RECNO", 12)) {
-    /*                  ^               */
-#ifdef DB_SET_RECNO
-      *iv_return = DB_SET_RECNO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SWAPBYTES", 12)) {
-    /*                  ^               */
-#ifdef DB_SWAPBYTES
-      *iv_return = DB_SWAPBYTES;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_TEMPORARY", 12)) {
-    /*                  ^               */
-#ifdef DB_TEMPORARY
-      *iv_return = DB_TEMPORARY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_ABORT", 12)) {
-    /*                  ^               */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_TXN_ABORT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_APPLY", 12)) {
-    /*                  ^               */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_TXN_APPLY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_PRINT", 12)) {
-    /*                  ^               */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_TXN_PRINT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'W':
-    if (memEQ(name, "DB_WRITELOCK", 12)) {
-    /*                  ^               */
-#ifdef DB_WRITELOCK
-      *iv_return = DB_WRITELOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_WRITEOPEN", 12)) {
-    /*                  ^               */
-#ifdef DB_WRITEOPEN
-      *iv_return = DB_WRITEOPEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_XA_CREATE", 12)) {
-    /*                  ^               */
-#ifdef DB_XA_CREATE
-      *iv_return = DB_XA_CREATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_13 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_AGGRESSIVE DB_BTREEMAGIC DB_CHECKPOINT DB_DIRECT_LOG DB_DIRTY_READ
-     DB_DONOTINDEX DB_ENV_CREATE DB_ENV_NOMMAP DB_ENV_THREAD DB_HASHOLDVER
-     DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_ABORT DB_LOCK_NORUN DB_LOCK_RIW_N
-     DB_LOCK_TRADE DB_LOGVERSION DB_LOG_CHKPNT DB_LOG_COMMIT DB_LOG_LOCKED
-     DB_LOG_NOCOPY DB_LOG_RESEND DB_MPOOL_FREE DB_MPOOL_LAST DB_MUTEXDEBUG
-     DB_MUTEXLOCKS DB_NEXT_NODUP DB_NOORDERCHK DB_PREV_NODUP DB_PR_HEADERS
-     DB_QAMVERSION DB_RDWRMASTER DB_REGISTERED DB_REP_CLIENT DB_REP_CREATE
-     DB_REP_ISPERM DB_REP_MASTER DB_SEQUENTIAL DB_STAT_CLEAR DB_SYSTEM_MEM
-     DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT DB_VERIFY_BAD DB_debug_FLAG
-     DB_user_BEGIN */
-  /* Offset 5 gives the best switch position.  */
-  switch (name[5]) {
-  case 'A':
-    if (memEQ(name, "DB_STAT_CLEAR", 13)) {
-    /*                    ^              */
-#ifdef DB_STAT_CLEAR
-      *iv_return = DB_STAT_CLEAR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_INCOMPLETE", 13)) {
-    /*                    ^              */
-#ifdef DB_INCOMPLETE
-      *iv_return = DB_INCOMPLETE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_ABORT", 13)) {
-    /*                    ^              */
-#ifdef DB_LOCK_ABORT
-      *iv_return = DB_LOCK_ABORT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_NORUN", 13)) {
-    /*                    ^              */
-#ifdef DB_LOCK_NORUN
-      *iv_return = DB_LOCK_NORUN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_RIW_N", 13)) {
-    /*                    ^              */
-#ifdef DB_LOCK_RIW_N
-      *iv_return = DB_LOCK_RIW_N;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_TRADE", 13)) {
-    /*                    ^              */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_LOCK_TRADE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_CHECKPOINT", 13)) {
-    /*                    ^              */
-#ifdef DB_CHECKPOINT
-      *iv_return = DB_CHECKPOINT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PREV_NODUP", 13)) {
-    /*                    ^              */
-#ifdef DB_PREV_NODUP
-      *iv_return = DB_PREV_NODUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_AGGRESSIVE", 13)) {
-    /*                    ^              */
-#ifdef DB_AGGRESSIVE
-      *iv_return = DB_AGGRESSIVE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOGVERSION", 13)) {
-    /*                    ^              */
-#ifdef DB_LOGVERSION
-      *iv_return = DB_LOGVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_CHKPNT", 13)) {
-    /*                    ^              */
-#ifdef DB_LOG_CHKPNT
-      *iv_return = DB_LOG_CHKPNT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_COMMIT", 13)) {
-    /*                    ^              */
-#ifdef DB_LOG_COMMIT
-      *iv_return = DB_LOG_COMMIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_LOCKED", 13)) {
-    /*                    ^              */
-#ifdef DB_LOG_LOCKED
-      *iv_return = DB_LOG_LOCKED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_NOCOPY", 13)) {
-    /*                    ^              */
-#ifdef DB_LOG_NOCOPY
-      *iv_return = DB_LOG_NOCOPY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_RESEND", 13)) {
-    /*                    ^              */
-#ifdef DB_LOG_RESEND
-      *iv_return = DB_LOG_RESEND;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REGISTERED", 13)) {
-    /*                    ^              */
-#ifdef DB_REGISTERED
-      *iv_return = DB_REGISTERED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_INIT_MPOOL", 13)) {
-    /*                    ^              */
-#ifdef DB_INIT_MPOOL
-      *iv_return = DB_INIT_MPOOL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_QAMVERSION", 13)) {
-    /*                    ^              */
-#ifdef DB_QAMVERSION
-      *iv_return = DB_QAMVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_DONOTINDEX", 13)) {
-    /*                    ^              */
-#ifdef DB_DONOTINDEX
-      *iv_return = DB_DONOTINDEX;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXNVERSION", 13)) {
-    /*                    ^              */
-#ifdef DB_TXNVERSION
-      *iv_return = DB_TXNVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_NOSYNC", 13)) {
-    /*                    ^              */
-#ifdef DB_TXN_NOSYNC
-      *iv_return = DB_TXN_NOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_NOWAIT", 13)) {
-    /*                    ^              */
-#ifdef DB_TXN_NOWAIT
-      *iv_return = DB_TXN_NOWAIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_MPOOL_FREE", 13)) {
-    /*                    ^              */
-#ifdef DB_MPOOL_FREE
-      *iv_return = DB_MPOOL_FREE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_LAST", 13)) {
-    /*                    ^              */
-#ifdef DB_MPOOL_LAST
-      *iv_return = DB_MPOOL_LAST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOORDERCHK", 13)) {
-    /*                    ^              */
-#ifdef DB_NOORDERCHK
-      *iv_return = DB_NOORDERCHK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_REP_CLIENT", 13)) {
-    /*                    ^              */
-#ifdef DB_REP_CLIENT
-      *iv_return = DB_REP_CLIENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_CREATE", 13)) {
-    /*                    ^              */
-#ifdef DB_REP_CREATE
-      *iv_return = DB_REP_CREATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_ISPERM", 13)) {
-    /*                    ^              */
-#ifdef DB_REP_ISPERM
-      *iv_return = DB_REP_ISPERM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_MASTER", 13)) {
-    /*                    ^              */
-#ifdef DB_REP_MASTER
-      *iv_return = DB_REP_MASTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Q':
-    if (memEQ(name, "DB_SEQUENTIAL", 13)) {
-    /*                    ^              */
-#ifdef DB_SEQUENTIAL
-      *iv_return = DB_SEQUENTIAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_BTREEMAGIC", 13)) {
-    /*                    ^              */
-#ifdef DB_BTREEMAGIC
-      *iv_return = DB_BTREEMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_DIRECT_LOG", 13)) {
-    /*                    ^              */
-#ifdef DB_DIRECT_LOG
-      *iv_return = DB_DIRECT_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_DIRTY_READ", 13)) {
-    /*                    ^              */
-#ifdef DB_DIRTY_READ
-      *iv_return = DB_DIRTY_READ;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERIFY_BAD", 13)) {
-    /*                    ^              */
-#ifdef DB_VERIFY_BAD
-      *iv_return = DB_VERIFY_BAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_HASHOLDVER", 13)) {
-    /*                    ^              */
-#ifdef DB_HASHOLDVER
-      *iv_return = DB_HASHOLDVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SYSTEM_MEM", 13)) {
-    /*                    ^              */
-#ifdef DB_SYSTEM_MEM
-      *iv_return = DB_SYSTEM_MEM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_MUTEXDEBUG", 13)) {
-    /*                    ^              */
-#ifdef DB_MUTEXDEBUG
-      *iv_return = DB_MUTEXDEBUG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MUTEXLOCKS", 13)) {
-    /*                    ^              */
-#ifdef DB_MUTEXLOCKS
-      *iv_return = DB_MUTEXLOCKS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_ENV_CREATE", 13)) {
-    /*                    ^              */
-#ifdef DB_ENV_CREATE
-      *iv_return = DB_ENV_CREATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_NOMMAP", 13)) {
-    /*                    ^              */
-#ifdef DB_ENV_NOMMAP
-      *iv_return = DB_ENV_NOMMAP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_THREAD", 13)) {
-    /*                    ^              */
-#ifdef DB_ENV_THREAD
-      *iv_return = DB_ENV_THREAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'W':
-    if (memEQ(name, "DB_RDWRMASTER", 13)) {
-    /*                    ^              */
-#ifdef DB_RDWRMASTER
-      *iv_return = DB_RDWRMASTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_NEXT_NODUP", 13)) {
-    /*                    ^              */
-#ifdef DB_NEXT_NODUP
-      *iv_return = DB_NEXT_NODUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_PR_HEADERS", 13)) {
-    /*                    ^              */
-#ifdef DB_PR_HEADERS
-      *iv_return = DB_PR_HEADERS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'b':
-    if (memEQ(name, "DB_debug_FLAG", 13)) {
-    /*                    ^              */
-#ifdef DB_debug_FLAG
-      *iv_return = DB_debug_FLAG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'e':
-    if (memEQ(name, "DB_user_BEGIN", 13)) {
-    /*                    ^              */
-#ifdef DB_user_BEGIN
-      *iv_return = DB_user_BEGIN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_14 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BTREEOLDVER DB_CHKSUM_SHA1 DB_EID_INVALID
-     DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_DBLOCAL DB_ENV_LOCKING DB_ENV_LOGGING
-     DB_ENV_NOPANIC DB_ENV_PRIVATE DB_FILE_ID_LEN DB_HANDLE_LOCK DB_HASHVERSION
-     DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT DB_LOCK_OLDEST
-     DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH DB_MAX_RECORDS
-     DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID DB_ODDFILESIZE
-     DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK DB_REGION_ANON
-     DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_EGENCHG DB_REP_NEWSITE
-     DB_REP_NOTPERM DB_REP_UNAVAIL DB_REVSPLITOFF DB_RUNRECOVERY DB_SET_TXN_NOW
-     DB_USE_ENVIRON DB_WRITECURSOR DB_XIDDATASIZE */
-  /* Offset 9 gives the best switch position.  */
-  switch (name[9]) {
-  case 'A':
-    if (memEQ(name, "DB_LOCK_RANDOM", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_RANDOM
-      *iv_return = DB_LOCK_RANDOM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OPEN_CALLED", 14)) {
-    /*                        ^           */
-#ifdef DB_OPEN_CALLED
-      *iv_return = DB_OPEN_CALLED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_UNAVAIL", 14)) {
-    /*                        ^           */
-#ifdef DB_REP_UNAVAIL
-      *iv_return = DB_REP_UNAVAIL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_XIDDATASIZE", 14)) {
-    /*                        ^           */
-#ifdef DB_XIDDATASIZE
-      *iv_return = DB_XIDDATASIZE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_ENV_LOCKING", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_LOCKING
-      *iv_return = DB_ENV_LOCKING;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MAX_RECORDS", 14)) {
-    /*                        ^           */
-#ifdef DB_MAX_RECORDS
-      *iv_return = DB_MAX_RECORDS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_CLEAN", 14)) {
-    /*                        ^           */
-#ifdef DB_MPOOL_CLEAN
-      *iv_return = DB_MPOOL_CLEAN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RECORDCOUNT", 14)) {
-    /*                        ^           */
-#ifdef DB_RECORDCOUNT
-      *iv_return = DB_RECORDCOUNT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'D':
-    if (memEQ(name, "DB_FILE_ID_LEN", 14)) {
-    /*                        ^           */
-#ifdef DB_FILE_ID_LEN
-      *iv_return = DB_FILE_ID_LEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_DIRTY", 14)) {
-    /*                        ^           */
-#ifdef DB_MPOOL_DIRTY
-      *iv_return = DB_MPOOL_DIRTY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_ARCH_REMOVE", 14)) {
-    /*                        ^           */
-#ifdef DB_ARCH_REMOVE
-      *iv_return = DB_ARCH_REMOVE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_RECORD", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_RECORD
-      *iv_return = DB_LOCK_RECORD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_REMOVE", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_REMOVE
-      *iv_return = DB_LOCK_REMOVE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOSERVER_ID", 14)) {
-    /*                        ^           */
-#ifdef DB_NOSERVER_ID
-      *iv_return = DB_NOSERVER_ID;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ODDFILESIZE", 14)) {
-    /*                        ^           */
-#ifdef DB_ODDFILESIZE
-      *iv_return = DB_ODDFILESIZE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_EGENCHG", 14)) {
-    /*                        ^           */
-#ifdef DB_REP_EGENCHG
-      *iv_return = DB_REP_EGENCHG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_ENV_LOGGING", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_LOGGING
-      *iv_return = DB_ENV_LOGGING;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_ENV_PRIVATE", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_PRIVATE
-      *iv_return = DB_ENV_PRIVATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REVSPLITOFF", 14)) {
-    /*                        ^           */
-#ifdef DB_REVSPLITOFF
-      *iv_return = DB_REVSPLITOFF;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_BTREEOLDVER", 14)) {
-    /*                        ^           */
-#ifdef DB_BTREEOLDVER
-      *iv_return = DB_BTREEOLDVER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_DBLOCAL", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_DBLOCAL
-      *iv_return = DB_ENV_DBLOCAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_OLDEST", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_OLDEST
-      *iv_return = DB_LOCK_OLDEST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_RENAMEMAGIC", 14)) {
-    /*                        ^           */
-#ifdef DB_RENAMEMAGIC
-      *iv_return = DB_RENAMEMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_SET_TXN_NOW", 14)) {
-    /*                        ^           */
-#ifdef DB_SET_TXN_NOW
-      *iv_return = DB_SET_TXN_NOW;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_AUTO_COMMIT", 14)) {
-    /*                        ^           */
-#ifdef DB_AUTO_COMMIT
-      *iv_return = DB_AUTO_COMMIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_JOIN_NOSORT", 14)) {
-    /*                        ^           */
-#ifdef DB_JOIN_NOSORT
-      *iv_return = DB_JOIN_NOSORT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_NOWAIT", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_NOWAIT
-      *iv_return = DB_LOCK_NOWAIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RUNRECOVERY", 14)) {
-    /*                        ^           */
-#ifdef DB_RUNRECOVERY
-      *iv_return = DB_RUNRECOVERY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_ENV_APPINIT", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_APPINIT
-      *iv_return = DB_ENV_APPINIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_NOPANIC", 14)) {
-    /*                        ^           */
-#ifdef DB_ENV_NOPANIC
-      *iv_return = DB_ENV_NOPANIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_HASHVERSION", 14)) {
-    /*                        ^           */
-#ifdef DB_HASHVERSION
-      *iv_return = DB_HASHVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCKVERSION", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCKVERSION
-      *iv_return = DB_LOCKVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OLD_VERSION", 14)) {
-    /*                        ^           */
-#ifdef DB_OLD_VERSION
-      *iv_return = DB_OLD_VERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_ENCRYPT_AES", 14)) {
-    /*                        ^           */
-#ifdef DB_ENCRYPT_AES
-      *iv_return = DB_ENCRYPT_AES;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_NOTPERM", 14)) {
-    /*                        ^           */
-#ifdef DB_REP_NOTPERM
-      *iv_return = DB_REP_NOTPERM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_WRITECURSOR", 14)) {
-    /*                        ^           */
-#ifdef DB_WRITECURSOR
-      *iv_return = DB_WRITECURSOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_EID_INVALID", 14)) {
-    /*                        ^           */
-#ifdef DB_EID_INVALID
-      *iv_return = DB_EID_INVALID;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_USE_ENVIRON", 14)) {
-    /*                        ^           */
-#ifdef DB_USE_ENVIRON
-      *iv_return = DB_USE_ENVIRON;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'W':
-    if (memEQ(name, "DB_LOCK_SWITCH", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_SWITCH
-      *iv_return = DB_LOCK_SWITCH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOOVERWRITE", 14)) {
-    /*                        ^           */
-#ifdef DB_NOOVERWRITE
-      *iv_return = DB_NOOVERWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_NEWSITE", 14)) {
-    /*                        ^           */
-#ifdef DB_REP_NEWSITE
-      *iv_return = DB_REP_NEWSITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_LOCK_EXPIRE", 14)) {
-    /*                        ^           */
-#ifdef DB_LOCK_EXPIRE
-      *iv_return = DB_LOCK_EXPIRE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_CHKSUM_SHA1", 14)) {
-    /*                        ^           */
-#ifdef DB_CHKSUM_SHA1
-      *iv_return = DB_CHKSUM_SHA1;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_HANDLE_LOCK", 14)) {
-    /*                        ^           */
-#ifdef DB_HANDLE_LOCK
-      *iv_return = DB_HANDLE_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RECORD_LOCK", 14)) {
-    /*                        ^           */
-#ifdef DB_RECORD_LOCK
-      *iv_return = DB_RECORD_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REGION_ANON", 14)) {
-    /*                        ^           */
-#ifdef DB_REGION_ANON
-      *iv_return = DB_REGION_ANON;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REGION_INIT", 14)) {
-    /*                        ^           */
-#ifdef DB_REGION_INIT
-      *iv_return = DB_REGION_INIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REGION_NAME", 14)) {
-    /*                        ^           */
-#ifdef DB_REGION_NAME
-      *iv_return = DB_REGION_NAME;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_15 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_APPLY_LOGREG DB_BTREEVERSION DB_BUFFER_SMALL DB_CONSUME_WAIT
-     DB_ENV_LOCKDOWN DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT
-     DB_LOCK_INHERIT DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ
-     DB_LOCK_TIMEOUT DB_LOCK_UPGRADE DB_LOG_INMEMORY DB_LOG_WRNOSYNC
-     DB_MPOOL_CREATE DB_MPOOL_EXTENT DB_MPOOL_NOFILE DB_MPOOL_UNLINK
-     DB_MULTIPLE_KEY DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_PRIORITY_LOW
-     DB_REGION_MAGIC DB_REP_LOGREADY DB_REP_LOGSONLY DB_REP_NOBUFFER
-     DB_REP_OUTDATED DB_REP_PAGEDONE DB_SURPRISE_KID DB_TEST_POSTLOG
-     DB_TEST_PREOPEN DB_TXN_LOCK_2PL DB_TXN_LOG_MASK DB_TXN_LOG_REDO
-     DB_TXN_LOG_UNDO DB_VERIFY_FATAL */
-  /* Offset 10 gives the best switch position.  */
-  switch (name[10]) {
-  case 'D':
-    if (memEQ(name, "DB_REP_OUTDATED", 15)) {
-    /*                         ^           */
-#ifdef DB_REP_OUTDATED
-      *iv_return = DB_REP_OUTDATED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_LOG_INMEMORY", 15)) {
-    /*                         ^           */
-#ifdef DB_LOG_INMEMORY
-      *iv_return = DB_LOG_INMEMORY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MULTIPLE_KEY", 15)) {
-    /*                         ^           */
-#ifdef DB_MULTIPLE_KEY
-      *iv_return = DB_MULTIPLE_KEY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_PAGEDONE", 15)) {
-    /*                         ^           */
-#ifdef DB_REP_PAGEDONE
-      *iv_return = DB_REP_PAGEDONE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SURPRISE_KID", 15)) {
-    /*                         ^           */
-#ifdef DB_SURPRISE_KID
-      *iv_return = DB_SURPRISE_KID;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_PREOPEN", 15)) {
-    /*                         ^           */
-#ifdef DB_TEST_PREOPEN
-      *iv_return = DB_TEST_PREOPEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'F':
-    if (memEQ(name, "DB_LOCK_DEFAULT", 15)) {
-    /*                         ^           */
-#ifdef DB_LOCK_DEFAULT
-      *iv_return = DB_LOCK_DEFAULT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERIFY_FATAL", 15)) {
-    /*                         ^           */
-#ifdef DB_VERIFY_FATAL
-      *iv_return = DB_VERIFY_FATAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_LOCK_UPGRADE", 15)) {
-    /*                         ^           */
-#ifdef DB_LOCK_UPGRADE
-      *iv_return = DB_LOCK_UPGRADE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'H':
-    if (memEQ(name, "DB_LOCK_INHERIT", 15)) {
-    /*                         ^           */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \
-     DB_VERSION_PATCH >= 1)
-      *iv_return = DB_LOCK_INHERIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_ENV_PANIC_OK", 15)) {
-    /*                         ^           */
-#ifdef DB_ENV_PANIC_OK
-      *iv_return = DB_ENV_PANIC_OK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'K':
-    if (memEQ(name, "DB_ENV_LOCKDOWN", 15)) {
-    /*                         ^           */
-#ifdef DB_ENV_LOCKDOWN
-      *iv_return = DB_ENV_LOCKDOWN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ORDERCHKONLY", 15)) {
-    /*                         ^           */
-#ifdef DB_ORDERCHKONLY
-      *iv_return = DB_ORDERCHKONLY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOCK_2PL", 15)) {
-    /*                         ^           */
-#ifdef DB_TXN_LOCK_2PL
-      *iv_return = DB_TXN_LOCK_2PL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_ENV_YIELDCPU", 15)) {
-    /*                         ^           */
-#ifdef DB_ENV_YIELDCPU
-      *iv_return = DB_ENV_YIELDCPU;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_LOCK_TIMEOUT", 15)) {
-    /*                         ^           */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_LOCK_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REGION_MAGIC", 15)) {
-    /*                         ^           */
-#ifdef DB_REGION_MAGIC
-      *iv_return = DB_REGION_MAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_MPOOL_UNLINK", 15)) {
-    /*                         ^           */
-#ifdef DB_MPOOL_UNLINK
-      *iv_return = DB_MPOOL_UNLINK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_APPLY_LOGREG", 15)) {
-    /*                         ^           */
-#ifdef DB_APPLY_LOGREG
-      *iv_return = DB_APPLY_LOGREG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_WRNOSYNC", 15)) {
-    /*                         ^           */
-#ifdef DB_LOG_WRNOSYNC
-      *iv_return = DB_LOG_WRNOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_NOFILE", 15)) {
-    /*                         ^           */
-#ifdef DB_MPOOL_NOFILE
-      *iv_return = DB_MPOOL_NOFILE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_BTREEVERSION", 15)) {
-    /*                         ^           */
-#ifdef DB_BTREEVERSION
-      *iv_return = DB_BTREEVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_CREATE", 15)) {
-    /*                         ^           */
-#ifdef DB_MPOOL_CREATE
-      *iv_return = DB_MPOOL_CREATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_LOGREADY", 15)) {
-    /*                         ^           */
-#ifdef DB_REP_LOGREADY
-      *iv_return = DB_REP_LOGREADY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_BUFFER_SMALL", 15)) {
-    /*                         ^           */
-#ifdef DB_BUFFER_SMALL
-      *iv_return = DB_BUFFER_SMALL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_LOGSONLY", 15)) {
-    /*                         ^           */
-#ifdef DB_REP_LOGSONLY
-      *iv_return = DB_REP_LOGSONLY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_POSTLOG", 15)) {
-    /*                         ^           */
-#ifdef DB_TEST_POSTLOG
-      *iv_return = DB_TEST_POSTLOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_LOCK_NOTHELD", 15)) {
-    /*                         ^           */
-#ifdef DB_LOCK_NOTHELD
-      *iv_return = DB_LOCK_NOTHELD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_PUT_ALL", 15)) {
-    /*                         ^           */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_LOCK_PUT_ALL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_PUT_OBJ", 15)) {
-    /*                         ^           */
-#if (DB_VERSION_MAJOR > 2) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 3)
-      *iv_return = DB_LOCK_PUT_OBJ;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_REP_NOBUFFER", 15)) {
-    /*                         ^           */
-#ifdef DB_REP_NOBUFFER
-      *iv_return = DB_REP_NOBUFFER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_MPOOL_EXTENT", 15)) {
-    /*                         ^           */
-#ifdef DB_MPOOL_EXTENT
-      *iv_return = DB_MPOOL_EXTENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_PRIORITY_LOW", 15)) {
-    /*                         ^           */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_PRIORITY_LOW;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_CONSUME_WAIT", 15)) {
-    /*                         ^           */
-#ifdef DB_CONSUME_WAIT
-      *iv_return = DB_CONSUME_WAIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_OPFLAGS_MASK", 15)) {
-    /*                         ^           */
-#ifdef DB_OPFLAGS_MASK
-      *iv_return = DB_OPFLAGS_MASK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOG_MASK", 15)) {
-    /*                         ^           */
-#ifdef DB_TXN_LOG_MASK
-      *iv_return = DB_TXN_LOG_MASK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOG_REDO", 15)) {
-    /*                         ^           */
-#ifdef DB_TXN_LOG_REDO
-      *iv_return = DB_TXN_LOG_REDO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOG_UNDO", 15)) {
-    /*                         ^           */
-#ifdef DB_TXN_LOG_UNDO
-      *iv_return = DB_TXN_LOG_UNDO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_16 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB DB_ENV_DIRECT_DB
-     DB_ENV_DSYNC_LOG DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT
-     DB_FCNTL_LOCKING DB_JAVA_CALLBACK DB_LOCK_CONFLICT DB_LOCK_DEADLOCK
-     DB_LOCK_MAXLOCKS DB_LOCK_MAXWRITE DB_LOCK_MINLOCKS DB_LOCK_MINWRITE
-     DB_LOCK_NOTEXIST DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE
-     DB_MPOOL_DISCARD DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND
-     DB_PRIORITY_HIGH DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER
-     DB_REP_PERMANENT DB_SECONDARY_BAD DB_SEQ_RANGE_SET DB_TEST_POSTOPEN
-     DB_TEST_POSTSYNC DB_TXN_LOCK_MASK DB_TXN_OPENFILES DB_VERB_CHKPOINT
-     DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_WAITSFOR DB_VERSION_MAJOR
-     DB_VERSION_MINOR DB_VERSION_PATCH DB_VRFY_FLAGMASK */
-  /* Offset 10 gives the best switch position.  */
-  switch (name[10]) {
-  case 'A':
-    if (memEQ(name, "DB_EID_BROADCAST", 16)) {
-    /*                         ^            */
-#ifdef DB_EID_BROADCAST
-      *iv_return = DB_EID_BROADCAST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_DEADLOCK
-      *iv_return = DB_LOCK_DEADLOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERB_DEADLOCK", 16)) {
-    /*                         ^            */
-#ifdef DB_VERB_DEADLOCK
-      *iv_return = DB_VERB_DEADLOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) {
-    /*                         ^            */
-#ifdef DB_VRFY_FLAGMASK
-      *iv_return = DB_VRFY_FLAGMASK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_CACHED_COUNTS", 16)) {
-    /*                         ^            */
-#ifdef DB_CACHED_COUNTS
-      *iv_return = DB_CACHED_COUNTS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_RPCCLIENT
-      *iv_return = DB_ENV_RPCCLIENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERB_RECOVERY", 16)) {
-    /*                         ^            */
-#ifdef DB_VERB_RECOVERY
-      *iv_return = DB_VERB_RECOVERY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_DIRECT_DB
-      *iv_return = DB_ENV_DIRECT_DB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'F':
-    if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) {
-    /*                         ^            */
-#ifdef DB_LOGC_BUF_SIZE
-      *iv_return = DB_LOGC_BUF_SIZE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_SEQ_RANGE_SET", 16)) {
-    /*                         ^            */
-#ifdef DB_SEQ_RANGE_SET
-      *iv_return = DB_SEQ_RANGE_SET;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_MPOOL_DISCARD", 16)) {
-    /*                         ^            */
-#ifdef DB_MPOOL_DISCARD
-      *iv_return = DB_MPOOL_DISCARD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERB_WAITSFOR", 16)) {
-    /*                         ^            */
-#ifdef DB_VERB_WAITSFOR
-      *iv_return = DB_VERB_WAITSFOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'K':
-    if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) {
-    /*                         ^            */
-#ifdef DB_TXN_LOCK_MASK
-      *iv_return = DB_TXN_LOCK_MASK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERB_CHKPOINT", 16)) {
-    /*                         ^            */
-#ifdef DB_VERB_CHKPOINT
-      *iv_return = DB_VERB_CHKPOINT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_JAVA_CALLBACK", 16)) {
-    /*                         ^            */
-#ifdef DB_JAVA_CALLBACK
-      *iv_return = DB_JAVA_CALLBACK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_REP_DUPMASTER", 16)) {
-    /*                         ^            */
-#ifdef DB_REP_DUPMASTER
-      *iv_return = DB_REP_DUPMASTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_NEWMASTER", 16)) {
-    /*                         ^            */
-#ifdef DB_REP_NEWMASTER
-      *iv_return = DB_REP_NEWMASTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_PERMANENT", 16)) {
-    /*                         ^            */
-#ifdef DB_REP_PERMANENT
-      *iv_return = DB_REP_PERMANENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_ENV_DSYNC_LOG", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_DSYNC_LOG
-      *iv_return = DB_ENV_DSYNC_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_CONFLICT", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_CONFLICT
-      *iv_return = DB_LOCK_CONFLICT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_MINLOCKS
-      *iv_return = DB_LOCK_MINLOCKS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_MINWRITE", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_MINWRITE
-      *iv_return = DB_LOCK_MINWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_OPENFILES", 16)) {
-    /*                         ^            */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_TXN_OPENFILES;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_ENV_NOLOCKING", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_NOLOCKING
-      *iv_return = DB_ENV_NOLOCKING;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_FCNTL_LOCKING", 16)) {
-    /*                         ^            */
-#ifdef DB_FCNTL_LOCKING
-      *iv_return = DB_FCNTL_LOCKING;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_ENV_OVERWRITE", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_OVERWRITE
-      *iv_return = DB_ENV_OVERWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_MPOOL_PRIVATE", 16)) {
-    /*                         ^            */
-#ifdef DB_MPOOL_PRIVATE
-      *iv_return = DB_MPOOL_PRIVATE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NOSERVER_HOME", 16)) {
-    /*                         ^            */
-#ifdef DB_NOSERVER_HOME
-      *iv_return = DB_NOSERVER_HOME;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SECONDARY_BAD", 16)) {
-    /*                         ^            */
-#ifdef DB_SECONDARY_BAD
-      *iv_return = DB_SECONDARY_BAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_TEST_POSTOPEN", 16)) {
-    /*                         ^            */
-#ifdef DB_TEST_POSTOPEN
-      *iv_return = DB_TEST_POSTOPEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_POSTSYNC", 16)) {
-    /*                         ^            */
-#ifdef DB_TEST_POSTSYNC
-      *iv_return = DB_TEST_POSTSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_NOTEXIST
-      *iv_return = DB_LOCK_NOTEXIST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_PUT_READ", 16)) {
-    /*                         ^            */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_LOCK_PUT_READ;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) {
-    /*                         ^            */
-#ifdef DB_PAGE_NOTFOUND
-      *iv_return = DB_PAGE_NOTFOUND;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_YOUNGEST
-      *iv_return = DB_LOCK_YOUNGEST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_MAXLOCKS
-      *iv_return = DB_LOCK_MAXLOCKS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_MAXWRITE", 16)) {
-    /*                         ^            */
-#ifdef DB_LOCK_MAXWRITE
-      *iv_return = DB_LOCK_MAXWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_PRIORITY_HIGH", 16)) {
-    /*                         ^            */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_PRIORITY_HIGH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) {
-    /*                         ^            */
-#ifdef DB_ENV_CDB_ALLDB
-      *iv_return = DB_ENV_CDB_ALLDB;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_RECOVER_FATAL", 16)) {
-    /*                         ^            */
-#ifdef DB_RECOVER_FATAL
-      *iv_return = DB_RECOVER_FATAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERSION_MAJOR", 16)) {
-    /*                         ^            */
-#ifdef DB_VERSION_MAJOR
-      *iv_return = DB_VERSION_MAJOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERSION_MINOR", 16)) {
-    /*                         ^            */
-#ifdef DB_VERSION_MINOR
-      *iv_return = DB_VERSION_MINOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERSION_PATCH", 16)) {
-    /*                         ^            */
-#ifdef DB_VERSION_PATCH
-      *iv_return = DB_VERSION_PATCH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ENV_DIRECT_LOG DB_ENV_REP_CLIENT DB_ENV_REP_MASTER DB_ENV_STANDALONE
-     DB_ENV_SYSTEM_MEM DB_ENV_TXN_NOSYNC DB_ENV_USER_ALLOC DB_GET_BOTH_RANGE
-     DB_LOG_AUTOREMOVE DB_LOG_SILENT_ERR DB_NO_AUTO_COMMIT DB_RPC_SERVERPROG
-     DB_RPC_SERVERVERS DB_STAT_LOCK_CONF DB_STAT_MEMP_HASH DB_STAT_SUBSYSTEM
-     DB_TEST_ELECTINIT DB_TEST_ELECTSEND DB_TEST_PRERENAME DB_TXN_POPENFILES
-     DB_VERSION_STRING */
-  /* Offset 13 gives the best switch position.  */
-  switch (name[13]) {
-  case 'A':
-    if (memEQ(name, "DB_GET_BOTH_RANGE", 17)) {
-    /*                            ^          */
-#ifdef DB_GET_BOTH_RANGE
-      *iv_return = DB_GET_BOTH_RANGE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'C':
-    if (memEQ(name, "DB_STAT_LOCK_CONF", 17)) {
-    /*                            ^          */
-#ifdef DB_STAT_LOCK_CONF
-      *iv_return = DB_STAT_LOCK_CONF;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'H':
-    if (memEQ(name, "DB_STAT_MEMP_HASH", 17)) {
-    /*                            ^          */
-#ifdef DB_STAT_MEMP_HASH
-      *iv_return = DB_STAT_MEMP_HASH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_ENV_REP_CLIENT", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_REP_CLIENT
-      *iv_return = DB_ENV_REP_CLIENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_ELECTINIT", 17)) {
-    /*                            ^          */
-#ifdef DB_TEST_ELECTINIT
-      *iv_return = DB_TEST_ELECTINIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_POPENFILES", 17)) {
-    /*                            ^          */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
-     DB_VERSION_PATCH >= 11)
-      *iv_return = DB_TXN_POPENFILES;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_ENV_STANDALONE", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_STANDALONE
-      *iv_return = DB_ENV_STANDALONE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_USER_ALLOC", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_USER_ALLOC
-      *iv_return = DB_ENV_USER_ALLOC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_LOG_AUTOREMOVE", 17)) {
-    /*                            ^          */
-#ifdef DB_LOG_AUTOREMOVE
-      *iv_return = DB_LOG_AUTOREMOVE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_NO_AUTO_COMMIT", 17)) {
-    /*                            ^          */
-#ifdef DB_NO_AUTO_COMMIT
-      *iv_return = DB_NO_AUTO_COMMIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_TEST_PRERENAME", 17)) {
-    /*                            ^          */
-#ifdef DB_TEST_PRERENAME
-      *iv_return = DB_TEST_PRERENAME;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_RPC_SERVERPROG", 17)) {
-    /*                            ^          */
-#ifdef DB_RPC_SERVERPROG
-      *iv_return = DB_RPC_SERVERPROG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_VERSION_STRING", 17)) {
-    /*                            ^          */
-#ifdef DB_VERSION_STRING
-      *pv_return = DB_VERSION_STRING;
-      return PERL_constant_ISPV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_ENV_REP_MASTER", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_REP_MASTER
-      *iv_return = DB_ENV_REP_MASTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_TXN_NOSYNC", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_TXN_NOSYNC
-      *iv_return = DB_ENV_TXN_NOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_STAT_SUBSYSTEM", 17)) {
-    /*                            ^          */
-#ifdef DB_STAT_SUBSYSTEM
-      *iv_return = DB_STAT_SUBSYSTEM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_ELECTSEND", 17)) {
-    /*                            ^          */
-#ifdef DB_TEST_ELECTSEND
-      *iv_return = DB_TEST_ELECTSEND;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_RPC_SERVERVERS", 17)) {
-    /*                            ^          */
-#ifdef DB_RPC_SERVERVERS
-      *iv_return = DB_RPC_SERVERVERS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_ENV_DIRECT_LOG", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_DIRECT_LOG
-      *iv_return = DB_ENV_DIRECT_LOG;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_SYSTEM_MEM", 17)) {
-    /*                            ^          */
-#ifdef DB_ENV_SYSTEM_MEM
-      *iv_return = DB_ENV_SYSTEM_MEM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_SILENT_ERR", 17)) {
-    /*                            ^          */
-#ifdef DB_LOG_SILENT_ERR
-      *iv_return = DB_LOG_SILENT_ERR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_18 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ALREADY_ABORTED DB_DURABLE_UNKNOWN DB_ENV_AUTO_COMMIT
-     DB_ENV_OPEN_CALLED DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED
-     DB_LOG_BUFFER_FULL DB_LOG_NOT_DURABLE DB_MPOOL_NEW_GROUP
-     DB_PR_RECOVERYTEST DB_REP_HANDLE_DEAD DB_REP_STARTUPDONE
-     DB_SET_TXN_TIMEOUT DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2
-     DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTRENAME
-     DB_TEST_PREDESTROY DB_TIME_NOTGRANTED DB_TXN_NOT_DURABLE */
-  /* Offset 13 gives the best switch position.  */
-  switch (name[13]) {
-  case 'A':
-    if (memEQ(name, "DB_ENV_OPEN_CALLED", 18)) {
-    /*                            ^           */
-#ifdef DB_ENV_OPEN_CALLED
-      *iv_return = DB_ENV_OPEN_CALLED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_NOTGRANTED", 18)) {
-    /*                            ^           */
-#ifdef DB_LOCK_NOTGRANTED
-      *iv_return = DB_LOCK_NOTGRANTED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TIME_NOTGRANTED", 18)) {
-    /*                            ^           */
-#ifdef DB_TIME_NOTGRANTED
-      *iv_return = DB_TIME_NOTGRANTED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_TEST_POSTRENAME", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_POSTRENAME
-      *iv_return = DB_TEST_POSTRENAME;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_MPOOL_NEW_GROUP", 18)) {
-    /*                            ^           */
-#ifdef DB_MPOOL_NEW_GROUP
-      *iv_return = DB_MPOOL_NEW_GROUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'K':
-    if (memEQ(name, "DB_DURABLE_UNKNOWN", 18)) {
-    /*                            ^           */
-#ifdef DB_DURABLE_UNKNOWN
-      *iv_return = DB_DURABLE_UNKNOWN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "DB_SET_TXN_TIMEOUT", 18)) {
-    /*                            ^           */
-#ifdef DB_SET_TXN_TIMEOUT
-      *iv_return = DB_SET_TXN_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_ALREADY_ABORTED", 18)) {
-    /*                            ^           */
-#ifdef DB_ALREADY_ABORTED
-      *iv_return = DB_ALREADY_ABORTED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_ENV_AUTO_COMMIT", 18)) {
-    /*                            ^           */
-#ifdef DB_ENV_AUTO_COMMIT
-      *iv_return = DB_ENV_AUTO_COMMIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_REP_STARTUPDONE", 18)) {
-    /*                            ^           */
-#ifdef DB_REP_STARTUPDONE
-      *iv_return = DB_REP_STARTUPDONE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_LOG_NOT_DURABLE", 18)) {
-    /*                            ^           */
-#ifdef DB_LOG_NOT_DURABLE
-      *iv_return = DB_LOG_NOT_DURABLE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_NOT_DURABLE", 18)) {
-    /*                            ^           */
-#ifdef DB_TXN_NOT_DURABLE
-      *iv_return = DB_TXN_NOT_DURABLE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "DB_TEST_PREDESTROY", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_PREDESTROY
-      *iv_return = DB_TEST_PREDESTROY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_TEST_ELECTVOTE1", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_ELECTVOTE1
-      *iv_return = DB_TEST_ELECTVOTE1;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_ELECTVOTE2", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_ELECTVOTE2
-      *iv_return = DB_TEST_ELECTVOTE2;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'W':
-    if (memEQ(name, "DB_TEST_ELECTWAIT1", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_ELECTWAIT1
-      *iv_return = DB_TEST_ELECTWAIT1;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_ELECTWAIT2", 18)) {
-    /*                            ^           */
-#ifdef DB_TEST_ELECTWAIT2
-      *iv_return = DB_TEST_ELECTWAIT2;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_PR_RECOVERYTEST", 18)) {
-    /*                            ^           */
-#ifdef DB_PR_RECOVERYTEST
-      *iv_return = DB_PR_RECOVERYTEST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_ENV_REGION_INIT", 18)) {
-    /*                            ^           */
-#ifdef DB_ENV_REGION_INIT
-      *iv_return = DB_ENV_REGION_INIT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOG_BUFFER_FULL", 18)) {
-    /*                            ^           */
-#ifdef DB_LOG_BUFFER_FULL
-      *iv_return = DB_LOG_BUFFER_FULL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_REP_HANDLE_DEAD", 18)) {
-    /*                            ^           */
-#ifdef DB_REP_HANDLE_DEAD
-      *iv_return = DB_REP_HANDLE_DEAD;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_19 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ENV_LOG_INMEMORY DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER
-     DB_LOCK_GET_TIMEOUT DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT
-     DB_REP_HOLDELECTION DB_SEQUENCE_VERSION DB_SET_LOCK_TIMEOUT
-     DB_STAT_LOCK_PARAMS DB_TEST_POSTDESTROY DB_TEST_POSTLOGMETA
-     DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL DB_TXN_LOG_UNDOREDO
-     DB_TXN_WRITE_NOSYNC DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT
-     DB_VERB_REPLICATION DB_VERSION_MISMATCH */
-  /* Offset 9 gives the best switch position.  */
-  switch (name[9]) {
-  case 'C':
-    if (memEQ(name, "DB_SEQUENCE_VERSION", 19)) {
-    /*                        ^                */
-#ifdef DB_SEQUENCE_VERSION
-      *iv_return = DB_SEQUENCE_VERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_SET_LOCK_TIMEOUT", 19)) {
-    /*                        ^                */
-#ifdef DB_SET_LOCK_TIMEOUT
-      *iv_return = DB_SET_LOCK_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_LOCK_GET_TIMEOUT", 19)) {
-    /*                        ^                */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_LOCK_GET_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_LOCK_SET_TIMEOUT", 19)) {
-    /*                        ^                */
-#ifdef DB_LOCK_SET_TIMEOUT
-      *iv_return = DB_LOCK_SET_TIMEOUT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_VERB_REPLICATION", 19)) {
-    /*                        ^                */
-#ifdef DB_VERB_REPLICATION
-      *iv_return = DB_VERB_REPLICATION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'G':
-    if (memEQ(name, "DB_ENV_LOG_INMEMORY", 19)) {
-    /*                        ^                */
-#ifdef DB_ENV_LOG_INMEMORY
-      *iv_return = DB_ENV_LOG_INMEMORY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_LOG_UNDOREDO", 19)) {
-    /*                        ^                */
-#ifdef DB_TXN_LOG_UNDOREDO
-      *iv_return = DB_TXN_LOG_UNDOREDO;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_TXN_WRITE_NOSYNC", 19)) {
-    /*                        ^                */
-#ifdef DB_TXN_WRITE_NOSYNC
-      *iv_return = DB_TXN_WRITE_NOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_REP_HOLDELECTION", 19)) {
-    /*                        ^                */
-#ifdef DB_REP_HOLDELECTION
-      *iv_return = DB_REP_HOLDELECTION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_VERSION_MISMATCH", 19)) {
-    /*                        ^                */
-#ifdef DB_VERSION_MISMATCH
-      *iv_return = DB_VERSION_MISMATCH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_STAT_LOCK_PARAMS", 19)) {
-    /*                        ^                */
-#ifdef DB_STAT_LOCK_PARAMS
-      *iv_return = DB_STAT_LOCK_PARAMS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) {
-    /*                        ^                */
-#ifdef DB_TEST_POSTDESTROY
-      *iv_return = DB_TEST_POSTDESTROY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TEST_POSTLOGMETA", 19)) {
-    /*                        ^                */
-#ifdef DB_TEST_POSTLOGMETA
-      *iv_return = DB_TEST_POSTLOGMETA;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "DB_ENV_REP_LOGSONLY", 19)) {
-    /*                        ^                */
-#ifdef DB_ENV_REP_LOGSONLY
-      *iv_return = DB_ENV_REP_LOGSONLY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_LOCK_FREE_LOCKER", 19)) {
-    /*                        ^                */
-#ifdef DB_LOCK_FREE_LOCKER
-      *iv_return = DB_LOCK_FREE_LOCKER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    if (memEQ(name, "DB_TXN_FORWARD_ROLL", 19)) {
-    /*                        ^                */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_TXN_FORWARD_ROLL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_PRIORITY_DEFAULT", 19)) {
-    /*                        ^                */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_PRIORITY_DEFAULT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'U':
-    if (memEQ(name, "DB_TEST_SUBDB_LOCKS", 19)) {
-    /*                        ^                */
-#ifdef DB_TEST_SUBDB_LOCKS
-      *iv_return = DB_TEST_SUBDB_LOCKS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_USE_ENVIRON_ROOT", 19)) {
-    /*                        ^                */
-#ifdef DB_USE_ENVIRON_ROOT
-      *iv_return = DB_USE_ENVIRON_ROOT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_UPDATE_SECONDARY", 19)) {
-    /*                        ^                */
-#ifdef DB_UPDATE_SECONDARY
-      *iv_return = DB_UPDATE_SECONDARY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_20 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_CXX_NO_EXCEPTIONS DB_LOGFILEID_INVALID DB_PANIC_ENVIRONMENT
-     DB_PRIORITY_VERY_LOW DB_STAT_LOCK_LOCKERS DB_STAT_LOCK_OBJECTS
-     DB_TXN_BACKWARD_ROLL DB_TXN_LOCK_OPTIMIST */
-  /* Offset 15 gives the best switch position.  */
-  switch (name[15]) {
-  case 'C':
-    if (memEQ(name, "DB_STAT_LOCK_LOCKERS", 20)) {
-    /*                              ^           */
-#ifdef DB_STAT_LOCK_LOCKERS
-      *iv_return = DB_STAT_LOCK_LOCKERS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'I':
-    if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) {
-    /*                              ^           */
-#ifdef DB_TXN_LOCK_OPTIMIST
-      *iv_return = DB_TXN_LOCK_OPTIMIST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'J':
-    if (memEQ(name, "DB_STAT_LOCK_OBJECTS", 20)) {
-    /*                              ^           */
-#ifdef DB_STAT_LOCK_OBJECTS
-      *iv_return = DB_STAT_LOCK_OBJECTS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_PANIC_ENVIRONMENT", 20)) {
-    /*                              ^           */
-#ifdef DB_PANIC_ENVIRONMENT
-      *iv_return = DB_PANIC_ENVIRONMENT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "DB_CXX_NO_EXCEPTIONS", 20)) {
-    /*                              ^           */
-#ifdef DB_CXX_NO_EXCEPTIONS
-      *iv_return = DB_CXX_NO_EXCEPTIONS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'V':
-    if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) {
-    /*                              ^           */
-#ifdef DB_LOGFILEID_INVALID
-      *iv_return = DB_LOGFILEID_INVALID;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'Y':
-    if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) {
-    /*                              ^           */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_PRIORITY_VERY_LOW;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) {
-    /*                              ^           */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 14)
-      *iv_return = DB_TXN_BACKWARD_ROLL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_21 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ENV_LOG_AUTOREMOVE DB_LOCK_UPGRADE_WRITE DB_PRIORITY_VERY_HIGH
-     DB_TXN_BACKWARD_ALLOC */
-  /* Offset 4 gives the best switch position.  */
-  switch (name[4]) {
-  case 'N':
-    if (memEQ(name, "DB_ENV_LOG_AUTOREMOVE", 21)) {
-    /*                   ^                       */
-#ifdef DB_ENV_LOG_AUTOREMOVE
-      *iv_return = DB_ENV_LOG_AUTOREMOVE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "DB_LOCK_UPGRADE_WRITE", 21)) {
-    /*                   ^                       */
-#if (DB_VERSION_MAJOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
-    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
-     DB_VERSION_PATCH >= 11)
-      *iv_return = DB_LOCK_UPGRADE_WRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) {
-    /*                   ^                       */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_PRIORITY_VERY_HIGH;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'X':
-    if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) {
-    /*                   ^                       */
-#if (DB_VERSION_MAJOR > 4) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
-    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
-     DB_VERSION_PATCH >= 24)
-      *iv_return = DB_TXN_BACKWARD_ALLOC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_22 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_ENV_RPCCLIENT_GIVEN DB_ENV_TIME_NOTGRANTED DB_ENV_TXN_NOT_DURABLE
-     DB_TXN_LOCK_OPTIMISTIC */
-  /* Offset 21 gives the best switch position.  */
-  switch (name[21]) {
-  case 'C':
-    if (memEQ(name, "DB_TXN_LOCK_OPTIMISTI", 21)) {
-    /*                                    C      */
-#ifdef DB_TXN_LOCK_OPTIMISTIC
-      *iv_return = DB_TXN_LOCK_OPTIMISTIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'D':
-    if (memEQ(name, "DB_ENV_TIME_NOTGRANTE", 21)) {
-    /*                                    D      */
-#ifdef DB_ENV_TIME_NOTGRANTED
-      *iv_return = DB_ENV_TIME_NOTGRANTED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'E':
-    if (memEQ(name, "DB_ENV_TXN_NOT_DURABL", 21)) {
-    /*                                    E      */
-#ifdef DB_ENV_TXN_NOT_DURABLE
-      *iv_return = DB_ENV_TXN_NOT_DURABLE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "DB_ENV_RPCCLIENT_GIVE", 21)) {
-    /*                                    N      */
-#ifdef DB_ENV_RPCCLIENT_GIVEN
-      *iv_return = DB_ENV_RPCCLIENT_GIVEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant (pTHX_ const char *name, STRLEN len, IV *iv_return, const char **pv_return) {
-  /* Initially switch on the length of the name.  */
-  /* When generated this function returned values for the list of names given
-     in this section of perl code.  Rather than manually editing these functions
-     to add or remove constants, which would result in this comment and section
-     of code becoming inaccurate, we recommend that you edit this section of
-     code, and use it to regenerate a new set of constant functions which you
-     then use to replace the originals.
-
-     Regenerate these constant functions by feeding this entire source file to
-     perl -x
-
-#!/usr/bin/perl5.8.3 -w
-use ExtUtils::Constant qw (constant_types C_constant XS_constant);
-
-my $types = {map {($_, 1)} qw(IV PV)};
-my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND
-	       DB_APPLY_LOGREG DB_APP_INIT DB_ARCH_ABS DB_ARCH_DATA DB_ARCH_LOG
-	       DB_ARCH_REMOVE DB_AUTO_COMMIT DB_BEFORE DB_BTREEMAGIC
-	       DB_BTREEOLDVER DB_BTREEVERSION DB_BUFFER_SMALL DB_CACHED_COUNTS
-	       DB_CDB_ALLDB DB_CHECKPOINT DB_CHKSUM DB_CHKSUM_SHA1 DB_CLIENT
-	       DB_CL_WRITER DB_COMMIT DB_CONSUME DB_CONSUME_WAIT DB_CREATE
-	       DB_CURLSN DB_CURRENT DB_CXX_NO_EXCEPTIONS DB_DEGREE_2 DB_DELETED
-	       DB_DELIMITER DB_DIRECT DB_DIRECT_DB DB_DIRECT_LOG DB_DIRTY_READ
-	       DB_DONOTINDEX DB_DSYNC_LOG DB_DUP DB_DUPCURSOR DB_DUPSORT
-	       DB_DURABLE_UNKNOWN DB_EID_BROADCAST DB_EID_INVALID DB_ENCRYPT
-	       DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_AUTO_COMMIT DB_ENV_CDB
-	       DB_ENV_CDB_ALLDB DB_ENV_CREATE DB_ENV_DBLOCAL DB_ENV_DIRECT_DB
-	       DB_ENV_DIRECT_LOG DB_ENV_DSYNC_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN
-	       DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_LOG_AUTOREMOVE
-	       DB_ENV_LOG_INMEMORY DB_ENV_NOLOCKING DB_ENV_NOMMAP
-	       DB_ENV_NOPANIC DB_ENV_OPEN_CALLED DB_ENV_OVERWRITE
-	       DB_ENV_PANIC_OK DB_ENV_PRIVATE DB_ENV_REGION_INIT
-	       DB_ENV_REP_CLIENT DB_ENV_REP_LOGSONLY DB_ENV_REP_MASTER
-	       DB_ENV_RPCCLIENT DB_ENV_RPCCLIENT_GIVEN DB_ENV_STANDALONE
-	       DB_ENV_SYSTEM_MEM DB_ENV_THREAD DB_ENV_TIME_NOTGRANTED
-	       DB_ENV_TXN DB_ENV_TXN_NOSYNC DB_ENV_TXN_NOT_DURABLE
-	       DB_ENV_TXN_WRITE_NOSYNC DB_ENV_USER_ALLOC DB_ENV_YIELDCPU
-	       DB_EXCL DB_EXTENT DB_FAST_STAT DB_FCNTL_LOCKING DB_FILEOPEN
-	       DB_FILE_ID_LEN DB_FIRST DB_FIXEDLEN DB_FLUSH DB_FORCE DB_GETREC
-	       DB_GET_BOTH DB_GET_BOTHC DB_GET_BOTH_RANGE DB_GET_RECNO
-	       DB_HANDLE_LOCK DB_HASHMAGIC DB_HASHOLDVER DB_HASHVERSION
-	       DB_INCOMPLETE DB_INIT_CDB DB_INIT_LOCK DB_INIT_LOG DB_INIT_MPOOL
-	       DB_INIT_REP DB_INIT_TXN DB_INORDER DB_JAVA_CALLBACK DB_JOINENV
-	       DB_JOIN_ITEM DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST
-	       DB_KEYLAST DB_LAST DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION
-	       DB_LOCK_ABORT DB_LOCK_CONFLICT DB_LOCK_DEADLOCK DB_LOCK_DEFAULT
-	       DB_LOCK_EXPIRE DB_LOCK_FREE_LOCKER DB_LOCK_MAXLOCKS
-	       DB_LOCK_MAXWRITE DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NORUN
-	       DB_LOCK_NOTEXIST DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD
-	       DB_LOCK_NOWAIT DB_LOCK_OLDEST DB_LOCK_RANDOM DB_LOCK_RECORD
-	       DB_LOCK_REMOVE DB_LOCK_RIW_N DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT
-	       DB_LOCK_SWITCH DB_LOCK_UPGRADE DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE
-	       DB_LOGFILEID_INVALID DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION
-	       DB_LOG_AUTOREMOVE DB_LOG_BUFFER_FULL DB_LOG_CHKPNT DB_LOG_COMMIT
-	       DB_LOG_DISK DB_LOG_INMEMORY DB_LOG_LOCKED DB_LOG_NOCOPY
-	       DB_LOG_NOT_DURABLE DB_LOG_PERM DB_LOG_RESEND DB_LOG_SILENT_ERR
-	       DB_LOG_WRNOSYNC DB_MAX_PAGES DB_MAX_RECORDS DB_MPOOL_CLEAN
-	       DB_MPOOL_CREATE DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT
-	       DB_MPOOL_FREE DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP
-	       DB_MPOOL_NOFILE DB_MPOOL_PRIVATE DB_MPOOL_UNLINK DB_MULTIPLE
-	       DB_MULTIPLE_KEY DB_MUTEXDEBUG DB_MUTEXLOCKS DB_NEEDSPLIT DB_NEXT
-	       DB_NEXT_DUP DB_NEXT_NODUP DB_NOCOPY DB_NODUPDATA DB_NOLOCKING
-	       DB_NOMMAP DB_NOORDERCHK DB_NOOVERWRITE DB_NOPANIC DB_NORECURSE
-	       DB_NOSERVER DB_NOSERVER_HOME DB_NOSERVER_ID DB_NOSYNC
-	       DB_NOTFOUND DB_NO_AUTO_COMMIT DB_ODDFILESIZE DB_OK_BTREE
-	       DB_OK_HASH DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED
-	       DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_OVERWRITE DB_PAD DB_PAGEYIELD
-	       DB_PAGE_LOCK DB_PAGE_NOTFOUND DB_PANIC_ENVIRONMENT DB_PERMANENT
-	       DB_POSITION DB_POSITIONI DB_PREV DB_PREV_NODUP DB_PRINTABLE
-	       DB_PRIVATE DB_PR_HEADERS DB_PR_PAGE DB_PR_RECOVERYTEST
-	       DB_QAMMAGIC DB_QAMOLDVER DB_QAMVERSION DB_RDONLY DB_RDWRMASTER
-	       DB_RECNUM DB_RECORDCOUNT DB_RECORD_LOCK DB_RECOVER
-	       DB_RECOVER_FATAL DB_REGION_ANON DB_REGION_INIT DB_REGION_MAGIC
-	       DB_REGION_NAME DB_REGISTERED DB_RENAMEMAGIC DB_RENUMBER
-	       DB_REP_CLIENT DB_REP_CREATE DB_REP_DUPMASTER DB_REP_EGENCHG
-	       DB_REP_HANDLE_DEAD DB_REP_HOLDELECTION DB_REP_ISPERM
-	       DB_REP_LOGREADY DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER
-	       DB_REP_NEWSITE DB_REP_NOBUFFER DB_REP_NOTPERM DB_REP_OUTDATED
-	       DB_REP_PAGEDONE DB_REP_PERMANENT DB_REP_STARTUPDONE
-	       DB_REP_UNAVAIL DB_REVSPLITOFF DB_RMW DB_RPCCLIENT
-	       DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_RUNRECOVERY DB_SALVAGE
-	       DB_SECONDARY_BAD DB_SEQUENCE_VERSION DB_SEQUENTIAL DB_SEQ_DEC
-	       DB_SEQ_INC DB_SEQ_RANGE_SET DB_SEQ_WRAP DB_SET
-	       DB_SET_LOCK_TIMEOUT DB_SET_RANGE DB_SET_RECNO DB_SET_TXN_NOW
-	       DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_ALL DB_STAT_CLEAR
-	       DB_STAT_LOCK_CONF DB_STAT_LOCK_LOCKERS DB_STAT_LOCK_OBJECTS
-	       DB_STAT_LOCK_PARAMS DB_STAT_MEMP_HASH DB_STAT_SUBSYSTEM
-	       DB_SURPRISE_KID DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY
-	       DB_TEST_ELECTINIT DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1
-	       DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2
-	       DB_TEST_POSTDESTROY DB_TEST_POSTLOG DB_TEST_POSTLOGMETA
-	       DB_TEST_POSTOPEN DB_TEST_POSTRENAME DB_TEST_POSTSYNC
-	       DB_TEST_PREDESTROY DB_TEST_PREOPEN DB_TEST_PRERENAME
-	       DB_TEST_SUBDB_LOCKS DB_THREAD DB_TIMEOUT DB_TIME_NOTGRANTED
-	       DB_TRUNCATE DB_TXNMAGIC DB_TXNVERSION DB_TXN_CKP DB_TXN_LOCK
-	       DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK DB_TXN_LOCK_OPTIMIST
-	       DB_TXN_LOCK_OPTIMISTIC DB_TXN_LOG_MASK DB_TXN_LOG_REDO
-	       DB_TXN_LOG_UNDO DB_TXN_LOG_UNDOREDO DB_TXN_NOSYNC
-	       DB_TXN_NOT_DURABLE DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC
-	       DB_TXN_UNDO DB_TXN_WRITE_NOSYNC DB_UNREF DB_UPDATE_SECONDARY
-	       DB_UPGRADE DB_USE_ENVIRON DB_USE_ENVIRON_ROOT DB_VERB_CHKPOINT
-	       DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_REPLICATION
-	       DB_VERB_WAITSFOR DB_VERIFY DB_VERIFY_BAD DB_VERIFY_FATAL
-	       DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_MISMATCH
-	       DB_VERSION_PATCH DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK
-	       DB_WRITEOPEN DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU
-	       DB_debug_FLAG DB_user_BEGIN),
-            {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_LOCK_INHERIT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \\\n     DB_VERSION_PATCH >= 1)\n", "#endif\n"]},
-            {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n     DB_VERSION_PATCH >= 11)\n", "#endif\n"]},
-            {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_QUEUE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 55)\n", "#endif\n"]},
-            {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 14)\n", "#endif\n"]},
-            {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n    (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n     DB_VERSION_PATCH >= 11)\n", "#endif\n"]},
-            {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n    (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n     DB_VERSION_PATCH >= 24)\n", "#endif\n"]},
-            {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n    (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n     DB_VERSION_PATCH >= 3)\n", "#endif\n"]},
-            {name=>"DB_VERSION_STRING", type=>"PV"});
-
-print constant_types(); # macro defs
-foreach (C_constant ("BerkeleyDB", 'constant', 'IV', $types, undef, 3, @names) ) {
-    print $_, "\n"; # C constant subs
-}
-print "#### XS Section:\n";
-print XS_constant ("BerkeleyDB", $types);
-__END__
-   */
-
-  switch (len) {
-  case 6:
-    return constant_6 (aTHX_ name, iv_return);
-    break;
-  case 7:
-    return constant_7 (aTHX_ name, iv_return);
-    break;
-  case 8:
-    return constant_8 (aTHX_ name, iv_return);
-    break;
-  case 9:
-    return constant_9 (aTHX_ name, iv_return);
-    break;
-  case 10:
-    return constant_10 (aTHX_ name, iv_return);
-    break;
-  case 11:
-    return constant_11 (aTHX_ name, iv_return);
-    break;
-  case 12:
-    return constant_12 (aTHX_ name, iv_return);
-    break;
-  case 13:
-    return constant_13 (aTHX_ name, iv_return);
-    break;
-  case 14:
-    return constant_14 (aTHX_ name, iv_return);
-    break;
-  case 15:
-    return constant_15 (aTHX_ name, iv_return);
-    break;
-  case 16:
-    return constant_16 (aTHX_ name, iv_return);
-    break;
-  case 17:
-    return constant_17 (aTHX_ name, iv_return, pv_return);
-    break;
-  case 18:
-    return constant_18 (aTHX_ name, iv_return);
-    break;
-  case 19:
-    return constant_19 (aTHX_ name, iv_return);
-    break;
-  case 20:
-    return constant_20 (aTHX_ name, iv_return);
-    break;
-  case 21:
-    return constant_21 (aTHX_ name, iv_return);
-    break;
-  case 22:
-    return constant_22 (aTHX_ name, iv_return);
-    break;
-  case 23:
-    if (memEQ(name, "DB_ENV_TXN_WRITE_NOSYNC", 23)) {
-#ifdef DB_ENV_TXN_WRITE_NOSYNC
-      *iv_return = DB_ENV_TXN_WRITE_NOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/constants.xs b/storage/bdb/perl/BerkeleyDB/constants.xs
deleted file mode 100644
index 1b2c8b2c3c8..00000000000
--- a/storage/bdb/perl/BerkeleyDB/constants.xs
+++ /dev/null
@@ -1,87 +0,0 @@
-void
-constant(sv)
-    PREINIT:
-#ifdef dXSTARG
-	dXSTARG; /* Faster if we have it.  */
-#else
-	dTARGET;
-#endif
-	STRLEN		len;
-        int		type;
-	IV		iv;
-	/* NV		nv;	Uncomment this if you need to return NVs */
-	const char	*pv;
-    INPUT:
-	SV *		sv;
-        const char *	s = SvPV(sv, len);
-    PPCODE:
-        /* Change this to constant(aTHX_ s, len, &iv, &nv);
-           if you need to return both NVs and IVs */
-	type = constant(aTHX_ s, len, &iv, &pv);
-      /* Return 1 or 2 items. First is error message, or undef if no error.
-           Second, if present, is found value */
-        switch (type) {
-        case PERL_constant_NOTFOUND:
-          sv = sv_2mortal(newSVpvf("%s is not a valid BerkeleyDB macro", s));
-          PUSHs(sv);
-          break;
-        case PERL_constant_NOTDEF:
-          sv = sv_2mortal(newSVpvf(
-	    "Your vendor has not defined BerkeleyDB macro %s, used", s));
-          PUSHs(sv);
-          break;
-        case PERL_constant_ISIV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHi(iv);
-          break;
-	/* Uncomment this if you need to return NOs
-        case PERL_constant_ISNO:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(&PL_sv_no);
-          break; */
-	/* Uncomment this if you need to return NVs
-        case PERL_constant_ISNV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHn(nv);
-          break; */
-        case PERL_constant_ISPV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHp(pv, strlen(pv));
-          break;
-	/* Uncomment this if you need to return PVNs
-        case PERL_constant_ISPVN:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHp(pv, iv);
-          break; */
-	/* Uncomment this if you need to return SVs
-        case PERL_constant_ISSV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(sv);
-          break; */
-	/* Uncomment this if you need to return UNDEFs
-        case PERL_constant_ISUNDEF:
-          break; */
-	/* Uncomment this if you need to return UVs
-        case PERL_constant_ISUV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHu((UV)iv);
-          break; */
-	/* Uncomment this if you need to return YESs
-        case PERL_constant_ISYES:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(&PL_sv_yes);
-          break; */
-        default:
-          sv = sv_2mortal(newSVpvf(
-	    "Unexpected return type %d while processing BerkeleyDB macro %s, used",
-               type, s));
-          PUSHs(sv);
-        }
diff --git a/storage/bdb/perl/BerkeleyDB/dbinfo b/storage/bdb/perl/BerkeleyDB/dbinfo
deleted file mode 100755
index 421d36c6260..00000000000
--- a/storage/bdb/perl/BerkeleyDB/dbinfo
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/local/bin/perl
-
-# Name:		dbinfo -- identify berkeley DB version used to create 
-#			  a database file
-#
-# Author:	Paul Marquess  
-# Version: 	1.05 
-# Date		1sh November 2003
-#
-#     Copyright (c) 1998-2003 Paul Marquess. All rights reserved.
-#     This program is free software; you can redistribute it and/or
-#     modify it under the same terms as Perl itself.
-
-# Todo: Print more stats on a db file, e.g. no of records
-#       add log/txn/lock files
-
-use strict ;
-
-my %Data =
-	(
-	0x053162 =>	{
-			  Type 	   => "Btree",
-			  Versions => 
-				{
-				  1	=> [0, "Unknown (older than 1.71)"],
-				  2	=> [0, "Unknown (older than 1.71)"],
-				  3	=> [0, "1.71 -> 1.85, 1.86"],
-				  4	=> [0, "Unknown"],
-				  5	=> [0, "2.0.0 -> 2.3.0"],
-				  6	=> [0, "2.3.1 -> 2.7.7"],
-				  7	=> [0, "3.0.x"],
-				  8	=> [0, "3.1.x -> 4.0.x"],
-				  9	=> [1, "4.1.x or greater"],
-				}
-			},
-	0x061561 => 	{
-			  Type     => "Hash",
-			  Versions =>
-				{
-				  1	=> [0, "Unknown (older than 1.71)"],
-        			  2     => [0, "1.71 -> 1.85"],
-        			  3     => [0, "1.86"],
-        			  4     => [0, "2.0.0 -> 2.1.0"],
-        			  5     => [0, "2.2.6 -> 2.7.7"],
-        			  6     => [0, "3.0.x"],
-				  7	=> [0, "3.1.x -> 4.0.x"],
-				  8	=> [1, "4.1.x or greater"],
-				}
-			},
-	0x042253 => 	{
-			  Type     => "Queue",
-			  Versions =>
-				{
-				  1	=> [0, "3.0.x"],
-				  2	=> [0, "3.1.x"],
-				  3	=> [0, "3.2.x -> 4.0.x"],
-				  4	=> [1, "4.1.x or greater"],
-				}
-			},
-	) ;
-
-die "Usage: dbinfo file\n" unless @ARGV == 1 ;
-
-print "testing file $ARGV[0]...\n\n" ;
-open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
-
-my $buff ;
-read F, $buff, 30 ;
-
-
-my (@info) = unpack("NNNNNNC", $buff) ;
-my (@info1) = unpack("VVVVVVC", $buff) ;
-my ($magic, $version, $endian, $encrypt) ;
-
-if ($Data{$info[0]}) # first try DB 1.x format, big endian
-{
-    $magic = $info[0] ;
-    $version = $info[1] ;
-    $endian  = "Big Endian" ;
-    $encrypt = "Not Supported";
-}
-elsif ($Data{$info1[0]}) # first try DB 1.x format, little endian
-{
-    $magic = $info1[0] ;
-    $version = $info1[1] ;
-    $endian  = "Little Endian" ;
-    $encrypt = "Not Supported";
-}
-elsif ($Data{$info[3]}) # next DB 2.x big endian
-{
-    $magic = $info[3] ;
-    $version = $info[4] ;
-    $endian  = "Big Endian" ;
-}
-elsif ($Data{$info1[3]}) # next DB 2.x little endian
-{
-    $magic = $info1[3] ;
-    $version = $info1[4] ;
-    $endian  = "Little Endian" ;
-}
-else
-  { die "not a Berkeley DB database file.\n" }
-
-my $type = $Data{$magic} ;
-$magic = sprintf "%06X", $magic ;
-
-my $ver_string = "Unknown" ;
-
-if ( defined $type->{Versions}{$version} )
-{
-     $ver_string = $type->{Versions}{$version}[1];
-     if ($type->{Versions}{$version}[0] )
-       { $encrypt = $info[6] ? "Enabled" : "Disabled" }
-     else
-       { $encrypt = "Not Supported" }
-}
-
-print <{Type} file.
-File Version ID:	$version
-Built with Berkeley DB:	$ver_string
-Byte Order:		$endian
-Magic:			$magic
-Encryption:             $encrypt
-EOM
-
-close F ;
-
-exit ;
diff --git a/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl b/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
deleted file mode 100644
index 6d7faeed2e2..00000000000
--- a/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
+++ /dev/null
@@ -1 +0,0 @@
-$self->{LIBS} = [ "@{$self->{LIBS}} -lpthreads" ];
diff --git a/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl b/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
deleted file mode 100644
index b531673e6e0..00000000000
--- a/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
+++ /dev/null
@@ -1 +0,0 @@
-$self->{LIBS} = [ "@{$self->{LIBS}} -lthread" ];
diff --git a/storage/bdb/perl/BerkeleyDB/hints/solaris.pl b/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
deleted file mode 100644
index ddd941d634a..00000000000
--- a/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
+++ /dev/null
@@ -1 +0,0 @@
-$self->{LIBS} = [ "@{$self->{LIBS}} -lmt" ];
diff --git a/storage/bdb/perl/BerkeleyDB/mkconsts b/storage/bdb/perl/BerkeleyDB/mkconsts
deleted file mode 100644
index 0383378e423..00000000000
--- a/storage/bdb/perl/BerkeleyDB/mkconsts
+++ /dev/null
@@ -1,799 +0,0 @@
-#!/usr/bin/perl
-
-use ExtUtils::Constant qw(WriteConstants); 
-
-use constant DEFINE => 'define' ;
-use constant STRING => 'string' ;
-use constant IGNORE => 'ignore' ;
-
-%constants = (
-
-
-	#########
-	# 2.0.3
-	#########
-
-	DBM_INSERT                 => IGNORE,
-	DBM_REPLACE                => IGNORE,
-	DBM_SUFFIX                 => IGNORE,
-	DB_AFTER                   => DEFINE,
-	DB_AM_DUP                  => IGNORE,
-	DB_AM_INMEM                => IGNORE,
-	DB_AM_LOCKING              => IGNORE,
-	DB_AM_LOGGING              => IGNORE,
-	DB_AM_MLOCAL               => IGNORE,
-	DB_AM_PGDEF                => IGNORE,
-	DB_AM_RDONLY               => IGNORE,
-	DB_AM_RECOVER              => IGNORE,
-	DB_AM_SWAP                 => IGNORE,
-	DB_AM_TXN                  => IGNORE,
-	DB_APP_INIT                => DEFINE,
-	DB_BEFORE                  => DEFINE,
-	DB_BTREEMAGIC              => DEFINE,
-	DB_BTREEVERSION            => DEFINE,
-	DB_BT_DELIMITER            => IGNORE,
-	DB_BT_EOF                  => IGNORE,
-	DB_BT_FIXEDLEN             => IGNORE,
-	DB_BT_PAD                  => IGNORE,
-	DB_BT_SNAPSHOT             => IGNORE,
-	DB_CHECKPOINT              => DEFINE,
-	DB_CREATE                  => DEFINE,
-	DB_CURRENT                 => DEFINE,
-	DB_DBT_INTERNAL            => IGNORE,
-	DB_DBT_MALLOC              => IGNORE,
-	DB_DBT_PARTIAL             => IGNORE,
-	DB_DBT_USERMEM             => IGNORE,
-	DB_DELETED                 => DEFINE,
-	DB_DELIMITER               => DEFINE,
-	DB_DUP                     => DEFINE,
-	DB_EXCL                    => DEFINE,
-	DB_FIRST                   => DEFINE,
-	DB_FIXEDLEN                => DEFINE,
-	DB_FLUSH                   => DEFINE,
-	DB_HASHMAGIC               => DEFINE,
-	DB_HASHVERSION             => DEFINE,
-	DB_HS_DIRTYMETA            => IGNORE,
-	DB_INCOMPLETE              => DEFINE,
-	DB_INIT_LOCK               => DEFINE,
-	DB_INIT_LOG                => DEFINE,
-	DB_INIT_MPOOL              => DEFINE,
-	DB_INIT_TXN                => DEFINE,
-	DB_KEYEXIST                => DEFINE,
-	DB_KEYFIRST                => DEFINE,
-	DB_KEYLAST                 => DEFINE,
-	DB_LAST                    => DEFINE,
-	DB_LOCKMAGIC               => DEFINE,
-	DB_LOCKVERSION             => DEFINE,
-	DB_LOCK_DEADLOCK           => DEFINE,
-	DB_LOCK_NOTGRANTED         => DEFINE,
-	DB_LOCK_NOTHELD            => DEFINE,
-	DB_LOCK_NOWAIT             => DEFINE,
-	DB_LOCK_RIW_N              => DEFINE,
-	DB_LOCK_RW_N               => DEFINE,
-	DB_LOGMAGIC                => DEFINE,
-	DB_LOGVERSION              => DEFINE,
-	DB_MAX_PAGES               => DEFINE,
-	DB_MAX_RECORDS             => DEFINE,
-	DB_MPOOL_CLEAN             => DEFINE,
-	DB_MPOOL_CREATE            => DEFINE,
-	DB_MPOOL_DIRTY             => DEFINE,
-	DB_MPOOL_DISCARD           => DEFINE,
-	DB_MPOOL_LAST              => DEFINE,
-	DB_MPOOL_NEW               => DEFINE,
-	DB_MPOOL_PRIVATE           => DEFINE,
-	DB_MUTEXDEBUG              => DEFINE,
-	DB_NEEDSPLIT               => DEFINE,
-	DB_NEXT                    => DEFINE,
-	DB_NOOVERWRITE             => DEFINE,
-	DB_NORECURSE               => DEFINE,
-	DB_NOSYNC                  => DEFINE,
-	DB_NOTFOUND                => DEFINE,
-	DB_PAD                     => DEFINE,
-	DB_PREV                    => DEFINE,
-	DB_RDONLY                  => DEFINE,
-	DB_REGISTERED              => DEFINE,
-	DB_RE_MODIFIED             => IGNORE,
-	DB_SEQUENTIAL              => DEFINE,
-	DB_SET                     => DEFINE,
-	DB_SET_RANGE               => DEFINE,
-	DB_SNAPSHOT                => DEFINE,
-	DB_SWAPBYTES               => DEFINE,
-	DB_TEMPORARY               => DEFINE,
-	DB_TRUNCATE                => DEFINE,
-	DB_TXNMAGIC                => DEFINE,
-	DB_TXNVERSION              => DEFINE,
-	DB_TXN_BACKWARD_ROLL       => DEFINE,
-	DB_TXN_FORWARD_ROLL        => DEFINE,
-	DB_TXN_LOCK_2PL            => DEFINE,
-	DB_TXN_LOCK_MASK           => DEFINE,
-	DB_TXN_LOCK_OPTIMISTIC     => DEFINE,
-	DB_TXN_LOG_MASK            => DEFINE,
-	DB_TXN_LOG_REDO            => DEFINE,
-	DB_TXN_LOG_UNDO            => DEFINE,
-	DB_TXN_LOG_UNDOREDO        => DEFINE,
-	DB_TXN_OPENFILES           => DEFINE,
-	DB_TXN_REDO                => DEFINE,
-	DB_TXN_UNDO                => DEFINE,
-	DB_USE_ENVIRON             => DEFINE,
-	DB_USE_ENVIRON_ROOT        => DEFINE,
-	DB_VERSION_MAJOR           => DEFINE,
-	DB_VERSION_MINOR           => DEFINE,
-	DB_VERSION_PATCH           => DEFINE,
-	DB_VERSION_STRING          => STRING,
-	_DB_H_                     => IGNORE,
-	__BIT_TYPES_DEFINED__      => IGNORE,
-	const                      => IGNORE,
-
-	# enum DBTYPE
-	DB_BTREE                   => '2.0.3',
-	DB_HASH                    => '2.0.3',
-	DB_RECNO                   => '2.0.3',
-	DB_UNKNOWN                 => '2.0.3',
-
-	# enum db_lockop_t
-	DB_LOCK_DUMP               => '2.0.3',
-	DB_LOCK_GET                => '2.0.3',
-	DB_LOCK_PUT                => '2.0.3',
-	DB_LOCK_PUT_ALL            => '2.0.3',
-	DB_LOCK_PUT_OBJ            => '2.0.3',
-
-	# enum db_lockmode_t
-	DB_LOCK_NG                 => IGNORE, # 2.0.3
-	DB_LOCK_READ               => IGNORE, # 2.0.3
-	DB_LOCK_WRITE              => IGNORE, # 2.0.3
-	DB_LOCK_IREAD              => IGNORE, # 2.0.3
-	DB_LOCK_IWRITE             => IGNORE, # 2.0.3
-	DB_LOCK_IWR                => IGNORE, # 2.0.3
-
-	# enum ACTION
-	FIND                       => IGNORE, # 2.0.3
-	ENTER                      => IGNORE, # 2.0.3
-
-	#########
-	# 2.1.0
-	#########
-
-	DB_NOMMAP                  => DEFINE,
-
-	#########
-	# 2.2.6
-	#########
-
-	DB_AM_THREAD               => IGNORE,
-	DB_ARCH_ABS                => DEFINE,
-	DB_ARCH_DATA               => DEFINE,
-	DB_ARCH_LOG                => DEFINE,
-	DB_LOCK_CONFLICT           => DEFINE,
-	DB_LOCK_DEFAULT            => DEFINE,
-	DB_LOCK_NORUN              => DEFINE,
-	DB_LOCK_OLDEST             => DEFINE,
-	DB_LOCK_RANDOM             => DEFINE,
-	DB_LOCK_YOUNGEST           => DEFINE,
-	DB_RECOVER                 => DEFINE,
-	DB_RECOVER_FATAL           => DEFINE,
-	DB_THREAD                  => DEFINE,
-	DB_TXN_NOSYNC              => DEFINE,
-
-	#########
-	# 2.3.0
-	#########
-
-	DB_BTREEOLDVER             => DEFINE,
-	DB_BT_RECNUM               => IGNORE,
-	DB_FILE_ID_LEN             => DEFINE,
-	DB_GETREC                  => DEFINE,
-	DB_HASHOLDVER              => DEFINE,
-	DB_KEYEMPTY                => DEFINE,
-	DB_LOGOLDVER               => DEFINE,
-	DB_RECNUM                  => DEFINE,
-	DB_RECORDCOUNT             => DEFINE,
-	DB_RENUMBER                => DEFINE,
-	DB_RE_DELIMITER            => IGNORE,
-	DB_RE_FIXEDLEN             => IGNORE,
-	DB_RE_PAD                  => IGNORE,
-	DB_RE_RENUMBER             => IGNORE,
-	DB_RE_SNAPSHOT             => IGNORE,
-
-	#########
-	# 2.3.10
-	#########
-
-	DB_APPEND                  => DEFINE,
-	DB_GET_RECNO               => DEFINE,
-	DB_SET_RECNO               => DEFINE,
-	DB_TXN_CKP                 => DEFINE,
-
-	#########
-	# 2.3.11
-	#########
-
-	DB_ENV_APPINIT             => DEFINE,
-	DB_ENV_STANDALONE          => DEFINE,
-	DB_ENV_THREAD              => DEFINE,
-
-	#########
-	# 2.3.12
-	#########
-
-	DB_FUNC_CALLOC             => IGNORE,
-	DB_FUNC_CLOSE              => IGNORE,
-	DB_FUNC_DIRFREE            => IGNORE,
-	DB_FUNC_DIRLIST            => IGNORE,
-	DB_FUNC_EXISTS             => IGNORE,
-	DB_FUNC_FREE               => IGNORE,
-	DB_FUNC_FSYNC              => IGNORE,
-	DB_FUNC_IOINFO             => IGNORE,
-	DB_FUNC_MALLOC             => IGNORE,
-	DB_FUNC_MAP                => IGNORE,
-	DB_FUNC_OPEN               => IGNORE,
-	DB_FUNC_READ               => IGNORE,
-	DB_FUNC_REALLOC            => IGNORE,
-	DB_FUNC_SEEK               => IGNORE,
-	DB_FUNC_SLEEP              => IGNORE,
-	DB_FUNC_STRDUP             => IGNORE,
-	DB_FUNC_UNLINK             => IGNORE,
-	DB_FUNC_UNMAP              => IGNORE,
-	DB_FUNC_WRITE              => IGNORE,
-	DB_FUNC_YIELD              => IGNORE,
-
-	#########
-	# 2.3.14
-	#########
-
-	DB_TSL_SPINS               => IGNORE,
-
-	#########
-	# 2.3.16
-	#########
-
-	DB_DBM_HSEARCH             => IGNORE,
-	firstkey                   => IGNORE,
-	hdestroy                   => IGNORE,
-
-	#########
-	# 2.4.10
-	#########
-
-	DB_CURLSN                  => DEFINE,
-	DB_FUNC_RUNLINK            => IGNORE,
-	DB_REGION_ANON             => DEFINE,
-	DB_REGION_INIT             => DEFINE,
-	DB_REGION_NAME             => DEFINE,
-	DB_TXN_LOCK_OPTIMIST       => DEFINE,
-	__CURRENTLY_UNUSED         => IGNORE,
-
-	# enum db_status_t
-	DB_LSTAT_ABORTED           => IGNORE, # 2.4.10
-	DB_LSTAT_ERR               => IGNORE, # 2.4.10
-	DB_LSTAT_FREE              => IGNORE, # 2.4.10
-	DB_LSTAT_HELD              => IGNORE, # 2.4.10
-	DB_LSTAT_NOGRANT           => IGNORE, # 2.4.10
-	DB_LSTAT_PENDING           => IGNORE, # 2.4.10
-	DB_LSTAT_WAITING           => IGNORE, # 2.4.10
-
-	#########
-	# 2.4.14
-	#########
-
-	DB_MUTEXLOCKS              => DEFINE,
-	DB_PAGEYIELD               => DEFINE,
-	__UNUSED_100               => IGNORE,
-	__UNUSED_4000              => IGNORE,
-
-	#########
-	# 2.5.9
-	#########
-
-	DBC_CONTINUE               => IGNORE,
-	DBC_KEYSET                 => IGNORE,
-	DBC_RECOVER                => IGNORE,
-	DBC_RMW                    => IGNORE,
-	DB_DBM_ERROR               => IGNORE,
-	DB_DUPSORT                 => DEFINE,
-	DB_GET_BOTH                => DEFINE,
-	DB_JOIN_ITEM               => DEFINE,
-	DB_NEXT_DUP                => DEFINE,
-	DB_OPFLAGS_MASK            => DEFINE,
-	DB_RMW                     => DEFINE,
-	DB_RUNRECOVERY             => DEFINE,
-	dbmclose                   => IGNORE,
-
-	#########
-	# 2.6.4
-	#########
-
-	DBC_WRITER                 => IGNORE,
-	DB_AM_CDB                  => IGNORE,
-	DB_ENV_CDB                 => DEFINE,
-	DB_INIT_CDB                => DEFINE,
-	DB_LOCK_UPGRADE            => DEFINE,
-	DB_WRITELOCK               => DEFINE,
-
-	#########
-	# 2.7.1
-	#########
-
-
-	# enum db_lockop_t
-	DB_LOCK_INHERIT            => '2.7.1',
-
-	#########
-	# 2.7.7
-	#########
-
-	DB_FCNTL_LOCKING           => DEFINE,
-
-	#########
-	# 3.0.55
-	#########
-
-	DBC_WRITECURSOR            => IGNORE,
-	DB_AM_DISCARD              => IGNORE,
-	DB_AM_SUBDB                => IGNORE,
-	DB_BT_REVSPLIT             => IGNORE,
-	DB_CONSUME                 => DEFINE,
-	DB_CXX_NO_EXCEPTIONS       => DEFINE,
-	DB_DBT_REALLOC             => IGNORE,
-	DB_DUPCURSOR               => DEFINE,
-	DB_ENV_CREATE              => DEFINE,
-	DB_ENV_DBLOCAL             => DEFINE,
-	DB_ENV_LOCKDOWN            => DEFINE,
-	DB_ENV_LOCKING             => DEFINE,
-	DB_ENV_LOGGING             => DEFINE,
-	DB_ENV_NOMMAP              => DEFINE,
-	DB_ENV_OPEN_CALLED         => DEFINE,
-	DB_ENV_PRIVATE             => DEFINE,
-	DB_ENV_SYSTEM_MEM          => DEFINE,
-	DB_ENV_TXN                 => DEFINE,
-	DB_ENV_TXN_NOSYNC          => DEFINE,
-	DB_ENV_USER_ALLOC          => DEFINE,
-	DB_FORCE                   => DEFINE,
-	DB_LOCKDOWN                => DEFINE,
-	DB_LOCK_RECORD             => DEFINE,
-	DB_LOGFILEID_INVALID       => DEFINE,
-	DB_MPOOL_NEW_GROUP         => DEFINE,
-	DB_NEXT_NODUP              => DEFINE,
-	DB_OK_BTREE                => DEFINE,
-	DB_OK_HASH                 => DEFINE,
-	DB_OK_QUEUE                => DEFINE,
-	DB_OK_RECNO                => DEFINE,
-	DB_OLD_VERSION             => DEFINE,
-	DB_OPEN_CALLED             => DEFINE,
-	DB_PAGE_LOCK               => DEFINE,
-	DB_POSITION                => DEFINE,
-	DB_POSITIONI               => DEFINE,
-	DB_PRIVATE                 => DEFINE,
-	DB_QAMMAGIC                => DEFINE,
-	DB_QAMOLDVER               => DEFINE,
-	DB_QAMVERSION              => DEFINE,
-	DB_RECORD_LOCK             => DEFINE,
-	DB_REVSPLITOFF             => DEFINE,
-	DB_SYSTEM_MEM              => DEFINE,
-	DB_TEST_POSTLOG            => DEFINE,
-	DB_TEST_POSTLOGMETA        => DEFINE,
-	DB_TEST_POSTOPEN           => DEFINE,
-	DB_TEST_POSTRENAME         => DEFINE,
-	DB_TEST_POSTSYNC           => DEFINE,
-	DB_TEST_PREOPEN            => DEFINE,
-	DB_TEST_PRERENAME          => DEFINE,
-	DB_TXN_NOWAIT              => DEFINE,
-	DB_TXN_SYNC                => DEFINE,
-	DB_UPGRADE                 => DEFINE,
-	DB_VERB_CHKPOINT           => DEFINE,
-	DB_VERB_DEADLOCK           => DEFINE,
-	DB_VERB_RECOVERY           => DEFINE,
-	DB_VERB_WAITSFOR           => DEFINE,
-	DB_WRITECURSOR             => DEFINE,
-	DB_XA_CREATE               => DEFINE,
-
-	# enum DBTYPE
-	DB_QUEUE                   => '3.0.55',
-
-	#########
-	# 3.1.14
-	#########
-
-	DBC_ACTIVE                 => IGNORE,
-	DBC_OPD                    => IGNORE,
-	DBC_TRANSIENT              => IGNORE,
-	DBC_WRITEDUP               => IGNORE,
-	DB_AGGRESSIVE              => DEFINE,
-	DB_AM_DUPSORT              => IGNORE,
-	DB_CACHED_COUNTS           => DEFINE,
-	DB_CLIENT                  => DEFINE,
-	DB_DBT_DUPOK               => IGNORE,
-	DB_DBT_ISSET               => IGNORE,
-	DB_ENV_RPCCLIENT           => DEFINE,
-	DB_GET_BOTHC               => DEFINE,
-	DB_JOIN_NOSORT             => DEFINE,
-	DB_NODUPDATA               => DEFINE,
-	DB_NOORDERCHK              => DEFINE,
-	DB_NOSERVER                => DEFINE,
-	DB_NOSERVER_HOME           => DEFINE,
-	DB_NOSERVER_ID             => DEFINE,
-	DB_ODDFILESIZE             => DEFINE,
-	DB_ORDERCHKONLY            => DEFINE,
-	DB_PREV_NODUP              => DEFINE,
-	DB_PR_HEADERS              => DEFINE,
-	DB_PR_PAGE                 => DEFINE,
-	DB_PR_RECOVERYTEST         => DEFINE,
-	DB_RDWRMASTER              => DEFINE,
-	DB_SALVAGE                 => DEFINE,
-	DB_VERIFY_BAD              => DEFINE,
-	DB_VERIFY_FATAL            => DEFINE,
-	DB_VRFY_FLAGMASK           => DEFINE,
-
-	# enum db_recops
-	DB_TXN_ABORT               => '3.1.14',
-	DB_TXN_BACKWARD_ROLL       => '3.1.14',
-	DB_TXN_FORWARD_ROLL        => '3.1.14',
-	DB_TXN_OPENFILES           => '3.1.14',
-
-	#########
-	# 3.2.9
-	#########
-
-	DBC_COMPENSATE             => IGNORE,
-	DB_ALREADY_ABORTED         => DEFINE,
-	DB_AM_VERIFYING            => IGNORE,
-	DB_CDB_ALLDB               => DEFINE,
-	DB_CONSUME_WAIT            => DEFINE,
-	DB_ENV_CDB_ALLDB           => DEFINE,
-	DB_EXTENT                  => DEFINE,
-	DB_JAVA_CALLBACK           => DEFINE,
-	DB_JOINENV                 => DEFINE,
-	DB_LOCK_SWITCH             => DEFINE,
-	DB_MPOOL_EXTENT            => DEFINE,
-	DB_REGION_MAGIC            => DEFINE,
-	DB_VERIFY                  => DEFINE,
-
-	# enum db_lockmode_t
-	DB_LOCK_WAIT               => IGNORE, # 3.2.9
-
-	#########
-	# 3.3.11
-	#########
-
-	DBC_DIRTY_READ             => IGNORE,
-	DBC_MULTIPLE               => IGNORE,
-	DBC_MULTIPLE_KEY           => IGNORE,
-	DB_AM_DIRTY                => IGNORE,
-	DB_AM_SECONDARY            => IGNORE,
-	DB_COMMIT                  => DEFINE,
-	DB_DBT_APPMALLOC           => IGNORE,
-	DB_DIRTY_READ              => DEFINE,
-	DB_DONOTINDEX              => DEFINE,
-	DB_ENV_PANIC_OK            => DEFINE,
-	DB_ENV_RPCCLIENT_GIVEN     => DEFINE,
-	DB_FAST_STAT               => DEFINE,
-	DB_LOCK_MAXLOCKS           => DEFINE,
-	DB_LOCK_MINLOCKS           => DEFINE,
-	DB_LOCK_MINWRITE           => DEFINE,
-	DB_MULTIPLE                => DEFINE,
-	DB_MULTIPLE_KEY            => DEFINE,
-	DB_PAGE_NOTFOUND           => DEFINE,
-	DB_RPC_SERVERPROG          => DEFINE,
-	DB_RPC_SERVERVERS          => DEFINE,
-	DB_SECONDARY_BAD           => DEFINE,
-	DB_SURPRISE_KID            => DEFINE,
-	DB_TEST_POSTDESTROY        => DEFINE,
-	DB_TEST_PREDESTROY         => DEFINE,
-	DB_UPDATE_SECONDARY        => DEFINE,
-	DB_XIDDATASIZE             => DEFINE,
-
-	# enum db_recops
-	DB_TXN_POPENFILES          => '3.3.11',
-
-	# enum db_lockop_t
-	DB_LOCK_UPGRADE_WRITE      => '3.3.11',
-
-	# enum db_lockmode_t
-	DB_LOCK_DIRTY              => IGNORE, # 3.3.11
-	DB_LOCK_WWRITE             => IGNORE, # 3.3.11
-
-	#########
-	# 4.0.14
-	#########
-
-	DB_APPLY_LOGREG            => DEFINE,
-	DB_CL_WRITER               => DEFINE,
-	DB_EID_BROADCAST           => DEFINE,
-	DB_EID_INVALID             => DEFINE,
-	DB_ENV_NOLOCKING           => DEFINE,
-	DB_ENV_NOPANIC             => DEFINE,
-	DB_ENV_REGION_INIT         => DEFINE,
-	DB_ENV_REP_CLIENT          => DEFINE,
-	DB_ENV_REP_LOGSONLY        => DEFINE,
-	DB_ENV_REP_MASTER          => DEFINE,
-	DB_ENV_YIELDCPU            => DEFINE,
-	DB_GET_BOTH_RANGE          => DEFINE,
-	DB_LOCK_EXPIRE             => DEFINE,
-	DB_LOCK_FREE_LOCKER        => DEFINE,
-	DB_LOCK_SET_TIMEOUT        => DEFINE,
-	DB_LOGC_BUF_SIZE           => DEFINE,
-	DB_LOG_DISK                => DEFINE,
-	DB_LOG_LOCKED              => DEFINE,
-	DB_LOG_SILENT_ERR          => DEFINE,
-	DB_NOLOCKING               => DEFINE,
-	DB_NOPANIC                 => DEFINE,
-	DB_PANIC_ENVIRONMENT       => DEFINE,
-	DB_REP_CLIENT              => DEFINE,
-	DB_REP_DUPMASTER           => DEFINE,
-	DB_REP_HOLDELECTION        => DEFINE,
-	DB_REP_LOGSONLY            => DEFINE,
-	DB_REP_MASTER              => DEFINE,
-	DB_REP_NEWMASTER           => DEFINE,
-	DB_REP_NEWSITE             => DEFINE,
-	DB_REP_OUTDATED            => DEFINE,
-	DB_REP_PERMANENT           => DEFINE,
-	DB_REP_UNAVAIL             => DEFINE,
-	DB_SET_LOCK_TIMEOUT        => DEFINE,
-	DB_SET_TXN_NOW             => DEFINE,
-	DB_SET_TXN_TIMEOUT         => DEFINE,
-	DB_STAT_CLEAR              => DEFINE,
-	DB_TIMEOUT                 => DEFINE,
-	DB_VERB_REPLICATION        => DEFINE,
-	DB_YIELDCPU                => DEFINE,
-	MP_FLUSH                   => IGNORE,
-	MP_OPEN_CALLED             => IGNORE,
-	MP_READONLY                => IGNORE,
-	MP_UPGRADE                 => IGNORE,
-	MP_UPGRADE_FAIL            => IGNORE,
-	TXN_CHILDCOMMIT            => IGNORE,
-	TXN_COMPENSATE             => IGNORE,
-	TXN_DIRTY_READ             => IGNORE,
-	TXN_LOCKTIMEOUT            => IGNORE,
-	TXN_MALLOC                 => IGNORE,
-	TXN_NOSYNC                 => IGNORE,
-	TXN_NOWAIT                 => IGNORE,
-	TXN_SYNC                   => IGNORE,
-
-	# enum db_recops
-	DB_TXN_APPLY               => '4.0.14',
-
-	# enum db_lockop_t
-	DB_LOCK_GET_TIMEOUT        => '4.0.14',
-	DB_LOCK_PUT_READ           => '4.0.14',
-	DB_LOCK_TIMEOUT            => '4.0.14',
-
-	# enum db_status_t
-	DB_LSTAT_EXPIRED           => IGNORE, # 4.0.14
-
-	#########
-	# 4.1.24
-	#########
-
-	DBC_OWN_LID                => IGNORE,
-	DB_AM_CHKSUM               => IGNORE,
-	DB_AM_CL_WRITER            => IGNORE,
-	DB_AM_COMPENSATE           => IGNORE,
-	DB_AM_CREATED              => IGNORE,
-	DB_AM_CREATED_MSTR         => IGNORE,
-	DB_AM_DBM_ERROR            => IGNORE,
-	DB_AM_DELIMITER            => IGNORE,
-	DB_AM_ENCRYPT              => IGNORE,
-	DB_AM_FIXEDLEN             => IGNORE,
-	DB_AM_IN_RENAME            => IGNORE,
-	DB_AM_OPEN_CALLED          => IGNORE,
-	DB_AM_PAD                  => IGNORE,
-	DB_AM_RECNUM               => IGNORE,
-	DB_AM_RENUMBER             => IGNORE,
-	DB_AM_REVSPLITOFF          => IGNORE,
-	DB_AM_SNAPSHOT             => IGNORE,
-	DB_AUTO_COMMIT             => DEFINE,
-	DB_CHKSUM_SHA1             => DEFINE,
-	DB_DIRECT                  => DEFINE,
-	DB_DIRECT_DB               => DEFINE,
-	DB_DIRECT_LOG              => DEFINE,
-	DB_ENCRYPT                 => DEFINE,
-	DB_ENCRYPT_AES             => DEFINE,
-	DB_ENV_AUTO_COMMIT         => DEFINE,
-	DB_ENV_DIRECT_DB           => DEFINE,
-	DB_ENV_DIRECT_LOG          => DEFINE,
-	DB_ENV_FATAL               => DEFINE,
-	DB_ENV_OVERWRITE           => DEFINE,
-	DB_ENV_TXN_WRITE_NOSYNC    => DEFINE,
-	DB_HANDLE_LOCK             => DEFINE,
-	DB_LOCK_NOTEXIST           => DEFINE,
-	DB_LOCK_REMOVE             => DEFINE,
-	DB_NOCOPY                  => DEFINE,
-	DB_OVERWRITE               => DEFINE,
-	DB_PERMANENT               => DEFINE,
-	DB_PRINTABLE               => DEFINE,
-	DB_RENAMEMAGIC             => DEFINE,
-	DB_TEST_ELECTINIT          => DEFINE,
-	DB_TEST_ELECTSEND          => DEFINE,
-	DB_TEST_ELECTVOTE1         => DEFINE,
-	DB_TEST_ELECTVOTE2         => DEFINE,
-	DB_TEST_ELECTWAIT1         => DEFINE,
-	DB_TEST_ELECTWAIT2         => DEFINE,
-	DB_TEST_SUBDB_LOCKS        => DEFINE,
-	DB_TXN_LOCK                => DEFINE,
-	DB_TXN_WRITE_NOSYNC        => DEFINE,
-	DB_WRITEOPEN               => DEFINE,
-	DB_WRNOSYNC                => DEFINE,
-	_DB_EXT_PROT_IN_           => IGNORE,
-
-	# enum db_lockop_t
-	DB_LOCK_TRADE              => '4.1.24',
-
-	# enum db_status_t
-	DB_LSTAT_NOTEXIST          => IGNORE, # 4.1.24
-
-	# enum DB_CACHE_PRIORITY
-	DB_PRIORITY_VERY_LOW       => '4.1.24',
-	DB_PRIORITY_LOW            => '4.1.24',
-	DB_PRIORITY_DEFAULT        => '4.1.24',
-	DB_PRIORITY_HIGH           => '4.1.24',
-	DB_PRIORITY_VERY_HIGH      => '4.1.24',
-
-	# enum db_recops
-	DB_TXN_BACKWARD_ALLOC      => '4.1.24',
-	DB_TXN_PRINT               => '4.1.24',
-
-	#########
-	# 4.2.50
-	#########
-
-	DB_AM_NOT_DURABLE          => IGNORE,
-	DB_AM_REPLICATION          => IGNORE,
-	DB_ARCH_REMOVE             => DEFINE,
-	DB_CHKSUM                  => DEFINE,
-	DB_ENV_LOG_AUTOREMOVE      => DEFINE,
-	DB_ENV_TIME_NOTGRANTED     => DEFINE,
-	DB_ENV_TXN_NOT_DURABLE     => DEFINE,
-	DB_FILEOPEN                => DEFINE,
-	DB_INIT_REP                => DEFINE,
-	DB_LOG_AUTOREMOVE          => DEFINE,
-	DB_LOG_CHKPNT              => DEFINE,
-	DB_LOG_COMMIT              => DEFINE,
-	DB_LOG_NOCOPY              => DEFINE,
-	DB_LOG_NOT_DURABLE         => DEFINE,
-	DB_LOG_PERM                => DEFINE,
-	DB_LOG_WRNOSYNC            => DEFINE,
-	DB_MPOOL_NOFILE            => DEFINE,
-	DB_MPOOL_UNLINK            => DEFINE,
-	DB_NO_AUTO_COMMIT          => DEFINE,
-	DB_REP_CREATE              => DEFINE,
-	DB_REP_HANDLE_DEAD         => DEFINE,
-	DB_REP_ISPERM              => DEFINE,
-	DB_REP_NOBUFFER            => DEFINE,
-	DB_REP_NOTPERM             => DEFINE,
-	DB_RPCCLIENT               => DEFINE,
-	DB_TIME_NOTGRANTED         => DEFINE,
-	DB_TXN_NOT_DURABLE         => DEFINE,
-	DB_debug_FLAG              => DEFINE,
-	DB_user_BEGIN              => DEFINE,
-	MP_FILEID_SET              => IGNORE,
-	TXN_RESTORED               => IGNORE,
-
-	#########
-	# 4.3.12
-	#########
-
-	DBC_DEGREE_2               => IGNORE,
-	DB_AM_INORDER              => IGNORE,
-	DB_BUFFER_SMALL            => DEFINE,
-	DB_DEGREE_2                => DEFINE,
-	DB_DSYNC_LOG               => DEFINE,
-	DB_DURABLE_UNKNOWN         => DEFINE,
-	DB_ENV_DSYNC_LOG           => DEFINE,
-	DB_ENV_LOG_INMEMORY        => DEFINE,
-	DB_INORDER                 => DEFINE,
-	DB_LOCK_ABORT              => DEFINE,
-	DB_LOCK_MAXWRITE           => DEFINE,
-	DB_LOG_BUFFER_FULL         => DEFINE,
-	DB_LOG_INMEMORY            => DEFINE,
-	DB_LOG_RESEND              => DEFINE,
-	DB_MPOOL_FREE              => DEFINE,
-	DB_REP_EGENCHG             => DEFINE,
-	DB_REP_LOGREADY            => DEFINE,
-	DB_REP_PAGEDONE            => DEFINE,
-	DB_REP_STARTUPDONE         => DEFINE,
-	DB_SEQUENCE_VERSION        => DEFINE,
-	DB_SEQ_DEC                 => DEFINE,
-	DB_SEQ_INC                 => DEFINE,
-	DB_SEQ_RANGE_SET           => DEFINE,
-	DB_SEQ_WRAP                => DEFINE,
-	DB_STAT_ALL                => DEFINE,
-	DB_STAT_LOCK_CONF          => DEFINE,
-	DB_STAT_LOCK_LOCKERS       => DEFINE,
-	DB_STAT_LOCK_OBJECTS       => DEFINE,
-	DB_STAT_LOCK_PARAMS        => DEFINE,
-	DB_STAT_MEMP_HASH          => DEFINE,
-	DB_STAT_SUBSYSTEM          => DEFINE,
-	DB_UNREF                   => DEFINE,
-	DB_VERSION_MISMATCH        => DEFINE,
-	TXN_DEADLOCK               => IGNORE,
-	TXN_DEGREE_2               => IGNORE,
-
-	) ;
-
-sub enum_Macro
-{
-    my $str = shift ;
-    my ($major, $minor, $patch) = split /\./, $str ;
-
-    my $macro = 
-    "#if (DB_VERSION_MAJOR > $major) || \\\n" .
-    "    (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR > $minor) || \\\n" .
-    "    (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR == $minor && \\\n" .
-    "     DB_VERSION_PATCH >= $patch)\n" ;
-
-    return $macro;
-
-}
-
-sub OutputXS
-{
-
-    my @names = () ;
-
-    foreach my $key (sort keys %constants)
-    {
-        my $val = $constants{$key} ;
-        next if $val eq IGNORE;
-
-        if ($val eq STRING)
-          { push @names, { name => $key, type => "PV" } }
-        elsif ($val eq DEFINE)
-          { push @names, $key }
-        else
-          { push @names, { name => $key, macro => [enum_Macro($val), "#endif\n"] } }
-    }
-
-    warn "Updating constants.xs & constants.h...\n";
-    WriteConstants(
-              NAME    => BerkeleyDB,
-              NAMES   => \@names,
-              C_FILE  => 'constants.h',
-              XS_FILE => 'constants.xs',
-          ) ;
-}
-
-sub OutputPM
-{
-    my $filename = 'BerkeleyDB.pm';
-    warn "Updating $filename...\n";
-    open IN, "<$filename" || die "Cannot open $filename: $!\n";
-    open OUT, ">$filename.tmp" || die "Cannot open $filename.tmp: $!\n";
-
-    my $START = '@EXPORT = qw(' ;
-    my $START_re = quotemeta $START ;
-    my $END = ');';
-    my $END_re = quotemeta $END ;
-
-    # skip to the @EXPORT declaration
-    OUTER: while ()
-    {
-        if ( /^\s*$START_re/ )
-        {
-            # skip to the end marker.
-            while () 
-                { last OUTER if /^\s*$END_re/ }
-        }
-        print OUT ;
-    }
-    
-    print OUT "$START\n";
-    foreach my $key (sort keys %constants)
-    {
-        next if $constants{$key} eq IGNORE;
-	print OUT "\t$key\n";
-    }
-    print OUT "\t$END\n";
-    
-    while ()
-    {
-        print OUT ;
-    }
-
-    close IN;
-    close OUT;
-
-    rename $filename, "$filename.bak" || die "Cannot rename $filename: $!\n" ;
-    rename "$filename.tmp", $filename || die "Cannot rename $filename.tmp: $!\n" ;
-}
-
-OutputXS() ;
-OutputPM() ;
diff --git a/storage/bdb/perl/BerkeleyDB/mkpod b/storage/bdb/perl/BerkeleyDB/mkpod
deleted file mode 100755
index 44bbf3fbf4f..00000000000
--- a/storage/bdb/perl/BerkeleyDB/mkpod
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/local/bin/perl5
-
-# Filename: mkpod
-#
-# Author:	Paul Marquess
-
-# File types
-#
-#    Macro files end with .M
-#    Tagged source files end with .T
-#    Output from the code ends with .O
-#    Pre-Pod file ends with .P
-#    
-# Tags
-#
-#    ## BEGIN tagname
-#     ...
-#    ## END tagname
-#
-#    ## 0
-#    ## 1
-#
-
-# Constants
-
-$TOKEN = '##' ;
-$Verbose = 1 if $ARGV[0] =~ /^-v/i ;
-
-# Macros files first
-foreach $file (glob("*.M"))
-{
-    open (F, "<$file") or die "Cannot open '$file':$!\n" ;
-    print "    Processing Macro file $file\n"  ;
-    while ()
-    {
-        # Skip blank & comment lines
-        next if /^\s*$/ || /^\s*#/ ;
-	
-	# 
-	($name, $expand) = split (/\t+/, $_, 2) ;
-
-	$expand =~ s/^\s*// ;
-        $expand =~ s/\s*$// ;
-
-	if ($expand =~ /\[#/ )
-	{
-	}
-
-	$Macros{$name} = $expand ;
-    }
-    close F ;
-}
-
-# Suck up all the code files
-foreach $file (glob("t/*.T"))
-{
-    ($newfile = $file) =~ s/\.T$// ;
-    open (F, "<$file") or die "Cannot open '$file':$!\n" ;
-    open (N, ">$newfile") or die "Cannot open '$newfile':$!\n" ;
-
-    print "    Processing $file -> $newfile\n"  ;
-
-    while ($line = )
-    {
-        if ($line =~ /^$TOKEN\s*BEGIN\s+(\w+)\s*$/ or
-            $line =~ m[\s*/\*$TOKEN\s*BEGIN\s+(\w+)\s*$] )
-        {
-	    print "    Section $1 begins\n" if $Verbose ;
-	    $InSection{$1} ++ ;
-	    $Section{$1} = '' unless $Section{$1} ;
-        }
-        elsif ($line =~ /^$TOKEN\s*END\s+(\w+)\s*$/ or
-               $line =~ m[^\s*/\*$TOKEN\s*END\s+(\w+)\s*$] )
-        {
-	    warn "Encountered END without a begin [$line]\n"
-		unless $InSection{$1} ;
-
-	    delete $InSection{$1}  ;
-	    print "    Section $1 ends\n" if $Verbose ;
-        }
-        else
-        {
-	    print N $line ;
-	    chop $line ;
-	    $line =~ s/\s*$// ;
-
-	    # Save the current line in each of the sections
-	    foreach( keys %InSection)
-	    {
-		if ($line !~ /^\s*$/ )
-	          #{ $Section{$_} .= "    $line" }
-	          { $Section{$_} .= $line }
-	        $Section{$_} .= "\n" ;
-	    }
-        }
-
-    }
-
-    if (%InSection)
-    {
-        # Check for unclosed sections
-	print "The following Sections are not terminated\n" ;
-        foreach (sort keys %InSection)
-          { print "\t$_\n" }
-	exit 1 ;
-    }
-
-    close F ;
-    close N ;
-}
-
-print "\n\nCreating pod file(s)\n\n" if $Verbose ;
-
-@ppods = glob('*.P') ;
-#$ppod = $ARGV[0] ;
-#$pod = $ARGV[1] ;
-
-# Now process the pre-pod file
-foreach $ppod (@ppods)
-{
-    ($pod = $ppod) =~ s/\.P$// ;
-    open (PPOD, "<$ppod") or die "Cannot open file '$ppod': $!\n" ;
-    open (POD, ">$pod") or die "Cannot open file '$pod': $!\n" ;
-
-    print "    $ppod -> $pod\n" ;
-
-    while ($line = )
-    {
-        if ( $line =~ /^\s*$TOKEN\s*(\w+)\s*$/)
-        {
-            warn "No code insert '$1' available\n"
-	        unless $Section{$1} ;
-    
-	    print "Expanding section $1\n" if $Verbose ;
-	    print POD $Section{$1} ;
-        }
-        else
-        {
-#	    $line =~ s/\[#([^\]])]/$Macros{$1}/ge ;
-	    print POD $line ;
-        }
-    }
-    
-    close PPOD ;
-    close POD ;
-}
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004 b/storage/bdb/perl/BerkeleyDB/patches/5.004
deleted file mode 100644
index 0665d1f6c40..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004
+++ /dev/null
@@ -1,93 +0,0 @@
-diff -rc perl5.004.orig/Configure perl5.004/Configure
-*** perl5.004.orig/Configure	1997-05-13 18:20:34.000000000 +0100
---- perl5.004/Configure	2003-04-26 16:36:53.000000000 +0100
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9902,9907 ****
---- 9903,9916 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10370,10375 ****
---- 10379,10385 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
-*** perl5.004.orig/Makefile.SH	1997-05-01 15:22:39.000000000 +0100
---- perl5.004/Makefile.SH	2003-04-26 16:37:23.000000000 +0100
-***************
-*** 119,125 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 119,125 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004.orig/myconfig perl5.004/myconfig
-*** perl5.004.orig/myconfig	1996-12-21 01:13:20.000000000 +0000
---- perl5.004/myconfig	2003-04-26 16:37:51.000000000 +0100
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
-*** perl5.004.orig/patchlevel.h	1997-05-15 23:15:17.000000000 +0100
---- perl5.004/patchlevel.h	2003-04-26 16:38:11.000000000 +0100
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_01 b/storage/bdb/perl/BerkeleyDB/patches/5.004_01
deleted file mode 100644
index 1b05eb4e02b..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004_01
+++ /dev/null
@@ -1,217 +0,0 @@
-diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
-*** perl5.004_01.orig/Configure	Wed Jun 11 00:28:03 1997
---- perl5.004_01/Configure	Sun Nov 12 22:12:35 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9907,9912 ****
---- 9908,9921 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10375,10380 ****
---- 10384,10390 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
-*** perl5.004_01.orig/Makefile.SH	Thu Jun 12 23:27:56 1997
---- perl5.004_01/Makefile.SH	Sun Nov 12 22:12:35 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
-*** perl5.004_01.orig/lib/ExtUtils/Embed.pm	Wed Apr  2 22:12:04 1997
---- perl5.004_01/lib/ExtUtils/Embed.pm	Sun Nov 12 22:12:35 2000
-***************
-*** 170,176 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 170,176 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
-*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm	Sat Jun  7 01:19:44 1997
---- perl5.004_01/lib/ExtUtils/Liblist.pm	Sun Nov 12 22:13:27 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $Verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $Verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $Verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $Verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm	Thu Jun 12 22:06:18 1997
---- perl5.004_01/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 22:12:35 2000
-***************
-*** 2137,2143 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2137,2143 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
-*** perl5.004_01.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_01/myconfig	Sun Nov 12 22:12:35 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
-*** perl5.004_01.orig/patchlevel.h	Wed Jun 11 03:06:10 1997
---- perl5.004_01/patchlevel.h	Sun Nov 12 22:12:35 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_02 b/storage/bdb/perl/BerkeleyDB/patches/5.004_02
deleted file mode 100644
index 238f8737941..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004_02
+++ /dev/null
@@ -1,217 +0,0 @@
-diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
-*** perl5.004_02.orig/Configure	Thu Aug  7 15:08:44 1997
---- perl5.004_02/Configure	Sun Nov 12 22:06:24 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9911,9916 ****
---- 9912,9925 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10379,10384 ****
---- 10388,10394 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
-*** perl5.004_02.orig/Makefile.SH	Thu Aug  7 13:10:53 1997
---- perl5.004_02/Makefile.SH	Sun Nov 12 22:06:24 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
-*** perl5.004_02.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_02/lib/ExtUtils/Embed.pm	Sun Nov 12 22:06:24 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
-*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm	Fri Aug  1 19:36:58 1997
---- perl5.004_02/lib/ExtUtils/Liblist.pm	Sun Nov 12 22:06:24 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm	Tue Aug  5 14:28:08 1997
---- perl5.004_02/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 22:06:25 2000
-***************
-*** 2224,2230 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2224,2230 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
-*** perl5.004_02.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_02/myconfig	Sun Nov 12 22:06:25 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
-*** perl5.004_02.orig/patchlevel.h	Fri Aug  1 15:07:34 1997
---- perl5.004_02/patchlevel.h	Sun Nov 12 22:06:25 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_03 b/storage/bdb/perl/BerkeleyDB/patches/5.004_03
deleted file mode 100644
index 06331eac922..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004_03
+++ /dev/null
@@ -1,223 +0,0 @@
-diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
-*** perl5.004_03.orig/Configure	Wed Aug 13 16:09:46 1997
---- perl5.004_03/Configure	Sun Nov 12 21:56:18 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9911,9916 ****
---- 9912,9925 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10379,10384 ****
---- 10388,10394 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-Only in perl5.004_03: Configure.orig
-diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
-*** perl5.004_03.orig/Makefile.SH	Mon Aug 18 19:24:29 1997
---- perl5.004_03/Makefile.SH	Sun Nov 12 21:56:18 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-Only in perl5.004_03: Makefile.SH.orig
-diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
-*** perl5.004_03.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_03/lib/ExtUtils/Embed.pm	Sun Nov 12 21:56:18 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
-*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm	Fri Aug  1 19:36:58 1997
---- perl5.004_03/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:57:17 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
-Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
-diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm	Mon Aug 18 19:16:12 1997
---- perl5.004_03/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:56:19 2000
-***************
-*** 2224,2230 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2224,2230 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
-diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
-*** perl5.004_03.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_03/myconfig	Sun Nov 12 21:56:19 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
-*** perl5.004_03.orig/patchlevel.h	Wed Aug 13 11:42:01 1997
---- perl5.004_03/patchlevel.h	Sun Nov 12 21:56:19 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
-Only in perl5.004_03: patchlevel.h.orig
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_04 b/storage/bdb/perl/BerkeleyDB/patches/5.004_04
deleted file mode 100644
index a227dc700d9..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004_04
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
-*** perl5.004_04.orig/Configure	Fri Oct  3 18:57:39 1997
---- perl5.004_04/Configure	Sun Nov 12 21:50:51 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9910,9915 ****
---- 9911,9924 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10378,10383 ****
---- 10387,10393 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
-*** perl5.004_04.orig/Makefile.SH	Wed Oct 15 10:33:16 1997
---- perl5.004_04/Makefile.SH	Sun Nov 12 21:50:51 2000
-***************
-*** 129,135 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 129,135 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
-*** perl5.004_04.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_04/lib/ExtUtils/Embed.pm	Sun Nov 12 21:50:51 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
-*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm	Tue Sep  9 17:41:32 1997
---- perl5.004_04/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:51:33 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 189,195 ****
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 189,195 ----
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 539,545 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 539,545 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm	Wed Oct  8 14:13:51 1997
---- perl5.004_04/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:50:51 2000
-***************
-*** 2229,2235 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2229,2235 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
-*** perl5.004_04.orig/myconfig	Mon Oct  6 18:26:49 1997
---- perl5.004_04/myconfig	Sun Nov 12 21:50:51 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
-*** perl5.004_04.orig/patchlevel.h	Wed Oct 15 10:55:19 1997
---- perl5.004_04/patchlevel.h	Sun Nov 12 21:50:51 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-  /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */ 
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_05 b/storage/bdb/perl/BerkeleyDB/patches/5.004_05
deleted file mode 100644
index 51c8bf35009..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.004_05
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
-*** perl5.004_05.orig/Configure	Thu Jan  6 22:05:49 2000
---- perl5.004_05/Configure	Sun Nov 12 21:36:25 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 10164,10169 ****
---- 10165,10178 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10648,10653 ****
---- 10657,10663 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
-*** perl5.004_05.orig/Makefile.SH	Thu Jan  6 22:05:49 2000
---- perl5.004_05/Makefile.SH	Sun Nov 12 21:36:25 2000
-***************
-*** 151,157 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 151,157 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
-*** perl5.004_05.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_05/lib/ExtUtils/Embed.pm	Sun Nov 12 21:36:25 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
-*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm	Thu Jan  6 22:05:54 2000
---- perl5.004_05/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:45:31 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 590,596 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 590,596 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm	Thu Jan  6 22:05:54 2000
---- perl5.004_05/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:36:25 2000
-***************
-*** 2246,2252 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2246,2252 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
-*** perl5.004_05.orig/myconfig	Thu Jan  6 22:05:55 2000
---- perl5.004_05/myconfig	Sun Nov 12 21:43:54 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
-*** perl5.004_05.orig/patchlevel.h	Thu Jan  6 22:05:48 2000
---- perl5.004_05/patchlevel.h	Sun Nov 12 21:36:25 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-  /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */ 
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005 b/storage/bdb/perl/BerkeleyDB/patches/5.005
deleted file mode 100644
index effee3e8275..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.005
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.005.orig/Configure perl5.005/Configure
-*** perl5.005.orig/Configure	Wed Jul 15 08:05:44 1998
---- perl5.005/Configure	Sun Nov 12 21:30:40 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11279,11284 ****
---- 11280,11293 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11804,11809 ****
---- 11813,11819 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
-*** perl5.005.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005/Makefile.SH	Sun Nov 12 21:30:40 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
-*** perl5.005.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005/lib/ExtUtils/Embed.pm	Sun Nov 12 21:30:40 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
-*** perl5.005.orig/lib/ExtUtils/Liblist.pm	Wed Jul 22 07:09:42 1998
---- perl5.005/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:30:40 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 290,296 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 290,296 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 598,604 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 598,604 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
-*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:30:41 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.005.orig/myconfig perl5.005/myconfig
-*** perl5.005.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005/myconfig	Sun Nov 12 21:30:41 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
-*** perl5.005.orig/patchlevel.h	Wed Jul 22 19:22:01 1998
---- perl5.005/patchlevel.h	Sun Nov 12 21:30:41 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_01 b/storage/bdb/perl/BerkeleyDB/patches/5.005_01
deleted file mode 100644
index 2a05dd545f6..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.005_01
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
-*** perl5.005_01.orig/Configure	Wed Jul 15 08:05:44 1998
---- perl5.005_01/Configure	Sun Nov 12 20:55:58 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11279,11284 ****
---- 11280,11293 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11804,11809 ****
---- 11813,11819 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
-*** perl5.005_01.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005_01/Makefile.SH	Sun Nov 12 20:55:58 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
-*** perl5.005_01.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005_01/lib/ExtUtils/Embed.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
-*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm	Wed Jul 22 07:09:42 1998
---- perl5.005_01/lib/ExtUtils/Liblist.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 290,296 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 290,296 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 598,604 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 598,604 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005_01/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
-*** perl5.005_01.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005_01/myconfig	Sun Nov 12 20:55:58 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
-*** perl5.005_01.orig/patchlevel.h	Mon Jan  3 11:07:45 2000
---- perl5.005_01/patchlevel.h	Sun Nov 12 20:55:58 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_02 b/storage/bdb/perl/BerkeleyDB/patches/5.005_02
deleted file mode 100644
index 5dd57ddc03f..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.005_02
+++ /dev/null
@@ -1,264 +0,0 @@
-diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
-*** perl5.005_02.orig/Configure	Mon Jan  3 11:12:20 2000
---- perl5.005_02/Configure	Sun Nov 12 20:50:51 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11334,11339 ****
---- 11335,11348 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11859,11864 ****
---- 11868,11874 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-Only in perl5.005_02: Configure.orig
-diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
-*** perl5.005_02.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005_02/Makefile.SH	Sun Nov 12 20:50:51 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-Only in perl5.005_02: Makefile.SH.orig
-diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
-*** perl5.005_02.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005_02/lib/ExtUtils/Embed.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
-*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm	Mon Jan  3 11:12:21 2000
---- perl5.005_02/lib/ExtUtils/Liblist.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 333,339 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 333,339 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 623,629 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 623,629 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-***************
-*** 666,672 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 666,672 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 676,682 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 676,682 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
-diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005_02/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
-diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
-*** perl5.005_02.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005_02/myconfig	Sun Nov 12 20:50:51 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
-*** perl5.005_02.orig/patchlevel.h	Mon Jan  3 11:12:19 2000
---- perl5.005_02/patchlevel.h	Sun Nov 12 20:50:51 2000
-***************
-*** 40,45 ****
---- 40,46 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_03 b/storage/bdb/perl/BerkeleyDB/patches/5.005_03
deleted file mode 100644
index 115f9f5b909..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.005_03
+++ /dev/null
@@ -1,250 +0,0 @@
-diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
-*** perl5.005_03.orig/Configure	Sun Mar 28 17:12:57 1999
---- perl5.005_03/Configure	Sun Sep 17 22:19:16 2000
-***************
-*** 208,213 ****
---- 208,214 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11642,11647 ****
---- 11643,11656 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 12183,12188 ****
---- 12192,12198 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
-*** perl5.005_03.orig/Makefile.SH	Thu Mar  4 02:35:25 1999
---- perl5.005_03/Makefile.SH	Sun Sep 17 22:21:01 2000
-***************
-*** 58,67 ****
-  		shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
-  		case "$osvers" in
-  		3*)
-! 			shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
-  			;;
-  		*)
-! 			shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
-  			;;
-  		esac
-  		aixinstdir=`pwd | sed 's/\/UU$//'`
---- 58,67 ----
-  		shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
-  		case "$osvers" in
-  		3*)
-! 			shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
-  			;;
-  		*)
-! 			shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
-  			;;
-  		esac
-  		aixinstdir=`pwd | sed 's/\/UU$//'`
-***************
-*** 155,161 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 155,161 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
-*** perl5.005_03.orig/lib/ExtUtils/Embed.pm	Wed Jan  6 02:17:50 1999
---- perl5.005_03/lib/ExtUtils/Embed.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
-*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm	Wed Jan  6 02:17:47 1999
---- perl5.005_03/lib/ExtUtils/Liblist.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 336,342 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 336,342 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 626,632 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
---- 626,632 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
-***************
-*** 670,676 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 670,676 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 680,686 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 680,686 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm	Fri Mar  5 00:34:20 1999
---- perl5.005_03/lib/ExtUtils/MM_Unix.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 2284,2290 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2284,2290 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.6.0 b/storage/bdb/perl/BerkeleyDB/patches/5.6.0
deleted file mode 100644
index 1f9b3b620de..00000000000
--- a/storage/bdb/perl/BerkeleyDB/patches/5.6.0
+++ /dev/null
@@ -1,294 +0,0 @@
-diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
-*** perl-5.6.0.orig/Configure	Wed Mar 22 20:36:37 2000
---- perl-5.6.0/Configure	Sun Sep 17 23:40:15 2000
-***************
-*** 217,222 ****
---- 217,223 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 14971,14976 ****
---- 14972,14985 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 15640,15645 ****
---- 15649,15655 ----
-  path_sep='$path_sep'
-  perl5='$perl5'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
-*** perl-5.6.0.orig/Makefile.SH	Sat Mar 11 16:05:24 2000
---- perl-5.6.0/Makefile.SH	Sun Sep 17 23:40:15 2000
-***************
-*** 70,76 ****
-  		*)	shrpldflags="$shrpldflags -b noentry"
-  			;;
-  		esac
-! 	        shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
-  		linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
-  		;;
-  	hpux*)
---- 70,76 ----
-  		*)	shrpldflags="$shrpldflags -b noentry"
-  			;;
-  		esac
-! 	        shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
-  		linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
-  		;;
-  	hpux*)
-***************
-*** 176,182 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 176,182 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-***************
-*** 333,339 ****
-  case "$osname" in
-  aix)
-  	$spitshell >>Makefile <>Makefile <{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 338,344 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 624,630 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
---- 624,630 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
-***************
-*** 668,674 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 668,674 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 678,684 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 678,684 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
-*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm	Thu Mar  2 17:52:52 2000
---- perl-5.6.0/lib/ExtUtils/MM_Unix.pm	Sun Sep 17 23:40:15 2000
-***************
-*** 2450,2456 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2450,2456 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
-*** perl-5.6.0.orig/myconfig.SH	Sat Feb 26 06:34:49 2000
---- perl-5.6.0/myconfig.SH	Sun Sep 17 23:41:17 2000
-***************
-*** 48,54 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 48,54 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
-*** perl-5.6.0.orig/patchlevel.h	Wed Mar 22 20:23:11 2000
---- perl-5.6.0/patchlevel.h	Sun Sep 17 23:40:15 2000
-***************
-*** 70,75 ****
---- 70,76 ----
-  #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/BerkeleyDB/ppport.h b/storage/bdb/perl/BerkeleyDB/ppport.h
deleted file mode 100644
index 0815cf2d88f..00000000000
--- a/storage/bdb/perl/BerkeleyDB/ppport.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/* This file is Based on output from 
- * Perl/Pollution/Portability Version 2.0000 */
-
-#ifndef _P_P_PORTABILITY_H_
-#define _P_P_PORTABILITY_H_
-
-#ifndef PERL_REVISION
-#   ifndef __PATCHLEVEL_H_INCLUDED__
-#       include "patchlevel.h"
-#   endif
-#   ifndef PERL_REVISION
-#	define PERL_REVISION	(5)
-        /* Replace: 1 */
-#       define PERL_VERSION	PATCHLEVEL
-#       define PERL_SUBVERSION	SUBVERSION
-        /* Replace PERL_PATCHLEVEL with PERL_VERSION */
-        /* Replace: 0 */
-#   endif
-#endif
-
-#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
-
-#ifndef ERRSV
-#	define ERRSV perl_get_sv("@",FALSE)
-#endif
-
-#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
-/* Replace: 1 */
-#	define PL_Sv		Sv
-#	define PL_compiling	compiling
-#	define PL_copline	copline
-#	define PL_curcop	curcop
-#	define PL_curstash	curstash
-#	define PL_defgv		defgv
-#	define PL_dirty		dirty
-#	define PL_hints		hints
-#	define PL_na		na
-#	define PL_perldb	perldb
-#	define PL_rsfp_filters	rsfp_filters
-#	define PL_rsfp		rsfp
-#	define PL_stdingv	stdingv
-#	define PL_sv_no		sv_no
-#	define PL_sv_undef	sv_undef
-#	define PL_sv_yes	sv_yes
-/* Replace: 0 */
-#endif
-
-#ifndef pTHX
-#    define pTHX
-#    define pTHX_
-#    define aTHX
-#    define aTHX_
-#endif         
-
-#ifndef PTR2IV
-#    define PTR2IV(d)   (IV)(d)
-#endif
- 
-#ifndef INT2PTR
-#    define INT2PTR(any,d)      (any)(d)
-#endif
-
-#ifndef dTHR
-#  ifdef WIN32
-#	define dTHR extern int Perl___notused
-#  else
-#	define dTHR extern int errno
-#  endif
-#endif
-
-#ifndef boolSV
-#	define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
-#endif
-
-#ifndef gv_stashpvn
-#	define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
-#endif
-
-#ifndef newSVpvn
-#	define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
-#endif
-
-#ifndef newRV_inc
-/* Replace: 1 */
-#	define newRV_inc(sv) newRV(sv)
-/* Replace: 0 */
-#endif
-
-#ifndef SvGETMAGIC
-#  define SvGETMAGIC(x)                  STMT_START { if (SvGMAGICAL(x)) mg_get(x); } STMT_END
-#endif
-
-
-/* DEFSV appears first in 5.004_56 */
-#ifndef DEFSV
-#  define DEFSV	GvSV(PL_defgv)
-#endif
-
-#ifndef SAVE_DEFSV
-#    define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
-#endif
-
-#ifndef newRV_noinc
-#  ifdef __GNUC__
-#    define newRV_noinc(sv)               \
-      ({                                  \
-          SV *nsv = (SV*)newRV(sv);       \
-          SvREFCNT_dec(sv);               \
-          nsv;                            \
-      })
-#  else
-#    if defined(CRIPPLED_CC) || defined(USE_THREADS)
-static SV * newRV_noinc (SV * sv)
-{
-          SV *nsv = (SV*)newRV(sv);       
-          SvREFCNT_dec(sv);               
-          return nsv;                     
-}
-#    else
-#      define newRV_noinc(sv)    \
-        ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
-#    endif
-#  endif
-#endif
-
-/* Provide: newCONSTSUB */
-
-/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
-#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
-
-#if defined(NEED_newCONSTSUB)
-static
-#else
-extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
-#endif
-
-#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
-void
-newCONSTSUB(stash,name,sv)
-HV *stash;
-char *name;
-SV *sv;
-{
-	U32 oldhints = PL_hints;
-	HV *old_cop_stash = PL_curcop->cop_stash;
-	HV *old_curstash = PL_curstash;
-	line_t oldline = PL_curcop->cop_line;
-	PL_curcop->cop_line = PL_copline;
-
-	PL_hints &= ~HINT_BLOCK_SCOPE;
-	if (stash)
-		PL_curstash = PL_curcop->cop_stash = stash;
-
-	newSUB(
-
-#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
-     /* before 5.003_22 */
-		start_subparse(),
-#else
-#  if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
-     /* 5.003_22 */
-     		start_subparse(0),
-#  else
-     /* 5.003_23  onwards */
-     		start_subparse(FALSE, 0),
-#  endif
-#endif
-
-		newSVOP(OP_CONST, 0, newSVpv(name,0)),
-		newSVOP(OP_CONST, 0, &PL_sv_no),   /* SvPV(&PL_sv_no) == "" -- GMB */
-		newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
-	);
-
-	PL_hints = oldhints;
-	PL_curcop->cop_stash = old_cop_stash;
-	PL_curstash = old_curstash;
-	PL_curcop->cop_line = oldline;
-}
-#endif
-
-#endif /* newCONSTSUB */
-
-
-#ifndef START_MY_CXT
-
-/*
- * Boilerplate macros for initializing and accessing interpreter-local
- * data from C.  All statics in extensions should be reworked to use
- * this, if you want to make the extension thread-safe.  See ext/re/re.xs
- * for an example of the use of these macros.
- *
- * Code that uses these macros is responsible for the following:
- * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
- * 2. Declare a typedef named my_cxt_t that is a structure that contains
- *    all the data that needs to be interpreter-local.
- * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
- * 4. Use the MY_CXT_INIT macro such that it is called exactly once
- *    (typically put in the BOOT: section).
- * 5. Use the members of the my_cxt_t structure everywhere as
- *    MY_CXT.member.
- * 6. Use the dMY_CXT macro (a declaration) in all the functions that
- *    access MY_CXT.
- */
-
-#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
-    defined(PERL_CAPI)    || defined(PERL_IMPLICIT_CONTEXT)
-
-/* This must appear in all extensions that define a my_cxt_t structure,
- * right after the definition (i.e. at file scope).  The non-threads
- * case below uses it to declare the data as static. */
-#define START_MY_CXT
-
-#if PERL_REVISION == 5 && \
-    (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
-/* Fetches the SV that keeps the per-interpreter data. */
-#define dMY_CXT_SV \
-	SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
-#else /* >= perl5.004_68 */
-#define dMY_CXT_SV \
-	SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY,		\
-				  sizeof(MY_CXT_KEY)-1, TRUE)
-#endif /* < perl5.004_68 */
-
-/* This declaration should be used within all functions that use the
- * interpreter-local data. */
-#define dMY_CXT	\
-	dMY_CXT_SV;							\
-	my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
-
-/* Creates and zeroes the per-interpreter data.
- * (We allocate my_cxtp in a Perl SV so that it will be released when
- * the interpreter goes away.) */
-#define MY_CXT_INIT \
-	dMY_CXT_SV;							\
-	/* newSV() allocates one more than needed */			\
-	my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
-	Zero(my_cxtp, 1, my_cxt_t);					\
-	sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
-
-/* This macro must be used to access members of the my_cxt_t structure.
- * e.g. MYCXT.some_data */
-#define MY_CXT		(*my_cxtp)
-
-/* Judicious use of these macros can reduce the number of times dMY_CXT
- * is used.  Use is similar to pTHX, aTHX etc. */
-#define pMY_CXT		my_cxt_t *my_cxtp
-#define pMY_CXT_	pMY_CXT,
-#define _pMY_CXT	,pMY_CXT
-#define aMY_CXT		my_cxtp
-#define aMY_CXT_	aMY_CXT,
-#define _aMY_CXT	,aMY_CXT
-
-#else /* single interpreter */
-
-#ifndef NOOP
-#  define NOOP (void)0
-#endif
-
-#ifdef HASATTRIBUTE
-#  define PERL_UNUSED_DECL __attribute__((unused))
-#else
-#  define PERL_UNUSED_DECL
-#endif    
-
-#ifndef dNOOP
-#  define dNOOP extern int Perl___notused PERL_UNUSED_DECL
-#endif
-
-#define START_MY_CXT	static my_cxt_t my_cxt;
-#define dMY_CXT_SV	dNOOP
-#define dMY_CXT		dNOOP
-#define MY_CXT_INIT	NOOP
-#define MY_CXT		my_cxt
-
-#define pMY_CXT		void
-#define pMY_CXT_
-#define _pMY_CXT
-#define aMY_CXT
-#define aMY_CXT_
-#define _aMY_CXT
-
-#endif 
-
-#endif /* START_MY_CXT */
-
-
-#if 1
-#ifdef DBM_setFilter
-#undef DBM_setFilter
-#undef DBM_ckFilter
-#endif
-#endif
-
-#ifndef DBM_setFilter
-
-/* 
-   The DBM_setFilter & DBM_ckFilter macros are only used by 
-   the *DB*_File modules 
-*/
-
-#define DBM_setFilter(db_type,code)				\
-	{							\
-	    if (db_type)					\
-	        RETVAL = sv_mortalcopy(db_type) ;		\
-	    ST(0) = RETVAL ;					\
-	    if (db_type && (code == &PL_sv_undef)) {		\
-                SvREFCNT_dec(db_type) ;				\
-	        db_type = NULL ;				\
-	    }							\
-	    else if (code) {					\
-	        if (db_type)					\
-	            sv_setsv(db_type, code) ;			\
-	        else						\
-	            db_type = newSVsv(code) ;			\
-	    }	    						\
-	}
-
-#define DBM_ckFilter(arg,type,name)				\
-	if (db->type) {						\
-	    /* printf("Filtering %s\n", name); */		\
-	    if (db->filtering) {				\
-	        croak("recursion detected in %s", name) ;	\
-	    }                     				\
-	    ENTER ;						\
-	    SAVETMPS ;						\
-	    SAVEINT(db->filtering) ;				\
-	    db->filtering = TRUE ;				\
-	    SAVESPTR(DEFSV) ;					\
-	    if (name[7] == 's')  				\
-	        arg = newSVsv(arg);				\
-	    DEFSV = arg ;					\
-	    SvTEMP_off(arg) ;					\
-	    PUSHMARK(SP) ;					\
-	    PUTBACK ;						\
-	    (void) perl_call_sv(db->type, G_DISCARD); 		\
-	    arg = DEFSV ;					\
-	    SPAGAIN ;						\
-	    PUTBACK ;						\
-	    FREETMPS ;						\
-	    LEAVE ;						\
-	    if (name[7] == 's'){ 				\
-	        arg = sv_2mortal(arg);				\
-	    }							\
-	    SvOKp(arg);						\
-	}
-
-#endif /* DBM_setFilter */
-
-#endif /* _P_P_PORTABILITY_H_ */
diff --git a/storage/bdb/perl/BerkeleyDB/scan b/storage/bdb/perl/BerkeleyDB/scan
deleted file mode 100644
index c501f3c4532..00000000000
--- a/storage/bdb/perl/BerkeleyDB/scan
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/local/bin/perl
-
-my $ignore_re = '^(' . join("|", 
-	qw(
-		_
-		[a-z]
-		DBM
-		DBC
-		DB_AM_
-		DB_BT_
-		DB_RE_
-		DB_HS_
-		DB_FUNC_
-		DB_DBT_
-		DB_DBM
-		DB_TSL
-		MP
-		TXN
-                DB_TXN_GETPGNOS
-	)) . ')' ;
-
-my %ignore_def = map {$_, 1} qw() ;
-
-%ignore_enums = map {$_, 1} qw( ACTION db_status_t db_notices db_lockmode_t ) ;
-
-my %ignore_exact_enum = map { $_ => 1}
-	qw(
-                DB_TXN_GETPGNOS
-                );
-
-my $filler = ' ' x 26 ;
-
-chdir "libraries" || die "Cannot chdir into './libraries': $!\n";
-
-foreach my $name (sort tuple glob "[2-9]*")
-{
-    next if $name =~ /(NC|private)$/;
-
-    my $inc = "$name/include/db.h" ;
-    next unless -f $inc ;
-
-    my $file = readFile($inc) ;
-    StripCommentsAndStrings($file) ;
-    my $result = scan($name, $file) ;
-    print "\n\t#########\n\t# $name\n\t#########\n\n$result" 
-        if $result;
-}
-exit ;
-
-
-sub scan
-{
-    my $version = shift ;
-    my $file = shift ;
-
-    my %seen_define = () ;
-    my $result = "" ;
-
-    if (1) {
-        # Preprocess all tri-graphs 
-        # including things stuck in quoted string constants.
-        $file =~ s/\?\?=/#/g;                         # | ??=|  #|
-        $file =~ s/\?\?\!/|/g;                        # | ??!|  ||
-        $file =~ s/\?\?'/^/g;                         # | ??'|  ^|
-        $file =~ s/\?\?\(/[/g;                        # | ??(|  [|
-        $file =~ s/\?\?\)/]/g;                        # | ??)|  ]|
-        $file =~ s/\?\?\-/~/g;                        # | ??-|  ~|
-        $file =~ s/\?\?\//\\/g;                       # | ??/|  \|
-        $file =~ s/\?\?/}/g;                         # | ??>|  }|
-    }
-    
-    while ( $file =~ /^\s*#\s*define\s+([\$\w]+)\b(?!\()\s*(.*)/gm ) 
-    {
-        my $def = $1;
-        my $rest = $2;
-        my $ignore = 0 ;
-    
-        $ignore = 1 if $ignore_def{$def} || $def =~ /$ignore_re/o ;
-    
-        # Cannot do: (-1) and ((LHANDLE)3) are OK:
-        #print("Skip non-wordy $def => $rest\n"),
-    
-        $rest =~ s/\s*$//;
-        #next if $rest =~ /[^\w\$]/;
-    
-        #print "Matched $_ ($def)\n" ;
-
-	next if $before{$def} ++ ;
-    
-        if ($ignore)
-          { $seen_define{$def} = 'IGNORE' }
-        elsif ($rest =~ /"/) 
-          { $seen_define{$def} = 'STRING' }
-        else
-          { $seen_define{$def} = 'DEFINE' }
-    }
-    
-    foreach $define (sort keys %seen_define)
-    { 
-        my $out = $filler ;
-	substr($out,0, length $define) = $define;
-	$result .= "\t$out => $seen_define{$define},\n" ;
-    }
-    
-    while ($file =~ /\btypedef\s+enum\s*{(.*?)}\s*(\w+)/gs )
-    {
-        my $enum = $1 ;
-        my $name = $2 ;
-        my $ignore = 0 ;
-    
-        $ignore = 1 if $ignore_enums{$name} ;
-    
-        #$enum =~ s/\s*=\s*\S+\s*(,?)\s*\n/$1/g;
-        $enum =~ s/^\s*//;
-        $enum =~ s/\s*$//;
-    
-        my @tokens = map { s/\s*=.*// ; $_} split /\s*,\s*/, $enum ;
-        my @new =  grep { ! $Enums{$_}++ } @tokens ;
-	if (@new)
-	{
-            my $value ;
-            if ($ignore)
-              { $value = "IGNORE, # $version" }
-            else
-              { $value = "'$version'," }
-
-            $result .= "\n\t# enum $name\n";
-            my $out = $filler ;
-	    foreach $name (@new)
-	    {
-                next if $ignore_exact_enum{$name} ;
-	        $out = $filler ;
-	        substr($out,0, length $name) = $name;
-                $result .= "\t$out => $value\n" ;
-	    }
-	}
-    }
-
-    return $result ;
-}
-
-
-sub StripCommentsAndStrings
-{
-
-  # Strip C & C++ coments
-  # From the perlfaq
-  $_[0] =~
-
-    s{
-       /\*         ##  Start of /* ... */ comment
-       [^*]*\*+    ##  Non-* followed by 1-or-more *'s
-       (
-         [^/*][^*]*\*+
-       )*          ##  0-or-more things which don't start with /
-                   ##    but do end with '*'
-       /           ##  End of /* ... */ comment
- 
-     |         ##     OR  C++ Comment
-       //          ## Start of C++ comment // 
-       [^\n]*      ## followed by 0-or-more non end of line characters
-
-     |         ##     OR  various things which aren't comments:
- 
-       (
-         "           ##  Start of " ... " string
-         (
-           \\.           ##  Escaped char
-         |               ##    OR
-           [^"\\]        ##  Non "\
-         )*
-         "           ##  End of " ... " string
- 
-       |         ##     OR
- 
-         '           ##  Start of ' ... ' string
-         (
-           \\.           ##  Escaped char
-         |               ##    OR
-           [^'\\]        ##  Non '\
-         )*
-         '           ##  End of ' ... ' string
- 
-       |         ##     OR
- 
-         .           ##  Anything other char
-         [^/"'\\]*   ##  Chars which doesn't start a comment, string or escape
-       )
-     }{$2}gxs;
-
-
-
-  # Remove double-quoted strings.
-  #$_[0] =~ s#"(\\.|[^"\\])*"##g;
-
-  # Remove single-quoted strings.
-  #$_[0] =~ s#'(\\.|[^'\\])*'##g;
-
-  # Remove leading whitespace.
-  $_[0] =~ s/\A\s+//m ;
-
-  # Remove trailing whitespace.
-  $_[0] =~ s/\s+\Z//m ;
-
-  # Replace all multiple whitespace by a single space.
-  #$_[0] =~ s/\s+/ /g ;
-}
-
-
-sub readFile
-{
-   my $filename = shift ;
-   open F, "<$filename" || die "Cannot open $filename: $!\n" ;
-   local $/ ;
-   my $x =  ;
-   close F ;
-   return $x ;
-}
-
-sub tuple
-{
-    my (@a) = split(/\./, $a) ;
-    my (@b) = split(/\./, $b) ;
-    if (@a != @b) {
-        my $diff = @a - @b ;
-        push @b, (0 x $diff) if $diff > 0 ;
-        push @a, (0 x -$diff) if $diff < 0 ;
-    }
-    foreach $A (@a) {
-        $B = shift @b ;
-        $A == $B or return $A <=> $B ;
-    }
-    return 0;
-}          
-
-__END__
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/btree.t b/storage/bdb/perl/BerkeleyDB/t/btree.t
deleted file mode 100644
index 152c366015a..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/btree.t
+++ /dev/null
@@ -1,932 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..244\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' $db = new BerkeleyDB::Btree  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $db = new BerkeleyDB::Btree -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/  ;
-
-    eval ' $db = new BerkeleyDB::Btree -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' $db = new BerkeleyDB::Btree -Txn => "x" ' ;
-    ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' $db = new BerkeleyDB::Btree -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-# Now check the interface to Btree
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    ok 6, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    my $status ;
-    ok 7, $db->db_put("some key", "some value") == 0  ;
-    ok 8, $db->status() == 0 ;
-    ok 9, $db->db_get("some key", $value) == 0 ;
-    ok 10, $value eq "some value" ;
-    ok 11, $db->db_put("key", "value") == 0  ;
-    ok 12, $db->db_get("key", $value) == 0 ;
-    ok 13, $value eq "value" ;
-    ok 14, $db->db_del("some key") == 0 ;
-    ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
-    ok 16, $db->status() == DB_NOTFOUND ;
-    ok 17, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
-
-    ok 18, $db->db_sync() == 0 ;
-
-    # Check NOOVERWRITE will make put fail when attempting to overwrite
-    # an existing record.
-
-    ok 19, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
-    ok 20, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
-    ok 21, $db->status() == DB_KEYEXIST ;
-
-
-    # check that the value of the key  has not been changed by the
-    # previous test
-    ok 22, $db->db_get("key", $value) == 0 ;
-    ok 23, $value eq "value" ;
-
-    # test DB_GET_BOTH
-    my ($k, $v) = ("key", "value") ;
-    ok 24, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
-
-    ($k, $v) = ("key", "fred") ;
-    ok 25, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-    ($k, $v) = ("another", "value") ;
-    ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-
-}
-
-{
-    # Check simple env works with a hash.
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    ok 27, my $lexD = new LexDir($home) ;
-
-    ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
-    					 @StdErrFile, -Home => $home ;
-    ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    ok 30, $db->db_put("some key", "some value") == 0 ;
-    ok 31, $db->db_get("some key", $value) == 0 ;
-    ok 32, $value eq "some value" ;
-    undef $db ;
-    undef $env ;
-}
-
- 
-{
-    # cursors
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 33, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				     -Flags    => DB_CREATE ;
-print "[$db] [$!] $BerkeleyDB::Error\n" ;				     
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 34, $ret == 0 ;
-
-    # create the cursor
-    ok 35, my $cursor = $db->db_cursor() ;
-
-    $k = $v = "" ;
-    my %copy = %data ;
-    my $extras = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 36, $cursor->status() == DB_NOTFOUND ;
-    ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'};
-    ok 38, keys %copy == 0 ;
-    ok 39, $extras == 0 ;
-
-    # sequence backwards
-    %copy = %data ;
-    $extras = 0 ;
-    my $status ;
-    for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_PREV)) {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 40, $status == DB_NOTFOUND ;
-    ok 41, $status eq $DB_errors{'DB_NOTFOUND'};
-    ok 42, $cursor->status() == $status ;
-    ok 43, $cursor->status() eq $status ;
-    ok 44, keys %copy == 0 ;
-    ok 45, $extras == 0 ;
-
-    ($k, $v) = ("green", "house") ;
-    ok 46, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
-
-    ($k, $v) = ("green", "door") ;
-    ok 47, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-    ($k, $v) = ("black", "house") ;
-    ok 48, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-}
- 
-{
-    # Tied Hash interface
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    ok 49, tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
-                                      -Flags    => DB_CREATE ;
-
-    # check "each" with an empty database
-    my $count = 0 ;
-    while (my ($k, $v) = each %hash) {
-	++ $count ;
-    }
-    ok 50, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 51, $count == 0 ;
-
-    # Add a k/v pair
-    my $value ;
-    $hash{"some key"} = "some value";
-    ok 52, (tied %hash)->status() == 0 ;
-    ok 53, $hash{"some key"} eq "some value";
-    ok 54, defined $hash{"some key"} ;
-    ok 55, (tied %hash)->status() == 0 ;
-    ok 56, exists $hash{"some key"} ;
-    ok 57, !defined $hash{"jimmy"} ;
-    ok 58, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 59, !exists $hash{"jimmy"} ;
-    ok 60, (tied %hash)->status() == DB_NOTFOUND ;
-
-    delete $hash{"some key"} ;
-    ok 61, (tied %hash)->status() == 0 ;
-    ok 62, ! defined $hash{"some key"} ;
-    ok 63, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 64, ! exists $hash{"some key"} ;
-    ok 65, (tied %hash)->status() == DB_NOTFOUND ;
-
-    $hash{1} = 2 ;
-    $hash{10} = 20 ;
-    $hash{1000} = 2000 ;
-
-    my ($keys, $values) = (0,0);
-    $count = 0 ;
-    while (my ($k, $v) = each %hash) {
-        $keys += $k ;
-	$values += $v ;
-	++ $count ;
-    }
-    ok 66, $count == 3 ;
-    ok 67, $keys == 1011 ;
-    ok 68, $values == 2022 ;
-
-    # now clear the hash
-    %hash = () ;
-    ok 69, keys %hash == 0 ;
-
-    untie %hash ;
-}
-
-{
-    # override default compare
-    my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
-    my $value ;
-    my (%h, %g, %k) ;
-    my @Keys = qw( 0123 12 -1234 9 987654321 def  ) ; 
-    ok 70, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile, 
-				     -Compare   => sub { $_[0] <=> $_[1] },
-				     -Flags    => DB_CREATE ;
-
-    ok 71, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2, 
-				     -Compare   => sub { $_[0] cmp $_[1] },
-				     -Flags    => DB_CREATE ;
-
-    ok 72, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3, 
-				   -Compare   => sub { length $_[0] <=> length $_[1] },
-				   -Flags    => DB_CREATE ;
-
-    my @srt_1 ;
-    { local $^W = 0 ;
-      @srt_1 = sort { $a <=> $b } @Keys ; 
-    }
-    my @srt_2 = sort { $a cmp $b } @Keys ;
-    my @srt_3 = sort { length $a <=> length $b } @Keys ;
-
-    foreach (@Keys) {
-        local $^W = 0 ;
-        $h{$_} = 1 ; 
-        $g{$_} = 1 ;
-        $k{$_} = 1 ;
-    }
-
-    sub ArrayCompare
-    {
-        my($a, $b) = @_ ;
-    
-        return 0 if @$a != @$b ;
-    
-        foreach (1 .. length @$a)
-        {
-            return 0 unless $$a[$_] eq $$b[$_] ;
-        }
-
-        1 ;
-    }
-
-    ok 73, ArrayCompare (\@srt_1, [keys %h]);
-    ok 74, ArrayCompare (\@srt_2, [keys %g]);
-    ok 75, ArrayCompare (\@srt_3, [keys %k]);
-
-}
-
-{
-    # override default compare, with duplicates, don't sort values
-    my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
-    my $value ;
-    my (%h, %g, %k) ;
-    my @Keys   = qw( 0123 9 12 -1234 9 987654321 def  ) ; 
-    my @Values = qw( 1    0 3   dd   x abc       0    ) ; 
-    ok 76, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile, 
-				     -Compare   => sub { $_[0] <=> $_[1] },
-				     -Property  => DB_DUP,
-				     -Flags    => DB_CREATE ;
-
-    ok 77, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2, 
-				     -Compare   => sub { $_[0] cmp $_[1] },
-				     -Property  => DB_DUP,
-				     -Flags    => DB_CREATE ;
-
-    ok 78, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3, 
-				   -Compare   => sub { length $_[0] <=> length $_[1] },
-				   -Property  => DB_DUP,
-				   -Flags    => DB_CREATE ;
-
-    my @srt_1 ;
-    { local $^W = 0 ;
-      @srt_1 = sort { $a <=> $b } @Keys ; 
-    }
-    my @srt_2 = sort { $a cmp $b } @Keys ;
-    my @srt_3 = sort { length $a <=> length $b } @Keys ;
-
-    foreach (@Keys) {
-        local $^W = 0 ;
-	my $value = shift @Values ;
-        $h{$_} = $value ; 
-        $g{$_} = $value ;
-        $k{$_} = $value ;
-    }
-
-    sub getValues
-    {
-         my $hash = shift ;
-	 my $db = tied %$hash ;
-	 my $cursor = $db->db_cursor() ;
-	 my @values = () ;
-	 my ($k, $v) = (0,0) ;
-         while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-	     push @values, $v ;
-	 }
-	 return @values ;
-    }
-
-    ok 79, ArrayCompare (\@srt_1, [keys %h]);
-    ok 80, ArrayCompare (\@srt_2, [keys %g]);
-    ok 81, ArrayCompare (\@srt_3, [keys %k]);
-    ok 82, ArrayCompare ([qw(dd 0 0 x 3 1 abc)], [getValues \%h]);
-    ok 83, ArrayCompare ([qw(dd 1 0 3 x abc 0)], [getValues \%g]);
-    ok 84, ArrayCompare ([qw(0 x 3 0 1 dd abc)], [getValues \%k]);
-
-    # test DB_DUP_NEXT
-    ok 85, my $cur = (tied %g)->db_cursor() ;
-    my ($k, $v) = (9, "") ;
-    ok 86, $cur->c_get($k, $v, DB_SET) == 0 ;
-    ok 87, $k == 9 && $v == 0 ;
-    ok 88, $cur->c_get($k, $v, DB_NEXT_DUP) == 0 ;
-    ok 89, $k == 9 && $v eq "x" ;
-    ok 90, $cur->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
-}
-
-{
-    # override default compare, with duplicates, sort values
-    my $lex = new LexFile $Dfile, $Dfile2;
-    my $value ;
-    my (%h, %g) ;
-    my @Keys   = qw( 0123 9 12 -1234 9 987654321 9 def  ) ; 
-    my @Values = qw( 1    11 3   dd   x abc      2 0    ) ; 
-    ok 91, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile, 
-				     -Compare   => sub { $_[0] <=> $_[1] },
-				     -DupCompare   => sub { $_[0] cmp $_[1] },
-				     -Property  => DB_DUP,
-				     -Flags    => DB_CREATE ;
-
-    ok 92, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2, 
-				     -Compare   => sub { $_[0] cmp $_[1] },
-				     -DupCompare   => sub { $_[0] <=> $_[1] },
-				     -Property  => DB_DUP,
-				     
-				     
-				     
-				     -Flags    => DB_CREATE ;
-
-    my @srt_1 ;
-    { local $^W = 0 ;
-      @srt_1 = sort { $a <=> $b } @Keys ; 
-    }
-    my @srt_2 = sort { $a cmp $b } @Keys ;
-
-    foreach (@Keys) {
-        local $^W = 0 ;
-	my $value = shift @Values ;
-        $h{$_} = $value ; 
-        $g{$_} = $value ;
-    }
-
-    ok 93, ArrayCompare (\@srt_1, [keys %h]);
-    ok 94, ArrayCompare (\@srt_2, [keys %g]);
-    ok 95, ArrayCompare ([qw(dd 1 3 x 2 11 abc 0)], [getValues \%g]);
-    ok 96, ArrayCompare ([qw(dd 0 11 2 x 3 1 abc)], [getValues \%h]);
-
-}
-
-{
-    # get_dup etc
-    my $lex = new LexFile $Dfile;
-    my %hh ;
-
-    ok 97, my $YY = tie %hh, "BerkeleyDB::Btree", -Filename => $Dfile, 
-				     -DupCompare   => sub { $_[0] cmp $_[1] },
-				     -Property  => DB_DUP,
-				     -Flags    => DB_CREATE ;
-
-    $hh{'Wall'} = 'Larry' ;
-    $hh{'Wall'} = 'Stone' ; # Note the duplicate key
-    $hh{'Wall'} = 'Brick' ; # Note the duplicate key
-    $hh{'Smith'} = 'John' ;
-    $hh{'mouse'} = 'mickey' ;
-    
-    # first work in scalar context
-    ok 98, scalar $YY->get_dup('Unknown') == 0 ;
-    ok 99, scalar $YY->get_dup('Smith') == 1 ;
-    ok 100, scalar $YY->get_dup('Wall') == 3 ;
-    
-    # now in list context
-    my @unknown = $YY->get_dup('Unknown') ;
-    ok 101, "@unknown" eq "" ;
-    
-    my @smith = $YY->get_dup('Smith') ;
-    ok 102, "@smith" eq "John" ;
-    
-    {
-    my @wall = $YY->get_dup('Wall') ;
-    my %wall ;
-    @wall{@wall} = @wall ;
-    ok 103, (@wall == 3 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'});
-    }
-    
-    # hash
-    my %unknown = $YY->get_dup('Unknown', 1) ;
-    ok 104, keys %unknown == 0 ;
-    
-    my %smith = $YY->get_dup('Smith', 1) ;
-    ok 105, keys %smith == 1 && $smith{'John'} ;
-    
-    my %wall = $YY->get_dup('Wall', 1) ;
-    ok 106, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1 
-    		&& $wall{'Brick'} == 1 ;
-    
-    undef $YY ;
-    untie %hh ;
-
-}
-
-{
-    # in-memory file
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $fd ;
-    my $value ;
-    ok 107, my $db = tie %hash, 'BerkeleyDB::Btree' ;
-
-    ok 108, $db->db_put("some key", "some value") == 0  ;
-    ok 109, $db->db_get("some key", $value) == 0 ;
-    ok 110, $value eq "some value" ;
-
-}
- 
-{
-    # partial
-    # check works via API
-
-    my $lex = new LexFile $Dfile ;
-    my $value ;
-    ok 111, my $db = new BerkeleyDB::Btree, -Filename => $Dfile,
-                                      	       -Flags    => DB_CREATE ;
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 112, $ret == 0 ;
-
-
-    # do a partial get
-    my ($pon, $off, $len) = $db->partial_set(0,2) ;
-    ok 113, ! $pon && $off == 0 && $len == 0 ;
-    ok 114, $db->db_get("red", $value) == 0 && $value eq "bo" ;
-    ok 115, $db->db_get("green", $value) == 0 && $value eq "ho" ;
-    ok 116, $db->db_get("blue", $value) == 0 && $value eq "se" ;
-
-    # do a partial get, off end of data
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 117, $pon ;
-    ok 118, $off == 0 ;
-    ok 119, $len == 2 ;
-    ok 120, $db->db_get("red", $value) == 0 && $value eq "t" ;
-    ok 121, $db->db_get("green", $value) == 0 && $value eq "se" ;
-    ok 122, $db->db_get("blue", $value) == 0 && $value eq "" ;
-
-    # switch of partial mode
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 123, $pon ;
-    ok 124, $off == 3 ;
-    ok 125, $len == 2 ;
-    ok 126, $db->db_get("red", $value) == 0 && $value eq "boat" ;
-    ok 127, $db->db_get("green", $value) == 0 && $value eq "house" ;
-    ok 128, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 129, $db->db_put("red", "") == 0 ;
-    ok 130, $db->db_put("green", "AB") == 0 ;
-    ok 131, $db->db_put("blue", "XYZ") == 0 ;
-    ok 132, $db->db_put("new", "KLM") == 0 ;
-
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 133, $pon ;
-    ok 134, $off == 0 ;
-    ok 135, $len == 2 ;
-    ok 136, $db->db_get("red", $value) == 0 && $value eq "at" ;
-    ok 137, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
-    ok 138, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
-    ok 139, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
-
-    # now partial put
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 140, ! $pon ;
-    ok 141, $off == 0 ;
-    ok 142, $len == 0 ;
-    ok 143, $db->db_put("red", "PPP") == 0 ;
-    ok 144, $db->db_put("green", "Q") == 0 ;
-    ok 145, $db->db_put("blue", "XYZ") == 0 ;
-    ok 146, $db->db_put("new", "TU") == 0 ;
-
-    $db->partial_clear() ;
-    ok 147, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
-    ok 148, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
-    ok 149, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
-    ok 150, $db->db_get("new", $value) == 0 && $value eq "KLMTU" ;
-}
-
-{
-    # partial
-    # check works via tied hash 
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-    ok 151, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
-                                      	       -Flags    => DB_CREATE ;
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    while (my ($k, $v) = each %data) {
-	$hash{$k} = $v ;
-    }
-
-
-    # do a partial get
-    $db->partial_set(0,2) ;
-    ok 152, $hash{"red"} eq "bo" ;
-    ok 153, $hash{"green"} eq "ho" ;
-    ok 154, $hash{"blue"}  eq "se" ;
-
-    # do a partial get, off end of data
-    $db->partial_set(3,2) ;
-    ok 155, $hash{"red"} eq "t" ;
-    ok 156, $hash{"green"} eq "se" ;
-    ok 157, $hash{"blue"} eq "" ;
-
-    # switch of partial mode
-    $db->partial_clear() ;
-    ok 158, $hash{"red"} eq "boat" ;
-    ok 159, $hash{"green"} eq "house" ;
-    ok 160, $hash{"blue"} eq "sea" ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 161, $hash{"red"} = "" ;
-    ok 162, $hash{"green"} = "AB" ;
-    ok 163, $hash{"blue"} = "XYZ" ;
-    ok 164, $hash{"new"} = "KLM" ;
-
-    $db->partial_clear() ;
-    ok 165, $hash{"red"} eq "at" ;
-    ok 166, $hash{"green"} eq "ABuse" ;
-    ok 167, $hash{"blue"} eq "XYZa" ;
-    ok 168, $hash{"new"} eq "KLM" ;
-
-    # now partial put
-    $db->partial_set(3,2) ;
-    ok 169, $hash{"red"} = "PPP" ;
-    ok 170, $hash{"green"} = "Q" ;
-    ok 171, $hash{"blue"} = "XYZ" ;
-    ok 172, $hash{"new"} = "TU" ;
-
-    $db->partial_clear() ;
-    ok 173, $hash{"red"} eq "at\0PPP" ;
-    ok 174, $hash{"green"} eq "ABuQ" ;
-    ok 175, $hash{"blue"} eq "XYZXYZ" ;
-    ok 176, $hash{"new"} eq "KLMTU" ;
-}
-
-{
-    # transaction
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 177, my $lexD = new LexDir($home) ;
-    ok 178, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 179, my $txn = $env->txn_begin() ;
-    ok 180, my $db1 = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
-                                      	       -Flags    =>  DB_CREATE ,
-					       -Env 	 => $env,
-					       -Txn	 => $txn ;
-
-    ok 181, (my $Z = $txn->txn_commit()) == 0 ;
-    ok 182, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-    
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 183, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 184, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 185, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    #ok 151, $txn->txn_abort() == 0 ;
-    ok 186, ($Z = $txn->txn_abort()) == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 187, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 188, $count == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie %hash ;
-}
-
-{
-    # DB_DUP
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    ok 189, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
-				      -Property  => DB_DUP,
-                                      -Flags    => DB_CREATE ;
-
-    $hash{'Wall'} = 'Larry' ;
-    $hash{'Wall'} = 'Stone' ;
-    $hash{'Smith'} = 'John' ;
-    $hash{'Wall'} = 'Brick' ;
-    $hash{'Wall'} = 'Brick' ;
-    $hash{'mouse'} = 'mickey' ;
-
-    ok 190, keys %hash == 6 ;
-
-    # create a cursor
-    ok 191, my $cursor = $db->db_cursor() ;
-
-    my $key = "Wall" ;
-    my $value ;
-    ok 192, $cursor->c_get($key, $value, DB_SET) == 0 ;
-    ok 193, $key eq "Wall" && $value eq "Larry" ;
-    ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 195, $key eq "Wall" && $value eq "Stone" ;
-    ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 197, $key eq "Wall" && $value eq "Brick" ;
-    ok 198, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 199, $key eq "Wall" && $value eq "Brick" ;
-
-    #my $ref = $db->db_stat() ; 
-    #ok 200, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
-#print "bt_flags " . $ref->{bt_flags} . " DB_DUP " . DB_DUP ."\n";
-
-    undef $db ;
-    undef $cursor ;
-    untie %hash ;
-
-}
-
-{
-    # db_stat
-
-    my $lex = new LexFile $Dfile ;
-    my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 200, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				     -Flags    => DB_CREATE,
-				 	-Minkey	=>3 ,
-					-Pagesize	=> 2 **12 
-					;
-
-    my $ref = $db->db_stat() ; 
-    ok 201, $ref->{$recs} == 0;
-    ok 202, $ref->{'bt_minkey'} == 3;
-    ok 203, $ref->{'bt_pagesize'} == 2 ** 12;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 204, $ret == 0 ;
-
-    $ref = $db->db_stat() ; 
-    ok 205, $ref->{$recs} == 3;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use strict ;
-   use vars qw( @ISA @EXPORT) ;
-
-   require Exporter ;
-   use BerkeleyDB;
-   @ISA=qw(BerkeleyDB BerkeleyDB::Btree );
-   @EXPORT = @BerkeleyDB::EXPORT ;
-
-   sub db_put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::db_put($key, $value * 3) ;
-   }
-
-   sub db_get { 
-	my $self = shift ;
-        $self->SUPER::db_get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }    
-    eval 'use SubDB ; ';
-    main::ok 206, $@ eq "" ;
-    my %h ;
-    my $X ;
-    eval '
-	$X = tie(%h, "SubDB", -Filename => "dbbtree.tmp", 
-			-Flags => DB_CREATE,
-			-Mode => 0640 );
-	' ;
-
-    main::ok 207, $@ eq "" && $X ;
-
-    my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
-    main::ok 208, $@ eq "" ;
-    main::ok 209, $ret == 7 ;
-
-    my $value = 0;
-    $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
-    main::ok 210, $@ eq "" ;
-    main::ok 211, $ret == 10 ;
-
-    $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
-    main::ok 212, $@ eq ""  ;
-    main::ok 213, $ret == 1 ;
-
-    $ret = eval '$X->A_new_method("joe") ' ;
-    main::ok 214, $@ eq "" ;
-    main::ok 215, $ret eq "[[10]]" ;
-
-    undef $X;
-    untie %h;
-    unlink "SubDB.pm", "dbbtree.tmp" ;
-
-}
-
-{
-    # DB_RECNUM, DB_SET_RECNO & DB_GET_RECNO
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) = ("", "");
-    ok 216, my $db = new BerkeleyDB::Btree 
-				-Filename  => $Dfile, 
-			     	-Flags     => DB_CREATE,
-			     	-Property  => DB_RECNUM ;
-
-
-    # create some data
-    my @data =  (
-		"A zero",
-		"B one",
-		"C two",
-		"D three",
-		"E four"
-		) ;
-
-    my $ix = 0 ;
-    my $ret = 0 ;
-    foreach (@data) {
-        $ret += $db->db_put($_, $ix) ;
-	++ $ix ;
-    }
-    ok 217, $ret == 0 ;
-
-    # db_get & DB_SET_RECNO
-    $k = 1 ;
-    ok 218, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 219, $k eq "B one" && $v == 1 ;
-
-    $k = 3 ;
-    ok 220, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 221, $k eq "D three" && $v == 3 ;
-
-    $k = 4 ;
-    ok 222, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 223, $k eq "E four" && $v == 4 ;
-
-    $k = 0 ;
-    ok 224, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 225, $k eq "A zero" && $v == 0 ;
-
-    # cursor & DB_SET_RECNO
-
-    # create the cursor
-    ok 226, my $cursor = $db->db_cursor() ;
-
-    $k = 2 ;
-    ok 227, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 228, $k eq "C two" && $v == 2 ;
-
-    $k = 0 ;
-    ok 229, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
-    ok 230, $k eq "A zero" && $v == 0 ;
-
-    $k = 3 ;
-    ok 231, $db->db_get($k, $v, DB_SET_RECNO) == 0;
-    ok 232, $k eq "D three" && $v == 3 ;
-
-    # cursor & DB_GET_RECNO
-    ok 233, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
-    ok 234, $k eq "A zero" && $v == 0 ;
-    ok 235, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
-    ok 236, $v == 0 ;
-
-    ok 237, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 238, $k eq "B one" && $v == 1 ;
-    ok 239, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
-    ok 240, $v == 1 ;
-
-    ok 241, $cursor->c_get($k, $v, DB_LAST) == 0 ;
-    ok 242, $k eq "E four" && $v == 4 ;
-    ok 243, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
-    ok 244, $v == 4 ;
-
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/cds.t b/storage/bdb/perl/BerkeleyDB/t/cds.t
deleted file mode 100644
index 4d129a0a66c..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/cds.t
+++ /dev/null
@@ -1,80 +0,0 @@
-#!./perl -w
-
-# Tests for Concurrent Data Store mode
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-BEGIN
-{
-    if ($BerkeleyDB::db_version < 2) {
-        print "1..0 # Skip: this needs Berkeley DB 2.x.x or better\n" ;
-        exit 0 ;
-    }
-}
-
-
-
-print "1..12\n";
-
-my $Dfile = "dbhash.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-{
-    # Error case -- env not opened in CDS mode
-
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    ok 1, my $lexD = new LexDir($home) ;
-
-    ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
-    					 -Home => $home, @StdErrFile ;
-
-    ok 3, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE ;
-
-    ok 4, ! $env->cds_enabled() ;
-    ok 5, ! $db->cds_enabled() ;
-
-    eval { $db->cds_lock() };
-    ok 6, $@ =~ /CDS not enabled for this database/;
-
-    undef $db;
-    undef $env ;
-}
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    ok 7, my $lexD = new LexDir($home) ;
-
-    ok 8, my $env = new BerkeleyDB::Env -Flags => DB_INIT_CDB|DB_CREATE|DB_INIT_MPOOL,
-    					 -Home => $home, @StdErrFile ;
-
-    ok 9, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE ;
-
-    ok 10,   $env->cds_enabled() ;
-    ok 11,   $db->cds_enabled() ;
-
-    my $cds = $db->cds_lock() ;
-    ok 12, $cds ;
-
-    undef $db;
-    undef $env ;
-}
diff --git a/storage/bdb/perl/BerkeleyDB/t/destroy.t b/storage/bdb/perl/BerkeleyDB/t/destroy.t
deleted file mode 100644
index 445d0740770..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/destroy.t
+++ /dev/null
@@ -1,105 +0,0 @@
-#!./perl -w
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..15\n";
-
-my $Dfile = "dbhash.tmp";
-my $home = "./fred" ;
-
-umask(0);
-
-{
-    # let object destruction kill everything
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    ok 1, my $lexD = new LexDir($home) ;
-    ok 2, my $env = new BerkeleyDB::Env -Home => $home,  @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 3, my $txn = $env->txn_begin() ;
-    ok 4, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    ok 5, $txn->txn_commit() == 0 ;
-    ok 6, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-    
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 7, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 8, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 9, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 10, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 11, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 12, $count == 0 ;
-
-    #undef $txn ;
-    #undef $cursor ;
-    #undef $db1 ;
-    #undef $env ;
-    #untie %hash ;
-
-}
-
-{
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $cursor ;
-    my ($k, $v) = ("", "") ;
-    ok 13, my $db1 = tie %hash, 'BerkeleyDB::Hash', 
-		-Filename	=> $Dfile,
-               	-Flags		=> DB_CREATE ;
-    my $count = 0 ;
-    # sequence forwards
-    ok 14, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 15, $count == 0 ;
-}
-
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/encrypt.t b/storage/bdb/perl/BerkeleyDB/t/encrypt.t
deleted file mode 100644
index b3cc13821ef..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/encrypt.t
+++ /dev/null
@@ -1,646 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-BEGIN
-{
-    if ($BerkeleyDB::db_version < 4.1) {
-        print "1..0 # Skip: this needs Berkeley DB 4.1.x or better\n" ;
-        exit 0 ;
-    }
-
-    # Is encryption available?
-    my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => {Password => "abc",
-	                  Flags    => DB_ENCRYPT_AES
-	                 };
-
-    if ($BerkeleyDB::Error =~ /Operation not supported/)
-    {
-        print "1..0 # Skip: encryption support not present\n" ;
-        exit 0 ;
-    }
-}     
-
-umask(0);
-
-print "1..80\n";        
-
-{    
-    eval
-    {
-        my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => 1,
-             -Flags => DB_CREATE ;
-     };
-     ok 1, $@ =~ /^Encrypt parameter must be a hash reference at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => {},
-             -Flags => DB_CREATE ;
-     };
-     ok 2, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => {Password => "fred"},
-             -Flags => DB_CREATE ;
-     };
-     ok 3, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => {Flags => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 4, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Env @StdErrFile,
-             -Encrypt => {Fred => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 5, $@ =~ /^\Qunknown key value(s) Fred at/;
-
-}
-
-{
-    # new BerkeleyDB::Env -Encrypt =>
-
-    # create an environment with a Home
-    my $home = "./fred" ;
-    #mkdir $home;
-    ok 6, my $lexD = new LexDir($home) ;
-    ok 7, my $env = new BerkeleyDB::Env @StdErrFile,
-             -Home => $home,
-             -Encrypt => {Password => "abc",
-	                  Flags    => DB_ENCRYPT_AES
-	                 },
-             -Flags => DB_CREATE | DB_INIT_MPOOL ;
-
-
-
-    my $Dfile = "abc.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 8, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-	                             -Env         => $env,
-				     -Flags       => DB_CREATE, 
-				     -Property    => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 9, $ret == 0 ;
-
-    # check there are three records
-    ok 10, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # once the database is created, do not need to specify DB_ENCRYPT
-    ok 11, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile, 
-	                              -Env      => $env,
-				      -Flags    => DB_CREATE ;
-    $v = '';				      
-    ok 12, ! $db1->db_get("red", $v) ;
-    ok 13, $v eq $data{"red"},
-    undef $db1;
-    undef $env;
-
-    # open a database without specifying encryption
-    ok 14,  ! new BerkeleyDB::Hash -Filename => "$home/$Dfile"; 
-
-    ok 15,  ! new BerkeleyDB::Env 
-             -Home => $home,
-             -Encrypt => {Password => "def",
-	                  Flags    => DB_ENCRYPT_AES
-	                 },
-             -Flags => DB_CREATE | DB_INIT_MPOOL ;
-}
-
-{    
-    eval
-    {
-        my $env = new BerkeleyDB::Hash 
-             -Encrypt => 1,
-             -Flags => DB_CREATE ;
-     };
-     ok 16, $@ =~ /^Encrypt parameter must be a hash reference at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Hash 
-             -Encrypt => {},
-             -Flags => DB_CREATE ;
-     };
-     ok 17, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Hash 
-             -Encrypt => {Password => "fred"},
-             -Flags => DB_CREATE ;
-     };
-     ok 18, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Hash 
-             -Encrypt => {Flags => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 19, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Hash 
-             -Encrypt => {Fred => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 20, $@ =~ /^\Qunknown key value(s) Fred at/;
-
-}
-
-{    
-    eval
-    {
-        my $env = new BerkeleyDB::Btree 
-             -Encrypt => 1,
-             -Flags => DB_CREATE ;
-     };
-     ok 21, $@ =~ /^Encrypt parameter must be a hash reference at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Btree 
-             -Encrypt => {},
-             -Flags => DB_CREATE ;
-     };
-     ok 22, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Btree 
-             -Encrypt => {Password => "fred"},
-             -Flags => DB_CREATE ;
-     };
-     ok 23, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Btree 
-             -Encrypt => {Flags => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 24, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Btree 
-             -Encrypt => {Fred => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 25, $@ =~ /^\Qunknown key value(s) Fred at/;
-
-}
-
-{    
-    eval
-    {
-        my $env = new BerkeleyDB::Queue 
-             -Encrypt => 1,
-             -Flags => DB_CREATE ;
-     };
-     ok 26, $@ =~ /^Encrypt parameter must be a hash reference at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Queue 
-             -Encrypt => {},
-             -Flags => DB_CREATE ;
-     };
-     ok 27, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Queue 
-             -Encrypt => {Password => "fred"},
-             -Flags => DB_CREATE ;
-     };
-     ok 28, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Queue 
-             -Encrypt => {Flags => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 29, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Queue 
-             -Encrypt => {Fred => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 30, $@ =~ /^\Qunknown key value(s) Fred at/;
-
-}
-
-{    
-    eval
-    {
-        my $env = new BerkeleyDB::Recno 
-             -Encrypt => 1,
-             -Flags => DB_CREATE ;
-     };
-     ok 31, $@ =~ /^Encrypt parameter must be a hash reference at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Recno 
-             -Encrypt => {},
-             -Flags => DB_CREATE ;
-     };
-     ok 32, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Recno 
-             -Encrypt => {Password => "fred"},
-             -Flags => DB_CREATE ;
-     };
-     ok 33, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Recno 
-             -Encrypt => {Flags => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 34, $@ =~ /^Must specify Password and Flags with Encrypt parameter at/;
-
-    eval
-    {
-        my $env = new BerkeleyDB::Recno 
-             -Encrypt => {Fred => 1},
-             -Flags => DB_CREATE ;
-     };
-     ok 35, $@ =~ /^\Qunknown key value(s) Fred at/;
-
-}
-
-
-{
-    # new BerkeleyDB::Hash -Encrypt =>
-
-    my $Dfile = "abcd.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 36, my $db = new BerkeleyDB::Hash 
-                           -Filename => $Dfile, 
-		           -Flags    => DB_CREATE, 
-                           -Encrypt  => {Password => "beta",
-	                                 Flags    => DB_ENCRYPT_AES
-	                                },
-		           -Property => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 37, $ret == 0 ;
-
-    # check there are three records
-    ok 38, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # attempt to open a database without specifying encryption
-    ok 39, ! new BerkeleyDB::Hash -Filename => $Dfile, 
-				      -Flags    => DB_CREATE ;
-
-
-    # try opening with the wrong password				      
-    ok 40, ! new BerkeleyDB::Hash -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "def",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    # read the encrypted data				      
-    ok 41, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "beta",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    $v = '';				      
-    ok 42, ! $db1->db_get("red", $v) ;
-    ok 43, $v eq $data{"red"};
-    # check there are three records
-    ok 44, countRecords($db1) == 3 ;
-    undef $db1;
-}
-
-{
-    # new BerkeleyDB::Btree -Encrypt =>
-
-    my $Dfile = "abcd.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 45, my $db = new BerkeleyDB::Btree 
-                           -Filename => $Dfile, 
-		           -Flags    => DB_CREATE, 
-                           -Encrypt  => {Password => "beta",
-	                                 Flags    => DB_ENCRYPT_AES
-	                                },
-		           -Property => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 46, $ret == 0 ;
-
-    # check there are three records
-    ok 47, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # attempt to open a database without specifying encryption
-    ok 48, ! new BerkeleyDB::Btree -Filename => $Dfile, 
-				      -Flags    => DB_CREATE ;
-
-
-    # try opening with the wrong password				      
-    ok 49, ! new BerkeleyDB::Btree -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "def",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    # read the encrypted data				      
-    ok 50, my $db1 = new BerkeleyDB::Btree -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "beta",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    $v = '';				      
-    ok 51, ! $db1->db_get("red", $v) ;
-    ok 52, $v eq $data{"red"};
-    # check there are three records
-    ok 53, countRecords($db1) == 3 ;
-    undef $db1;
-}
-
-{
-    # new BerkeleyDB::Queue -Encrypt =>
-
-    my $Dfile = "abcd.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 54, my $db = new BerkeleyDB::Queue 
-                           -Filename => $Dfile, 
-                           -Len      => 5,
-                           -Pad      => "x",
-		           -Flags    => DB_CREATE, 
-                           -Encrypt  => {Password => "beta",
-	                                 Flags    => DB_ENCRYPT_AES
-	                                },
-		           -Property => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		1	=> 2,
-		2	=> "house",
-		3	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 55, $ret == 0 ;
-
-    # check there are three records
-    ok 56, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # attempt to open a database without specifying encryption
-    ok 57, ! new BerkeleyDB::Queue -Filename => $Dfile, 
-                                   -Len      => 5,
-                                   -Pad      => "x",
-				   -Flags    => DB_CREATE ;
-
-
-    # try opening with the wrong password				      
-    ok 58, ! new BerkeleyDB::Queue -Filename => $Dfile, 
-                                   -Len      => 5,
-                                   -Pad      => "x",
-                                   -Encrypt => {Password => "def",
-	                                        Flags    => DB_ENCRYPT_AES
-	                                       },
-		                   -Property    => DB_ENCRYPT ;
-
-
-    # read the encrypted data				      
-    ok 59, my $db1 = new BerkeleyDB::Queue -Filename => $Dfile, 
-                                           -Len      => 5,
-                                           -Pad      => "x",
-                                           -Encrypt => {Password => "beta",
-	                                        Flags    => DB_ENCRYPT_AES
-	                                       },
-		                           -Property    => DB_ENCRYPT ;
-
-
-    $v = '';				      
-    ok 60, ! $db1->db_get(3, $v) ;
-    ok 61, $v eq fillout($data{3}, 5, 'x');
-    # check there are three records
-    ok 62, countRecords($db1) == 3 ;
-    undef $db1;
-}
-
-{
-    # new BerkeleyDB::Recno -Encrypt =>
-
-    my $Dfile = "abcd.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 63, my $db = new BerkeleyDB::Recno 
-                           -Filename => $Dfile, 
-		           -Flags    => DB_CREATE, 
-                           -Encrypt  => {Password => "beta",
-	                                 Flags    => DB_ENCRYPT_AES
-	                                },
-		           -Property => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		1	=> 2,
-		2	=> "house",
-		3	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 64, $ret == 0 ;
-
-    # check there are three records
-    ok 65, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # attempt to open a database without specifying encryption
-    ok 66, ! new BerkeleyDB::Recno -Filename => $Dfile, 
-				      -Flags    => DB_CREATE ;
-
-
-    # try opening with the wrong password				      
-    ok 67, ! new BerkeleyDB::Recno -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "def",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    # read the encrypted data				      
-    ok 68, my $db1 = new BerkeleyDB::Recno -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "beta",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    $v = '';				      
-    ok 69, ! $db1->db_get(3, $v) ;
-    ok 70, $v eq $data{3};
-    # check there are three records
-    ok 71, countRecords($db1) == 3 ;
-    undef $db1;
-}
-
-{
-    # new BerkeleyDB::Unknown -Encrypt =>
-
-    my $Dfile = "abcd.enc";
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 72, my $db = new BerkeleyDB::Hash 
-                           -Filename => $Dfile, 
-		           -Flags    => DB_CREATE, 
-                           -Encrypt  => {Password => "beta",
-	                                 Flags    => DB_ENCRYPT_AES
-	                                },
-		           -Property => DB_ENCRYPT ;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 73, $ret == 0 ;
-
-    # check there are three records
-    ok 74, countRecords($db) == 3 ;
-
-    undef $db;
-
-    # attempt to open a database without specifying encryption
-    ok 75, ! new BerkeleyDB::Unknown -Filename => $Dfile, 
-				      -Flags    => DB_CREATE ;
-
-
-    # try opening with the wrong password				      
-    ok 76, ! new BerkeleyDB::Unknown -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "def",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    # read the encrypted data				      
-    ok 77, my $db1 = new BerkeleyDB::Unknown -Filename => $Dfile, 
-                           -Filename => $Dfile, 
-                           -Encrypt => {Password => "beta",
-	                                Flags    => DB_ENCRYPT_AES
-	                               },
-		           -Property    => DB_ENCRYPT ;
-
-
-    $v = '';				      
-    ok 78, ! $db1->db_get("red", $v) ;
-    ok 79, $v eq $data{"red"};
-    # check there are three records
-    ok 80, countRecords($db1) == 3 ;
-    undef $db1;
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/env.t b/storage/bdb/perl/BerkeleyDB/t/env.t
deleted file mode 100644
index 6729ed92bdd..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/env.t
+++ /dev/null
@@ -1,260 +0,0 @@
-#!./perl -w
-
-# ID: 1.2, 7/17/97
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-
-BEGIN {
-    $ENV{LC_ALL} = 'de_DE@euro';
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..53\n";
-
-my $Dfile = "dbhash.tmp";
-
-umask(0);
-
-my $version_major  = 0;
-
-{
-    # db version stuff
-    my ($major, $minor, $patch) = (0, 0, 0) ;
-
-    ok 1, my $VER = BerkeleyDB::DB_VERSION_STRING ;
-    ok 2, my $ver = BerkeleyDB::db_version($version_major, $minor, $patch) ;
-    ok 3, $VER eq $ver ;
-    ok 4, $version_major > 1 ;
-    ok 5, defined $minor ;
-    ok 6, defined $patch ;
-}
-
-{
-    # Check for invalid parameters
-    my $env ;
-    eval ' $env = new BerkeleyDB::Env( -Stupid => 3) ; ' ;
-    ok 7, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $env = new BerkeleyDB::Env( -Bad => 2, -Home => "/tmp", -Stupid => 3) ; ' ;
-    ok 8, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/  ;
-
-    eval ' $env = new BerkeleyDB::Env (-Config => {"fred" => " "} ) ; ' ;
-    ok 9, !$env ;
-    ok 10, $BerkeleyDB::Error =~ /^(illegal name-value pair|Invalid argument)/ ;
-    #print " $BerkeleyDB::Error\n";
-}
-
-{
-    # create a very simple environment
-    my $home = "./fred" ;
-    ok 11, my $lexD = new LexDir($home) ;
-    chdir "./fred" ;
-    ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE,
-					@StdErrFile;
-    chdir ".." ;
-    undef $env ;
-}
-
-{
-    # create an environment with a Home
-    my $home = "./fred" ;
-    ok 13, my $lexD = new LexDir($home) ;
-    ok 14, my $env = new BerkeleyDB::Env -Home => $home,
-    					 -Flags => DB_CREATE ;
-
-    undef $env ;
-}
-
-{
-    # make new fail.
-    my $home = "./not_there" ;
-    rmtree $home ;
-    ok 15, ! -d $home ;
-    my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-			          -Flags => DB_INIT_LOCK ;
-    ok 16, ! $env ;
-    ok 17,   $! != 0 || $^E != 0 ;
-
-    rmtree $home ;
-}
-
-{
-    # Config
-    use Cwd ;
-    my $cwd = cwd() ;
-    my $home = "$cwd/fred" ;
-    my $data_dir = "$home/data_dir" ;
-    my $log_dir = "$home/log_dir" ;
-    my $data_file = "data.db" ;
-    ok 18, my $lexD = new LexDir($home) ;
-    ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
-    ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
-    my $env = new BerkeleyDB::Env -Home   => $home, @StdErrFile,
-			      -Config => { DB_DATA_DIR => $data_dir,
-					   DB_LOG_DIR  => $log_dir
-					 },
-			      -Flags  => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
-					 DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 21, $env ;
-
-    ok 22, my $txn = $env->txn_begin() ;
-
-    my %hash ;
-    ok 23, tie %hash, 'BerkeleyDB::Hash', -Filename => $data_file,
-                                       -Flags     => DB_CREATE ,
-                                       -Env       => $env,
-                                       -Txn       => $txn  ;
-
-    $hash{"abc"} = 123 ;
-    $hash{"def"} = 456 ;
-
-    $txn->txn_commit() ;
-
-    untie %hash ;
-
-    undef $txn ;
-    undef $env ;
-}
-
-{
-    # -ErrFile with a filename
-    my $errfile = "./errfile" ;
-    my $home = "./fred" ;
-    ok 24, my $lexD = new LexDir($home) ;
-    my $lex = new LexFile $errfile ;
-    ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile, 
-    					  -Flags => DB_CREATE,
-					  -Home   => $home) ;
-    my $db = new BerkeleyDB::Hash -Filename => $Dfile,
-			     -Env      => $env,
-			     -Flags    => -1;
-    ok 26, !$db ;
-
-    ok 27, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
-    ok 28, -e $errfile ;
-    my $contents = docat($errfile) ;
-    chomp $contents ;
-    ok 29, $BerkeleyDB::Error eq $contents ;
-
-    undef $env ;
-}
-
-{
-    # -ErrFile with a filehandle
-    use IO::File ;
-    my $errfile = "./errfile" ;
-    my $home = "./fred" ;
-    ok 30, my $lexD = new LexDir($home) ;
-    my $lex = new LexFile $errfile ;
-    my $fh = new IO::File ">$errfile" ;
-    ok 31, my $env = new BerkeleyDB::Env( -ErrFile => $fh, 
-    					  -Flags => DB_CREATE,
-					  -Home   => $home) ;
-    my $db = new BerkeleyDB::Hash -Filename => $Dfile,
-			     -Env      => $env,
-			     -Flags    => -1;
-    ok 32, !$db ;
-
-    ok 33, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
-    ok 34, -e $errfile ;
-    my $contents = docat($errfile) ;
-    chomp $contents ;
-    ok 35, $BerkeleyDB::Error eq $contents ;
-
-    undef $env ;
-}
-
-{
-    # -ErrPrefix
-    my $home = "./fred" ;
-    ok 36, my $lexD = new LexDir($home) ;
-    my $errfile = "./errfile" ;
-    my $lex = new LexFile $errfile ;
-    ok 37, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
-					-ErrPrefix => "PREFIX",
-    					  -Flags => DB_CREATE,
-					  -Home   => $home) ;
-    my $db = new BerkeleyDB::Hash -Filename => $Dfile,
-			     -Env      => $env,
-			     -Flags    => -1;
-    ok 38, !$db ;
-
-    ok 39, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
-    ok 40, -e $errfile ;
-    my $contents = docat($errfile) ;
-    chomp $contents ;
-    ok 41, $BerkeleyDB::Error eq $contents ;
-
-    # change the prefix on the fly
-    my $old = $env->errPrefix("NEW ONE") ;
-    ok 42, $old eq "PREFIX" ;
-
-    $db = new BerkeleyDB::Hash -Filename => $Dfile,
-			     -Env      => $env,
-			     -Flags    => -1;
-    ok 43, !$db ;
-    ok 44, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
-    $contents = docat($errfile) ;
-    chomp $contents ;
-    ok 45, $contents =~ /$BerkeleyDB::Error$/ ;
-    undef $env ;
-}
-
-{
-    # test db_appexit
-    use Cwd ;
-    my $cwd = cwd() ;
-    my $home = "$cwd/fred" ;
-    my $data_dir = "$home/data_dir" ;
-    my $log_dir = "$home/log_dir" ;
-    my $data_file = "data.db" ;
-    ok 46, my $lexD = new LexDir($home);
-    ok 47, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
-    ok 48, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
-    my $env = new BerkeleyDB::Env -Home   => $home, @StdErrFile,
-			      -Config => { DB_DATA_DIR => $data_dir,
-					   DB_LOG_DIR  => $log_dir
-					 },
-			      -Flags  => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
-					 DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 49, $env ;
-
-    ok 50, my $txn_mgr = $env->TxnMgr() ;
-
-    ok 51, $env->db_appexit() == 0 ;
-
-}
-
-{
-    # attempt to open a new environment without DB_CREATE
-    # should fail with Berkeley DB 3.x or better.
-
-    my $home = "./fred" ;
-    ok 52, my $lexD = new LexDir($home) ;
-    chdir "./fred" ;
-    my $env = new BerkeleyDB::Env -Home => $home, -Flags => DB_CREATE ;
-    ok 53, $version_major == 2 ? $env : ! $env ;
-
-    # The test below is not portable -- the error message returned by
-    # $BerkeleyDB::Error is locale dependant.
-
-    #ok 54, $version_major == 2 ? 1 
-    #                           : $BerkeleyDB::Error =~ /No such file or directory/ ;
-    #    or print "# BerkeleyDB::Error is $BerkeleyDB::Error\n";
-    chdir ".." ;
-    undef $env ;
-}
-
-# test -Verbose
-# test -Flags
-# db_value_set
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples.t b/storage/bdb/perl/BerkeleyDB/t/examples.t
deleted file mode 100644
index 69b7f8ff8c5..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/examples.t
+++ /dev/null
@@ -1,401 +0,0 @@
-#!./perl -w
-
-use strict ; 
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util;
-
-print "1..7\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-my $redirect = "xyzt" ;
-
-
-{
-my $x = $BerkeleyDB::Error;
-my $redirect = "xyzt" ;
- {
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-    use vars qw( %h $k $v ) ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    tie %h, "BerkeleyDB::Hash", 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $h{"apple"} = "red" ;
-    $h{"orange"} = "orange" ;
-    $h{"banana"} = "yellow" ;
-    $h{"tomato"} = "red" ;
-    
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $h{"banana"} ;
-    
-    # Delete a key/value pair.
-    delete $h{"apple"} ;
-    
-    # print the contents of the file
-    while (($k, $v) = each %h)
-      { print "$k -> $v\n" }
-      
-    untie %h ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(1, docat_del($redirect) eq <<'EOM') ;
-Banana Exists
-
-orange -> orange
-tomato -> red
-banana -> yellow
-EOM
-
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("apple", "red") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("banana", "yellow") ;
-    $db->db_put("tomato", "red") ;
-    
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
-    
-    # Delete a key/value pair.
-    $db->db_del("apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(2, docat_del($redirect) eq <<'EOM') ;
-Banana Exists
-
-orange -> orange
-tomato -> red
-banana -> yellow
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(3, docat_del($redirect) eq <<'EOM') ;
-Smith
-Wall
-mouse
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Compare    => sub { lc $_[0] cmp lc $_[1] }
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(4, docat_del($redirect) eq <<'EOM') ;
-mouse
-Smith
-Wall
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-    my $db = tie %hash, 'BerkeleyDB::Hash', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Install DBM Filters
-    $db->filter_fetch_key  ( sub { s/\0$//    } ) ;
-    $db->filter_store_key  ( sub { $_ .= "\0" } ) ;
-    $db->filter_fetch_value( sub { s/\0$//    } ) ;
-    $db->filter_store_value( sub { $_ .= "\0" } ) ;
-
-    $hash{"abc"} = "def" ;
-    my $a = $hash{"ABC"} ;
-    # ...
-    undef $db ;
-    untie %hash ;
-    $db = tie %hash, 'BerkeleyDB::Hash', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-    while (($k, $v) = each %hash)
-      { print "$k -> $v\n" }
-    undef $db ;
-    untie %hash ;
-
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(5, docat_del($redirect) eq <<"EOM") ;
-abc\x00 -> def\x00
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-
-    my $db = tie %hash, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    $db->filter_fetch_key  ( sub { $_ = unpack("i", $_) } ) ;
-    $db->filter_store_key  ( sub { $_ = pack ("i", $_) } ) ;
-    $hash{123} = "def" ;
-    # ...
-    undef $db ;
-    untie %hash ;
-    $db = tie %hash, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot Open $filename: $!\n" ;
-    while (($k, $v) = each %hash)
-      { print "$k -> $v\n" }
-    undef $db ;
-    untie %hash ;
-
-    unlink $filename ;
- }
-
-  my $val = pack("i", 123) ;
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(6, docat_del($redirect) eq <<"EOM") ;
-$val -> def
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    if ($FA) {
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    tie @h, 'BerkeleyDB::Recno', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Property   => DB_RENUMBER
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    push @h, "green", "black" ;
-
-    my $elements = scalar @h ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = pop @h ;
-    print "popped $last\n" ;
-
-    unshift @h, "white" ;
-    my $first = shift @h ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    untie @h ;
-    unlink $filename ;
-    } else {
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    my $db = tie @h, 'BerkeleyDB::Recno', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Property   => DB_RENUMBER
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    $db->push("green", "black") ;
-
-    my $elements = $db->length() ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = $db->pop ;
-    print "popped $last\n" ;
-
-    $db->unshift("white") ;
-    my $first = $db->shift ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    undef $db ;
-    untie @h ;
-    unlink $filename ;
-    }
-
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(7, docat_del($redirect) eq <<"EOM") ;
-The array contains 5 entries
-popped black
-shifted white
-Element 1 Exists with value blue
-EOM
-
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples.t.T b/storage/bdb/perl/BerkeleyDB/t/examples.t.T
deleted file mode 100644
index fe9bdf76b06..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/examples.t.T
+++ /dev/null
@@ -1,415 +0,0 @@
-#!./perl -w
-
-use strict ; 
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util;
-
-print "1..7\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-my $redirect = "xyzt" ;
-
-
-{
-my $x = $BerkeleyDB::Error;
-my $redirect = "xyzt" ;
- {
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN simpleHash
-    use strict ;
-    use BerkeleyDB ;
-    use vars qw( %h $k $v ) ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    tie %h, "BerkeleyDB::Hash", 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $h{"apple"} = "red" ;
-    $h{"orange"} = "orange" ;
-    $h{"banana"} = "yellow" ;
-    $h{"tomato"} = "red" ;
-    
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $h{"banana"} ;
-    
-    # Delete a key/value pair.
-    delete $h{"apple"} ;
-    
-    # print the contents of the file
-    while (($k, $v) = each %h)
-      { print "$k -> $v\n" }
-      
-    untie %h ;
-## END simpleHash
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(1, docat_del($redirect) eq <<'EOM') ;
-Banana Exists
-
-orange -> orange
-tomato -> red
-banana -> yellow
-EOM
-
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN simpleHash2
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("apple", "red") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("banana", "yellow") ;
-    $db->db_put("tomato", "red") ;
-    
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
-    
-    # Delete a key/value pair.
-    $db->db_del("apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-## END simpleHash2
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(2, docat_del($redirect) eq <<'EOM') ;
-Banana Exists
-
-orange -> orange
-tomato -> red
-banana -> yellow
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN btreeSimple
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-## END btreeSimple
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(3, docat_del($redirect) eq <<'EOM') ;
-Smith
-Wall
-mouse
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN btreeSortOrder
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "tree" ;
-    unlink $filename ;
-    my %h ;
-    tie %h, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Compare    => sub { lc $_[0] cmp lc $_[1] }
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-## END btreeSortOrder
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(4, docat_del($redirect) eq <<'EOM') ;
-mouse
-Smith
-Wall
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN nullFilter
-    use strict ;
-    use BerkeleyDB ;
-
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-    my $db = tie %hash, 'BerkeleyDB::Hash', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    # Install DBM Filters
-    $db->filter_fetch_key  ( sub { s/\0$//    } ) ;
-    $db->filter_store_key  ( sub { $_ .= "\0" } ) ;
-    $db->filter_fetch_value( sub { s/\0$//    } ) ;
-    $db->filter_store_value( sub { $_ .= "\0" } ) ;
-
-    $hash{"abc"} = "def" ;
-    my $a = $hash{"ABC"} ;
-    # ...
-    undef $db ;
-    untie %hash ;
-## END nullFilter
-    $db = tie %hash, 'BerkeleyDB::Hash', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-    while (($k, $v) = each %hash)
-      { print "$k -> $v\n" }
-    undef $db ;
-    untie %hash ;
-
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(5, docat_del($redirect) eq <<"EOM") ;
-abc\x00 -> def\x00
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN intFilter
-    use strict ;
-    use BerkeleyDB ;
-    my %hash ;
-    my $filename = "filt.db" ;
-    unlink $filename ;
-
-
-    my $db = tie %hash, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot open $filename: $!\n" ;
-
-    $db->filter_fetch_key  ( sub { $_ = unpack("i", $_) } ) ;
-    $db->filter_store_key  ( sub { $_ = pack ("i", $_) } ) ;
-    $hash{123} = "def" ;
-    # ...
-    undef $db ;
-    untie %hash ;
-## END intFilter
-    $db = tie %hash, 'BerkeleyDB::Btree', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE
-      or die "Cannot Open $filename: $!\n" ;
-    while (($k, $v) = each %hash)
-      { print "$k -> $v\n" }
-    undef $db ;
-    untie %hash ;
-
-    unlink $filename ;
- }
-
-  my $val = pack("i", 123) ;
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(6, docat_del($redirect) eq <<"EOM") ;
-$val -> def
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    if ($FA) {
-## BEGIN simpleRecno
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    tie @h, 'BerkeleyDB::Recno', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Property   => DB_RENUMBER
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    push @h, "green", "black" ;
-
-    my $elements = scalar @h ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = pop @h ;
-    print "popped $last\n" ;
-
-    unshift @h, "white" ;
-    my $first = shift @h ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    untie @h ;
-## END simpleRecno
-    unlink $filename ;
-    } else {
-    use strict ;
-    use BerkeleyDB ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    my $db = tie @h, 'BerkeleyDB::Recno', 
-    		-Filename   => $filename, 
-	        -Flags      => DB_CREATE,
-		-Property   => DB_RENUMBER
-      or die "Cannot open $filename: $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    $db->push("green", "black") ;
-
-    my $elements = $db->length() ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = $db->pop ;
-    print "popped $last\n" ;
-
-    $db->unshift("white") ;
-    my $first = $db->shift ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    undef $db ;
-    untie @h ;
-    unlink $filename ;
-    }
-
- }
-
-  #print "[" . docat($redirect) . "]\n" ;
-  ok(7, docat_del($redirect) eq <<"EOM") ;
-The array contains 5 entries
-popped black
-shifted white
-Element 1 Exists with value blue
-EOM
-
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples3.t b/storage/bdb/perl/BerkeleyDB/t/examples3.t
deleted file mode 100644
index 22e94b770e1..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/examples3.t
+++ /dev/null
@@ -1,132 +0,0 @@
-#!./perl -w
-
-use strict ; 
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util;
-
-BEGIN 
-{
-    if ($BerkeleyDB::db_version < 3) {
-        print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
-        exit 0 ;
-    }
-}
-
-
-print "1..2\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-my $redirect = "xyzt" ;
-
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(1, docat_del($redirect) eq <<'EOM') ;
-orange -> orange
-yellow -> banana
-red -> apple
-red -> tomato
-green -> banana
-green -> apple
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP | DB_DUPSORT
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(2, docat_del($redirect) eq <<'EOM') ;
-orange -> orange
-yellow -> banana
-red -> apple
-red -> tomato
-green -> apple
-green -> banana
-EOM
-
-}
-
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples3.t.T b/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
deleted file mode 100644
index 5eeaa14d00c..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
+++ /dev/null
@@ -1,136 +0,0 @@
-#!./perl -w
-
-use strict ; 
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util;
-
-BEGIN 
-{
-    if ($BerkeleyDB::db_version < 3) {
-        print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
-        exit 0 ;
-    }
-}
-
-
-print "1..2\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-my $redirect = "xyzt" ;
-
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN dupHash
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-## END dupHash
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(1, docat_del($redirect) eq <<'EOM') ;
-orange -> orange
-yellow -> banana
-red -> apple
-red -> tomato
-green -> banana
-green -> apple
-EOM
-
-}
-
-{
-my $redirect = "xyzt" ;
- {
-
-    my $redirectObj = new Redirect $redirect ;
-
-## BEGIN dupSortHash
-    use strict ;
-    use BerkeleyDB ;
-    
-    my $filename = "fruit" ;
-    unlink $filename ;
-    my $db = new BerkeleyDB::Hash 
-                -Filename => $filename, 
-		-Flags    => DB_CREATE,
-		-Property  => DB_DUP | DB_DUPSORT
-        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
-
-    # Add a few key/value pairs to the file
-    $db->db_put("red", "apple") ;
-    $db->db_put("orange", "orange") ;
-    $db->db_put("green", "banana") ;
-    $db->db_put("yellow", "banana") ;
-    $db->db_put("red", "tomato") ;
-    $db->db_put("green", "apple") ;
-    
-    # print the contents of the file
-    my ($k, $v) = ("", "") ;
-    my $cursor = $db->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-      { print "$k -> $v\n" }
-      
-    undef $cursor ;
-    undef $db ;
-## END dupSortHash
-    unlink $filename ;
- }
-
-  #print "[" . docat($redirect) . "]" ;
-  ok(2, docat_del($redirect) eq <<'EOM') ;
-orange -> orange
-yellow -> banana
-red -> apple
-red -> tomato
-green -> apple
-green -> banana
-EOM
-
-}
-
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/filter.t b/storage/bdb/perl/BerkeleyDB/t/filter.t
deleted file mode 100644
index ff1435ae75e..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/filter.t
+++ /dev/null
@@ -1,330 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..52\n";
-
-my $Dfile = "dbhash.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-{
-   # DBM Filter tests
-   use strict ;
-   my (%h, $db) ;
-   my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   unlink $Dfile;
-
-   sub checkOutput
-   {
-       my($fk, $sk, $fv, $sv) = @_ ;
-       return
-           $fetch_key eq $fk && $store_key eq $sk && 
-	   $fetch_value eq $fv && $store_value eq $sv &&
-	   $_ eq 'original' ;
-   }
-   
-    ok 1, $db = tie %h, 'BerkeleyDB::Hash', 
-    		-Filename   => $Dfile, 
-	        -Flags      => DB_CREATE; 
-
-   $db->filter_fetch_key   (sub { $fetch_key = $_ }) ;
-   $db->filter_store_key   (sub { $store_key = $_ }) ;
-   $db->filter_fetch_value (sub { $fetch_value = $_}) ;
-   $db->filter_store_value (sub { $store_value = $_ }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   #                   fk   sk     fv   sv
-   ok 2, checkOutput( "", "fred", "", "joe") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 3, $h{"fred"} eq "joe";
-   #                   fk    sk     fv    sv
-   ok 4, checkOutput( "", "fred", "joe", "") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 5, $db->FIRSTKEY() eq "fred" ;
-   #                    fk     sk  fv  sv
-   ok 6, checkOutput( "fred", "", "", "") ;
-
-   # replace the filters, but remember the previous set
-   my ($old_fk) = $db->filter_fetch_key   
-   			(sub { $_ = uc $_ ; $fetch_key = $_ }) ;
-   my ($old_sk) = $db->filter_store_key   
-   			(sub { $_ = lc $_ ; $store_key = $_ }) ;
-   my ($old_fv) = $db->filter_fetch_value 
-   			(sub { $_ = "[$_]"; $fetch_value = $_ }) ;
-   my ($old_sv) = $db->filter_store_value 
-   			(sub { s/o/x/g; $store_value = $_ }) ;
-   
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"Fred"} = "Joe" ;
-   #                   fk   sk     fv    sv
-   ok 7, checkOutput( "", "fred", "", "Jxe") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 8, $h{"Fred"} eq "[Jxe]";
-   print "$h{'Fred'}\n";
-   #                   fk   sk     fv    sv
-   ok 9, checkOutput( "", "fred", "[Jxe]", "") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 10, $db->FIRSTKEY() eq "FRED" ;
-   #                   fk   sk     fv    sv
-   ok 11, checkOutput( "FRED", "", "", "") ;
-
-   # put the original filters back
-   $db->filter_fetch_key   ($old_fk);
-   $db->filter_store_key   ($old_sk);
-   $db->filter_fetch_value ($old_fv);
-   $db->filter_store_value ($old_sv);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok 12, checkOutput( "", "fred", "", "joe") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 13, $h{"fred"} eq "joe";
-   ok 14, checkOutput( "", "fred", "joe", "") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 15, $db->FIRSTKEY() eq "fred" ;
-   ok 16, checkOutput( "fred", "", "", "") ;
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok 17, checkOutput( "", "", "", "") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 18, $h{"fred"} eq "joe";
-   ok 19, checkOutput( "", "", "", "") ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok 20, $db->FIRSTKEY() eq "fred" ;
-   ok 21, checkOutput( "", "", "", "") ;
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{    
-    # DBM Filter with a closure
-
-    use strict ;
-    my (%h, $db) ;
-
-    unlink $Dfile;
-    ok 22, $db = tie %h, 'BerkeleyDB::Hash', 
-    		-Filename   => $Dfile, 
-	        -Flags      => DB_CREATE; 
-
-    my %result = () ;
-
-    sub Closure
-    {
-        my ($name) = @_ ;
-	my $count = 0 ;
-	my @kept = () ;
-
-	return sub { ++$count ; 
-		     push @kept, $_ ; 
-		     $result{$name} = "$name - $count: [@kept]" ;
-		   }
-    }
-
-    $db->filter_store_key(Closure("store key"))  ;
-    $db->filter_store_value(Closure("store value")) ;
-    $db->filter_fetch_key(Closure("fetch key")) ;
-    $db->filter_fetch_value(Closure("fetch value")) ;
-
-    $_ = "original" ;
-
-    $h{"fred"} = "joe" ;
-    ok 23, $result{"store key"} eq "store key - 1: [fred]" ;
-    ok 24, $result{"store value"} eq "store value - 1: [joe]" ;
-    ok 25, ! defined $result{"fetch key"}  ;
-    ok 26, ! defined $result{"fetch value"}  ;
-    ok 27, $_ eq "original"  ;
-
-    ok 28, $db->FIRSTKEY() eq "fred"  ;
-    ok 29, $result{"store key"} eq "store key - 1: [fred]" ;
-    ok 30, $result{"store value"} eq "store value - 1: [joe]" ;
-    ok 31, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
-    ok 32, ! defined $result{"fetch value"}  ;
-    ok 33, $_ eq "original"  ;
-
-    $h{"jim"}  = "john" ;
-    ok 34, $result{"store key"} eq "store key - 2: [fred jim]" ;
-    ok 35, $result{"store value"} eq "store value - 2: [joe john]" ;
-    ok 36, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
-    ok 37, ! defined $result{"fetch value"}  ;
-    ok 38, $_ eq "original"  ;
-
-    ok 39, $h{"fred"} eq "joe" ;
-    ok 40, $result{"store key"} eq "store key - 3: [fred jim fred]" ;
-    ok 41, $result{"store value"} eq "store value - 2: [joe john]" ;
-    ok 42, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
-    ok 43, $result{"fetch value"} eq "fetch value - 1: [joe]" ;
-    ok 44, $_ eq "original" ;
-
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}		
-
-{
-   # DBM Filter recursion detection
-   use strict ;
-   my (%h, $db) ;
-   unlink $Dfile;
-
-    ok 45, $db = tie %h, 'BerkeleyDB::Hash', 
-    		-Filename   => $Dfile, 
-	        -Flags      => DB_CREATE; 
-
-   $db->filter_store_key (sub { $_ = $h{$_} }) ;
-
-   eval '$h{1} = 1234' ;
-   ok 46, $@ =~ /^recursion detected in filter_store_key at/ ;
-   
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{
-   # Check that DBM Filter can cope with read-only $_
-
-   #use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   unlink $Dfile;
-
-   ok 47, $db = tie %h, 'BerkeleyDB::Hash', 
-    		-Filename   => $Dfile, 
-	        -Flags      => DB_CREATE; 
-
-   $db->filter_fetch_key   (sub { }) ;
-   $db->filter_store_key   (sub { }) ;
-   $db->filter_fetch_value (sub { }) ;
-   $db->filter_store_value (sub { }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   ok(48, $h{"fred"} eq "joe");
-
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (49, ! $@);
-
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   $h{"fred"} = "joe" ;
-
-   ok(50, $h{"fred"} eq "joe");
-
-   ok(51, $db->FIRSTKEY() eq "fred") ;
-   
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (52, ! $@);
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-if(0)
-{
-    # Filter without tie
-    use strict ;
-    my (%h, $db) ;
-
-    unlink $Dfile;
-    ok 53, $db = tie %h, 'BerkeleyDB::Hash', 
-    		-Filename   => $Dfile, 
-	        -Flags      => DB_CREATE; 
-
-    my %result = () ;
-
-    sub INC { return ++ $_[0] }
-    sub DEC { return -- $_[0] }
-    #$db->filter_fetch_key   (sub { warn "FFK $_\n"; $_ = INC($_); warn "XX\n" }) ;
-    #$db->filter_store_key   (sub { warn "FSK $_\n"; $_ = DEC($_); warn "XX\n" }) ;
-    #$db->filter_fetch_value (sub { warn "FFV $_\n"; $_ = INC($_); warn "XX\n"}) ;
-    #$db->filter_store_value (sub { warn "FSV $_\n"; $_ = DEC($_); warn "XX\n" }) ;
-
-    $db->filter_fetch_key   (sub { warn "FFK $_\n"; $_ = pack("i", $_); warn "XX\n" }) ;
-    $db->filter_store_key   (sub { warn "FSK $_\n"; $_ = unpack("i", $_); warn "XX\n" }) ;
-    $db->filter_fetch_value (sub { warn "FFV $_\n"; $_ = pack("i", $_); warn "XX\n"}) ;
-    #$db->filter_store_value (sub { warn "FSV $_\n"; $_ = unpack("i", $_); warn "XX\n" }) ;
-
-    #$db->filter_fetch_key   (sub { ++ $_ }) ;
-    #$db->filter_store_key   (sub { -- $_ }) ;
-    #$db->filter_fetch_value (sub { ++ $_ }) ;
-    #$db->filter_store_value (sub { -- $_ }) ;
-
-    my ($k, $v) = (0,0);
-    ok 54, ! $db->db_put(3,5);
-    exit;
-    ok 55, ! $db->db_get(3, $v);
-    ok 56, $v == 5 ;
-
-    $h{4} = 7 ;
-    ok 57, $h{4} == 7;
-
-    $k = 10;
-    $v = 30;
-    $h{$k} = $v ;
-    ok 58, $k == 10;
-    ok 59, $v == 30;
-    ok 60, $h{$k} == 30;
-
-    $k = 3;
-    ok 61, ! $db->db_get($k, $v, DB_GET_BOTH);
-    ok 62, $k == 3 ;
-    ok 63, $v == 5 ;
-
-    my $cursor = $db->db_cursor();
-
-    my %tmp = ();
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-    {
-	$tmp{$k} = $v;
-    }
-
-    ok 64, keys %tmp == 3 ;
-    ok 65, $tmp{3} == 5;
-
-    undef $cursor ;
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}
diff --git a/storage/bdb/perl/BerkeleyDB/t/hash.t b/storage/bdb/perl/BerkeleyDB/t/hash.t
deleted file mode 100644
index 25b8b20cd11..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/hash.t
+++ /dev/null
@@ -1,729 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..212\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' $db = new BerkeleyDB::Hash  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $db = new BerkeleyDB::Hash -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/  ;
-
-    eval ' $db = new BerkeleyDB::Hash -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' $db = new BerkeleyDB::Hash -Txn => "fred" ' ;
-    ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' $db = new BerkeleyDB::Hash -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-# Now check the interface to HASH
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    my $status ;
-    ok 7, $db->db_put("some key", "some value") == 0  ;
-    ok 8, $db->status() == 0 ;
-    ok 9, $db->db_get("some key", $value) == 0 ;
-    ok 10, $value eq "some value" ;
-    ok 11, $db->db_put("key", "value") == 0  ;
-    ok 12, $db->db_get("key", $value) == 0 ;
-    ok 13, $value eq "value" ;
-    ok 14, $db->db_del("some key") == 0 ;
-    ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
-    ok 16, $status eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 17, $db->status() == DB_NOTFOUND ;
-    ok 18, $db->status() eq $DB_errors{'DB_NOTFOUND'};
-
-    ok 19, $db->db_sync() == 0 ;
-
-    # Check NOOVERWRITE will make put fail when attempting to overwrite
-    # an existing record.
-
-    ok 20, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
-    ok 21, $db->status() eq $DB_errors{'DB_KEYEXIST'};
-    ok 22, $db->status() == DB_KEYEXIST ;
-
-    # check that the value of the key  has not been changed by the
-    # previous test
-    ok 23, $db->db_get("key", $value) == 0 ;
-    ok 24, $value eq "value" ;
-
-    # test DB_GET_BOTH
-    my ($k, $v) = ("key", "value") ;
-    ok 25, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
-
-    ($k, $v) = ("key", "fred") ;
-    ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-    ($k, $v) = ("another", "value") ;
-    ok 27, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-
-}
-
-{
-    # Check simple env works with a hash.
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    ok 28, my $lexD = new LexDir($home);
-
-    ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,@StdErrFile,
-    					 -Home  => $home ;
-    ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    ok 31, $db->db_put("some key", "some value") == 0 ;
-    ok 32, $db->db_get("some key", $value) == 0 ;
-    ok 33, $value eq "some value" ;
-    undef $db ;
-    undef $env ;
-}
-
-
-{
-    # override default hash
-    my $lex = new LexFile $Dfile ;
-    my $value ;
-    $::count = 0 ;
-    ok 34, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				     -Hash     => sub {  ++$::count ; length $_[0] },
-				     -Flags    => DB_CREATE ;
-
-    ok 35, $db->db_put("some key", "some value") == 0 ;
-    ok 36, $db->db_get("some key", $value) == 0 ;
-    ok 37, $value eq "some value" ;
-    ok 38, $::count > 0 ;
-
-}
- 
-{
-    # cursors
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my ($k, $v) ;
-    ok 39, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				     -Flags    => DB_CREATE ;
-
-    # create some data
-    my %data =  (
-		"red"	=> 2,
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 40, $ret == 0 ;
-
-    # create the cursor
-    ok 41, my $cursor = $db->db_cursor() ;
-
-    $k = $v = "" ;
-    my %copy = %data ;
-    my $extras = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 42, $cursor->status() == DB_NOTFOUND ;
-    ok 43, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 44, keys %copy == 0 ;
-    ok 45, $extras == 0 ;
-
-    # sequence backwards
-    %copy = %data ;
-    $extras = 0 ;
-    my $status ;
-    for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_PREV)) {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 46, $status == DB_NOTFOUND ;
-    ok 47, $status eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 48, $cursor->status() == $status ;
-    ok 49, $cursor->status() eq $status ;
-    ok 50, keys %copy == 0 ;
-    ok 51, $extras == 0 ;
-
-    ($k, $v) = ("green", "house") ;
-    ok 52, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
-
-    ($k, $v) = ("green", "door") ;
-    ok 53, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-
-    ($k, $v) = ("black", "house") ;
-    ok 54, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
-    
-}
- 
-{
-    # Tied Hash interface
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    ok 55, tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      -Flags    => DB_CREATE ;
-
-    # check "each" with an empty database
-    my $count = 0 ;
-    while (my ($k, $v) = each %hash) {
-	++ $count ;
-    }
-    ok 56, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 57, $count == 0 ;
-
-    # Add a k/v pair
-    my $value ;
-    $hash{"some key"} = "some value";
-    ok 58, (tied %hash)->status() == 0 ;
-    ok 59, $hash{"some key"} eq "some value";
-    ok 60, defined $hash{"some key"} ;
-    ok 61, (tied %hash)->status() == 0 ;
-    ok 62, exists $hash{"some key"} ;
-    ok 63, !defined $hash{"jimmy"} ;
-    ok 64, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 65, !exists $hash{"jimmy"} ;
-    ok 66, (tied %hash)->status() == DB_NOTFOUND ;
-
-    delete $hash{"some key"} ;
-    ok 67, (tied %hash)->status() == 0 ;
-    ok 68, ! defined $hash{"some key"} ;
-    ok 69, (tied %hash)->status() == DB_NOTFOUND ;
-    ok 70, ! exists $hash{"some key"} ;
-    ok 71, (tied %hash)->status() == DB_NOTFOUND ;
-
-    $hash{1} = 2 ;
-    $hash{10} = 20 ;
-    $hash{1000} = 2000 ;
-
-    my ($keys, $values) = (0,0);
-    $count = 0 ;
-    while (my ($k, $v) = each %hash) {
-        $keys += $k ;
-	$values += $v ;
-	++ $count ;
-    }
-    ok 72, $count == 3 ;
-    ok 73, $keys == 1011 ;
-    ok 74, $values == 2022 ;
-
-    # now clear the hash
-    %hash = () ;
-    ok 75, keys %hash == 0 ;
-
-    untie %hash ;
-}
-
-{
-    # in-memory file
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $fd ;
-    my $value ;
-    ok 76, my $db = tie %hash, 'BerkeleyDB::Hash' ;
-
-    ok 77, $db->db_put("some key", "some value") == 0  ;
-    ok 78, $db->db_get("some key", $value) == 0 ;
-    ok 79, $value eq "some value" ;
-
-    undef $db ;
-    untie %hash ;
-}
- 
-{
-    # partial
-    # check works via API
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-    ok 80, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       -Flags    => DB_CREATE ;
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db->db_put($k, $v) ;
-    }
-    ok 81, $ret == 0 ;
-
-
-    # do a partial get
-    my($pon, $off, $len) = $db->partial_set(0,2) ;
-    ok 82, $pon == 0 && $off == 0 && $len == 0 ;
-    ok 83, ( $db->db_get("red", $value) == 0) && $value eq "bo" ;
-    ok 84, ( $db->db_get("green", $value) == 0) && $value eq "ho" ;
-    ok 85, ( $db->db_get("blue", $value) == 0) && $value eq "se" ;
-
-    # do a partial get, off end of data
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 86, $pon ;
-    ok 87, $off == 0 ;
-    ok 88, $len == 2 ;
-    ok 89, $db->db_get("red", $value) == 0 && $value eq "t" ;
-    ok 90, $db->db_get("green", $value) == 0 && $value eq "se" ;
-    ok 91, $db->db_get("blue", $value) == 0 && $value eq "" ;
-
-    # switch of partial mode
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 92, $pon ;
-    ok 93, $off == 3 ;
-    ok 94, $len == 2 ;
-    ok 95, $db->db_get("red", $value) == 0 && $value eq "boat" ;
-    ok 96, $db->db_get("green", $value) == 0 && $value eq "house" ;
-    ok 97, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
-
-    # now partial put
-    ($pon, $off, $len) = $db->partial_set(0,2) ;
-    ok 98, ! $pon ;
-    ok 99, $off == 0 ;
-    ok 100, $len == 0 ;
-    ok 101, $db->db_put("red", "") == 0 ;
-    ok 102, $db->db_put("green", "AB") == 0 ;
-    ok 103, $db->db_put("blue", "XYZ") == 0 ;
-    ok 104, $db->db_put("new", "KLM") == 0 ;
-
-    $db->partial_clear() ;
-    ok 105, $db->db_get("red", $value) == 0 && $value eq "at" ;
-    ok 106, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
-    ok 107, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
-    ok 108, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
-
-    # now partial put
-    $db->partial_set(3,2) ;
-    ok 109, $db->db_put("red", "PPP") == 0 ;
-    ok 110, $db->db_put("green", "Q") == 0 ;
-    ok 111, $db->db_put("blue", "XYZ") == 0 ;
-    ok 112, $db->db_put("new", "--") == 0 ;
-
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 113, $pon ;
-    ok 114, $off == 3 ;
-    ok 115, $len == 2 ;
-    ok 116, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
-    ok 117, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
-    ok 118, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
-    ok 119, $db->db_get("new", $value) == 0 && $value eq "KLM--" ;
-}
-
-{
-    # partial
-    # check works via tied hash 
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-    ok 120, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       -Flags    => DB_CREATE ;
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    while (my ($k, $v) = each %data) {
-	$hash{$k} = $v ;
-    }
-
-
-    # do a partial get
-    $db->partial_set(0,2) ;
-    ok 121, $hash{"red"} eq "bo" ;
-    ok 122, $hash{"green"} eq "ho" ;
-    ok 123, $hash{"blue"}  eq "se" ;
-
-    # do a partial get, off end of data
-    $db->partial_set(3,2) ;
-    ok 124, $hash{"red"} eq "t" ;
-    ok 125, $hash{"green"} eq "se" ;
-    ok 126, $hash{"blue"} eq "" ;
-
-    # switch of partial mode
-    $db->partial_clear() ;
-    ok 127, $hash{"red"} eq "boat" ;
-    ok 128, $hash{"green"} eq "house" ;
-    ok 129, $hash{"blue"} eq "sea" ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 130, $hash{"red"} = "" ;
-    ok 131, $hash{"green"} = "AB" ;
-    ok 132, $hash{"blue"} = "XYZ" ;
-    ok 133, $hash{"new"} = "KLM" ;
-
-    $db->partial_clear() ;
-    ok 134, $hash{"red"} eq "at" ;
-    ok 135, $hash{"green"} eq "ABuse" ;
-    ok 136, $hash{"blue"} eq "XYZa" ;
-    ok 137, $hash{"new"} eq "KLM" ;
-
-    # now partial put
-    $db->partial_set(3,2) ;
-    ok 138, $hash{"red"} = "PPP" ;
-    ok 139, $hash{"green"} = "Q" ;
-    ok 140, $hash{"blue"} = "XYZ" ;
-    ok 141, $hash{"new"} = "TU" ;
-
-    $db->partial_clear() ;
-    ok 142, $hash{"red"} eq "at\0PPP" ;
-    ok 143, $hash{"green"} eq "ABuQ" ;
-    ok 144, $hash{"blue"} eq "XYZXYZ" ;
-    ok 145, $hash{"new"} eq "KLMTU" ;
-}
-
-{
-    # transaction
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 146, my $lexD = new LexDir($home);
-    ok 147, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 148, my $txn = $env->txn_begin() ;
-    ok 149, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    
-    ok 150, $txn->txn_commit() == 0 ;
-    ok 151, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 152, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 153, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 154, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 155, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 156, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 157, $count == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie %hash ;
-}
-
-
-{
-    # DB_DUP
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    ok 158, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-				      -Property  => DB_DUP,
-                                      -Flags    => DB_CREATE ;
-
-    $hash{'Wall'} = 'Larry' ;
-    $hash{'Wall'} = 'Stone' ;
-    $hash{'Smith'} = 'John' ;
-    $hash{'Wall'} = 'Brick' ;
-    $hash{'Wall'} = 'Brick' ;
-    $hash{'mouse'} = 'mickey' ;
-
-    ok 159, keys %hash == 6 ;
-
-    # create a cursor
-    ok 160, my $cursor = $db->db_cursor() ;
-
-    my $key = "Wall" ;
-    my $value ;
-    ok 161, $cursor->c_get($key, $value, DB_SET) == 0 ;
-    ok 162, $key eq "Wall" && $value eq "Larry" ;
-    ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 164, $key eq "Wall" && $value eq "Stone" ;
-    ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 166, $key eq "Wall" && $value eq "Brick" ;
-    ok 167, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 168, $key eq "Wall" && $value eq "Brick" ;
-
-    #my $ref = $db->db_stat() ; 
-    #ok 143, $ref->{bt_flags} | DB_DUP ;
-
-    # test DB_DUP_NEXT
-    my ($k, $v) = ("Wall", "") ;
-    ok 169, $cursor->c_get($k, $v, DB_SET) == 0 ;
-    ok 170, $k eq "Wall" && $v eq "Larry" ;
-    ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
-    ok 172, $k eq "Wall" && $v eq "Stone" ;
-    ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
-    ok 174, $k eq "Wall" && $v eq "Brick" ;
-    ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
-    ok 176, $k eq "Wall" && $v eq "Brick" ;
-    ok 177, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
-    
-
-    undef $db ;
-    undef $cursor ;
-    untie %hash ;
-
-}
-
-{
-    # DB_DUP & DupCompare
-    my $lex = new LexFile $Dfile, $Dfile2;
-    my ($key, $value) ;
-    my (%h, %g) ;
-    my @Keys   = qw( 0123 9 12 -1234 9 987654321 9 def  ) ; 
-    my @Values = qw( 1    11 3   dd   x abc      2 0    ) ; 
-
-    ok 178, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile, 
-				     -DupCompare   => sub { $_[0] cmp $_[1] },
-				     -Property  => DB_DUP|DB_DUPSORT,
-				     -Flags    => DB_CREATE ;
-
-    ok 179, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2, 
-				     -DupCompare   => sub { $_[0] <=> $_[1] },
-				     -Property  => DB_DUP|DB_DUPSORT,
-				     -Flags    => DB_CREATE ;
-
-    foreach (@Keys) {
-        local $^W = 0 ;
-	my $value = shift @Values ;
-        $h{$_} = $value ; 
-        $g{$_} = $value ;
-    }
-
-    ok 180, my $cursor = (tied %h)->db_cursor() ;
-    $key = 9 ; $value = "";
-    ok 181, $cursor->c_get($key, $value, DB_SET) == 0 ;
-    ok 182, $key == 9 && $value eq 11 ;
-    ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 184, $key == 9 && $value == 2 ;
-    ok 185, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 186, $key == 9 && $value eq "x" ;
-
-    $cursor = (tied %g)->db_cursor() ;
-    $key = 9 ;
-    ok 187, $cursor->c_get($key, $value, DB_SET) == 0 ;
-    ok 188, $key == 9 && $value eq "x" ;
-    ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 190, $key == 9 && $value == 2 ;
-    ok 191, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
-    ok 192, $key == 9 && $value  == 11 ;
-
-
-}
-
-{
-    # get_dup etc
-    my $lex = new LexFile $Dfile;
-    my %hh ;
-
-    ok 193, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile, 
-				     -DupCompare   => sub { $_[0] cmp $_[1] },
-				     -Property  => DB_DUP,
-				     -Flags    => DB_CREATE ;
-
-    $hh{'Wall'} = 'Larry' ;
-    $hh{'Wall'} = 'Stone' ; # Note the duplicate key
-    $hh{'Wall'} = 'Brick' ; # Note the duplicate key
-    $hh{'Smith'} = 'John' ;
-    $hh{'mouse'} = 'mickey' ;
-    
-    # first work in scalar context
-    ok 194, scalar $YY->get_dup('Unknown') == 0 ;
-    ok 195, scalar $YY->get_dup('Smith') == 1 ;
-    ok 196, scalar $YY->get_dup('Wall') == 3 ;
-    
-    # now in list context
-    my @unknown = $YY->get_dup('Unknown') ;
-    ok 197, "@unknown" eq "" ;
-    
-    my @smith = $YY->get_dup('Smith') ;
-    ok 198, "@smith" eq "John" ;
-    
-    {
-        my @wall = $YY->get_dup('Wall') ;
-        my %wall ;
-        @wall{@wall} = @wall ;
-        ok 199, (@wall == 3 && $wall{'Larry'} 
-			&& $wall{'Stone'} && $wall{'Brick'});
-    }
-    
-    # hash
-    my %unknown = $YY->get_dup('Unknown', 1) ;
-    ok 200, keys %unknown == 0 ;
-    
-    my %smith = $YY->get_dup('Smith', 1) ;
-    ok 201, keys %smith == 1 && $smith{'John'} ;
-    
-    my %wall = $YY->get_dup('Wall', 1) ;
-    ok 202, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1 
-    		&& $wall{'Brick'} == 1 ;
-    
-    undef $YY ;
-    untie %hh ;
-
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use strict ;
-   use vars qw( @ISA @EXPORT) ;
-
-   require Exporter ;
-   use BerkeleyDB;
-   @ISA=qw(BerkeleyDB BerkeleyDB::Hash);
-   @EXPORT = @BerkeleyDB::EXPORT ;
-
-   sub db_put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::db_put($key, $value * 3) ;
-   }
-
-   sub db_get { 
-	my $self = shift ;
-        $self->SUPER::db_get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }    
-    eval 'use SubDB ; ';
-    main::ok 203, $@ eq "" ;
-    my %h ;
-    my $X ;
-    eval '
-	$X = tie(%h, "SubDB", -Filename => "dbhash.tmp", 
-			-Flags => DB_CREATE,
-			-Mode => 0640 );
-	' ;
-
-    main::ok 204, $@ eq "" ;
-
-    my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
-    main::ok 205, $@ eq "" ;
-    main::ok 206, $ret == 7 ;
-
-    my $value = 0;
-    $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
-    main::ok 207, $@ eq "" ;
-    main::ok 208, $ret == 10 ;
-
-    $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
-    main::ok 209, $@ eq ""  ;
-    main::ok 210, $ret == 1 ;
-
-    $ret = eval '$X->A_new_method("joe") ' ;
-    main::ok 211, $@ eq "" ;
-    main::ok 212, $ret eq "[[10]]" ;
-
-    unlink "SubDB.pm", "dbhash.tmp" ;
-
-}
diff --git a/storage/bdb/perl/BerkeleyDB/t/join.t b/storage/bdb/perl/BerkeleyDB/t/join.t
deleted file mode 100644
index ae7942f2c6b..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/join.t
+++ /dev/null
@@ -1,242 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-if ($BerkeleyDB::db_ver < 2.005002)
-{
-    print "1..0 # Skip: join needs Berkeley DB 2.5.2 or later\n" ;
-    exit 0 ;
-}
-
-
-print "1..42\n";
-
-my $Dfile1 = "dbhash1.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile1, $Dfile2, $Dfile3 ;
-
-umask(0) ;
-
-{
-    # error cases
-    my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
-    my %hash1 ;
-    my $value ;
-    my $status ;
-    my $cursor ;
-
-    ok 1, my $db1 = tie %hash1, 'BerkeleyDB::Hash', 
-				-Filename => $Dfile1,
-                               	-Flags     => DB_CREATE,
-                                -DupCompare   => sub { $_[0] lt $_[1] },
-                                -Property  => DB_DUP|DB_DUPSORT ;
-
-    # no cursors supplied
-    eval '$cursor = $db1->db_join() ;' ;
-    ok 2, $@ =~ /Usage: \$db->BerkeleyDB::db_join\Q([cursors], flags=0)/;
-
-    # empty list
-    eval '$cursor = $db1->db_join([]) ;' ;
-    ok 3, $@ =~ /db_join: No cursors in parameter list/;
-
-    # cursor list, isn not a []
-    eval '$cursor = $db1->db_join({}) ;' ;
-    ok 4, $@ =~ /db_join: first parameter is not an array reference/;
-
-    eval '$cursor = $db1->db_join(\1) ;' ;
-    ok 5, $@ =~ /db_join: first parameter is not an array reference/;
-
-    my ($a, $b) = ("a", "b");
-    $a = bless [], "fred";
-    $b = bless [], "fred";
-    eval '$cursor = $db1->db_join($a, $b) ;' ;
-    ok 6, $@ =~ /db_join: first parameter is not an array reference/;
-
-}
-
-{
-    # test a 2-way & 3-way join
-
-    my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
-    my %hash1 ;
-    my %hash2 ;
-    my %hash3 ;
-    my $value ;
-    my $status ;
-
-    my $home = "./fred7" ;
-    rmtree $home;
-    ok 7, ! -d $home;
-    ok 8, my $lexD = new LexDir($home);
-    ok 9, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN
-					  	|DB_INIT_MPOOL;
-					  	#|DB_INIT_MPOOL| DB_INIT_LOCK;
-    ok 10, my $txn = $env->txn_begin() ;
-    ok 11, my $db1 = tie %hash1, 'BerkeleyDB::Hash', 
-				-Filename => $Dfile1,
-                               	-Flags     => DB_CREATE,
-                                -DupCompare   => sub { $_[0] cmp $_[1] },
-                                -Property  => DB_DUP|DB_DUPSORT,
-			       	-Env 	   => $env,
-			    	-Txn	   => $txn  ;
-				;
-
-    ok 12, my $db2 = tie %hash2, 'BerkeleyDB::Hash', 
-				-Filename => $Dfile2,
-                               	-Flags     => DB_CREATE,
-                                -DupCompare   => sub { $_[0] cmp $_[1] },
-                                -Property  => DB_DUP|DB_DUPSORT,
-			       	-Env 	   => $env,
-			    	-Txn	   => $txn  ;
-
-    ok 13, my $db3 = tie %hash3, 'BerkeleyDB::Btree', 
-				-Filename => $Dfile3,
-                               	-Flags     => DB_CREATE,
-                                -DupCompare   => sub { $_[0] cmp $_[1] },
-                                -Property  => DB_DUP|DB_DUPSORT,
-			       	-Env 	   => $env,
-			    	-Txn	   => $txn  ;
-
-    
-    ok 14, addData($db1, qw( 	apple		Convenience
-    				peach		Shopway
-				pear		Farmer
-				raspberry	Shopway
-				strawberry	Shopway
-				gooseberry	Farmer
-				blueberry	Farmer
-    			));
-
-    ok 15, addData($db2, qw( 	red	apple
-    				red	raspberry
-    				red	strawberry
-				yellow	peach
-				yellow	pear
-				green	gooseberry
-				blue	blueberry)) ;
-
-    ok 16, addData($db3, qw( 	expensive	apple
-    				reasonable	raspberry
-    				expensive	strawberry
-				reasonable	peach
-				reasonable	pear
-				expensive	gooseberry
-				reasonable	blueberry)) ;
-
-    ok 17, my $cursor2 = $db2->db_cursor() ;
-    my $k = "red" ;
-    my $v = "" ;
-    ok 18, $cursor2->c_get($k, $v, DB_SET) == 0 ;
-
-    # Two way Join
-    ok 19, my $cursor1 = $db1->db_join([$cursor2]) ;
-
-    my %expected = qw( apple Convenience
-			raspberry Shopway
-			strawberry Shopway
-		) ;
-
-    # sequence forwards
-    while ($cursor1->c_get($k, $v) == 0) {
-	delete $expected{$k} 
-	    if defined $expected{$k} && $expected{$k} eq $v ;
-	#print "[$k] [$v]\n" ;
-    }
-    ok 20, keys %expected == 0 ;
-    ok 21, $cursor1->status() == DB_NOTFOUND ;
-
-    # Three way Join
-    ok 22, $cursor2 = $db2->db_cursor() ;
-    $k = "red" ;
-    $v = "" ;
-    ok 23, $cursor2->c_get($k, $v, DB_SET) == 0 ;
-
-    ok 24, my $cursor3 = $db3->db_cursor() ;
-    $k = "expensive" ;
-    $v = "" ;
-    ok 25, $cursor3->c_get($k, $v, DB_SET) == 0 ;
-    ok 26, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
-
-    %expected = qw( apple Convenience
-			strawberry Shopway
-		) ;
-
-    # sequence forwards
-    while ($cursor1->c_get($k, $v) == 0) {
-	delete $expected{$k} 
-	    if defined $expected{$k} && $expected{$k} eq $v ;
-	#print "[$k] [$v]\n" ;
-    }
-    ok 27, keys %expected == 0 ;
-    ok 28, $cursor1->status() == DB_NOTFOUND ;
-
-    # test DB_JOIN_ITEM
-    # #################
-    ok 29, $cursor2 = $db2->db_cursor() ;
-    $k = "red" ;
-    $v = "" ;
-    ok 30, $cursor2->c_get($k, $v, DB_SET) == 0 ;
- 
-    ok 31, $cursor3 = $db3->db_cursor() ;
-    $k = "expensive" ;
-    $v = "" ;
-    ok 32, $cursor3->c_get($k, $v, DB_SET) == 0 ;
-    ok 33, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
- 
-    %expected = qw( apple 1
-                        strawberry 1
-                ) ;
- 
-    # sequence forwards
-    $k = "" ;
-    $v = "" ;
-    while ($cursor1->c_get($k, $v, DB_JOIN_ITEM) == 0) {
-        delete $expected{$k}
-            if defined $expected{$k} ;
-        #print "[$k]\n" ;
-    }
-    ok 34, keys %expected == 0 ;
-    ok 35, $cursor1->status() == DB_NOTFOUND ;
-
-    ok 36, $cursor1->c_close() == 0 ;
-    ok 37, $cursor2->c_close() == 0 ;
-    ok 38, $cursor3->c_close() == 0 ;
-
-    ok 39, ($status = $txn->txn_commit()) == 0;
-
-    undef $txn ;
-
-    ok 40, my $cursor1a = $db1->db_cursor() ;
-    eval { $cursor1 = $db1->db_join([$cursor1a]) };
-    ok 41, $@ =~ /BerkeleyDB Aborting: attempted to do a self-join at/;
-    eval { $cursor1 = $db1->db_join([$cursor1]) } ;
-    ok 42, $@ =~ /BerkeleyDB Aborting: attempted to do a self-join at/;
-
-    undef $cursor1a;
-    #undef $cursor1;
-    #undef $cursor2;
-    #undef $cursor3;
-    undef $db1 ;
-    undef $db2 ;
-    undef $db3 ;
-    undef $env ;
-    untie %hash1 ;
-    untie %hash2 ;
-    untie %hash3 ;
-}
-
-print "# at the end\n";
diff --git a/storage/bdb/perl/BerkeleyDB/t/mldbm.t b/storage/bdb/perl/BerkeleyDB/t/mldbm.t
deleted file mode 100644
index 215d34f4265..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/mldbm.t
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict ;
-
-BEGIN 
-{
-    if ($] < 5.005) {
-	print "1..0 # Skip: this is Perl $], skipping test\n" ;
-	exit 0 ;
-    }
-
-    eval { require Data::Dumper ; };
-    if ($@) {
-	print "1..0 # Skip: Data::Dumper is not installed on this system.\n";
-	exit 0 ;
-    }
-    if ($Data::Dumper::VERSION < 2.08) {
-	print "1..0 # Skip: Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
-	exit 0 ;
-    }
-    eval { require MLDBM ; };
-    if ($@) {
-	print "1..0 # Skip: MLDBM is not installed on this system.\n";
-	exit 0 ;
-    }
-}
-
-use t::util ;
-
-print "1..12\n";
-
-{
-    package BTREE ;
-    
-    use BerkeleyDB ;
-    use MLDBM qw(BerkeleyDB::Btree) ; 
-    use Data::Dumper;
-    
-    my $filename = "";
-    my $lex = new LexFile $filename;
-    
-    $MLDBM::UseDB = "BerkeleyDB::Btree" ;
-    my %o ;
-    my $db = tie %o, 'MLDBM', -Filename => $filename,
-    		     -Flags    => DB_CREATE
-    		or die $!;
-    ::ok 1, $db ;
-    ::ok 2, $db->type() == DB_BTREE ;
-    
-    my $c = [\'c'];
-    my $b = {};
-    my $a = [1, $b, $c];
-    $b->{a} = $a;
-    $b->{b} = $a->[1];
-    $b->{c} = $a->[2];
-    @o{qw(a b c)} = ($a, $b, $c);
-    $o{d} = "{once upon a time}";
-    $o{e} = 1024;
-    $o{f} = 1024.1024;
-    
-    my $struct = [@o{qw(a b c)}];
-    ::ok 3, ::_compare([$a, $b, $c], $struct);
-    ::ok 4, $o{d} eq "{once upon a time}" ;
-    ::ok 5, $o{e} == 1024 ;
-    ::ok 6, $o{f} eq 1024.1024 ;
-    
-}
-
-{
-
-    package HASH ;
-
-    use BerkeleyDB ;
-    use MLDBM qw(BerkeleyDB::Hash) ; 
-    use Data::Dumper;
-
-    my $filename = "";
-    my $lex = new LexFile $filename;
-
-    unlink $filename ;
-    $MLDBM::UseDB = "BerkeleyDB::Hash" ;
-    my %o ;
-    my $db = tie %o, 'MLDBM', -Filename => $filename,
-		         -Flags    => DB_CREATE
-		    or die $!;
-    ::ok 7, $db ;
-    ::ok 8, $db->type() == DB_HASH ;
-
-
-    my $c = [\'c'];
-    my $b = {};
-    my $a = [1, $b, $c];
-    $b->{a} = $a;
-    $b->{b} = $a->[1];
-    $b->{c} = $a->[2];
-    @o{qw(a b c)} = ($a, $b, $c);
-    $o{d} = "{once upon a time}";
-    $o{e} = 1024;
-    $o{f} = 1024.1024;
-
-    my $struct = [@o{qw(a b c)}];
-    ::ok 9, ::_compare([$a, $b, $c], $struct);
-    ::ok 10, $o{d} eq "{once upon a time}" ;
-    ::ok 11, $o{e} == 1024 ;
-    ::ok 12, $o{f} eq 1024.1024 ;
-
-}
diff --git a/storage/bdb/perl/BerkeleyDB/t/pod.t b/storage/bdb/perl/BerkeleyDB/t/pod.t
deleted file mode 100644
index 230df4bd9c3..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/pod.t
+++ /dev/null
@@ -1,18 +0,0 @@
-eval " use Test::More " ;
-
-if ($@)
-{
-    print "1..0 # Skip: Test::More required for testing POD\n" ;
-    exit 0;
-}
-
-eval "use Test::Pod 1.00";
-
-if ($@)
-{
-    print "1..0 # Skip: Test::Pod 1.00 required for testing POD\n" ;
-    exit 0;
-}
-
-all_pod_files_ok();
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/queue.t b/storage/bdb/perl/BerkeleyDB/t/queue.t
deleted file mode 100644
index 00291641c4f..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/queue.t
+++ /dev/null
@@ -1,751 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-BEGIN 
-{
-    if ($BerkeleyDB::db_version < 3.3) {
-	print "1..0 # Skip: Queue needs Berkeley DB 3.3.x or better\n" ;
-	exit 0 ;
-    }
-}    
-
-print "1..200\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' $db = new BerkeleyDB::Queue  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $db = new BerkeleyDB::Queue -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) /  ;
-
-    eval ' $db = new BerkeleyDB::Queue -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' $db = new BerkeleyDB::Queue -Txn => "x" ' ;
-    ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' $db = new BerkeleyDB::Queue -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-# Now check the interface to Queue
-
-{
-    my $lex = new LexFile $Dfile ;
-    my $rec_len = 10 ;
-    my $pad = "x" ;
-
-    ok 6, my $db = new BerkeleyDB::Queue -Filename => $Dfile, 
-				    -Flags    => DB_CREATE,
-				    -Len      => $rec_len,
-				    -Pad      => $pad;
-
-    # Add a k/v pair
-    my $value ;
-    my $status ;
-    ok 7, $db->db_put(1, "some value") == 0  ;
-    ok 8, $db->status() == 0 ;
-    ok 9, $db->db_get(1, $value) == 0 ;
-    ok 10, $value eq fillout("some value", $rec_len, $pad) ;
-    ok 11, $db->db_put(2, "value") == 0  ;
-    ok 12, $db->db_get(2, $value) == 0 ;
-    ok 13, $value eq fillout("value", $rec_len, $pad) ;
-    ok 14, $db->db_put(3, "value") == 0  ;
-    ok 15, $db->db_get(3, $value) == 0 ;
-    ok 16, $value eq fillout("value", $rec_len, $pad) ;
-    ok 17, $db->db_del(2) == 0 ;
-    ok 18, ($status = $db->db_get(2, $value)) == DB_KEYEMPTY ;
-    ok 19, $db->status() == DB_KEYEMPTY ;
-    ok 20, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
-
-    ok 21, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
-    ok 22, $db->status() == DB_NOTFOUND ;
-    ok 23, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
-
-    ok 24, $db->db_sync() == 0 ;
-
-    # Check NOOVERWRITE will make put fail when attempting to overwrite
-    # an existing record.
-
-    ok 25, $db->db_put( 1, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
-    ok 26, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
-    ok 27, $db->status() == DB_KEYEXIST ;
-
-
-    # check that the value of the key  has not been changed by the
-    # previous test
-    ok 28, $db->db_get(1, $value) == 0 ;
-    ok 29, $value eq fillout("some value", $rec_len, $pad) ;
-
-
-}
-
-
-{
-    # Check simple env works with a array.
-    # and pad defaults to space
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    my $rec_len = 11 ;
-    ok 30, my $lexD = new LexDir($home);
-
-    ok 31, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,@StdErrFile,
-    					 -Home => $home ;
-    ok 32, my $db = new BerkeleyDB::Queue -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE,
-				    -Len      => $rec_len;
-
-    # Add a k/v pair
-    my $value ;
-    ok 33, $db->db_put(1, "some value") == 0 ;
-    ok 34, $db->db_get(1, $value) == 0 ;
-    ok 35, $value eq fillout("some value", $rec_len)  ;
-    undef $db ;
-    undef $env ;
-}
-
- 
-{
-    # cursors
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my ($k, $v) ;
-    my $rec_len = 5 ;
-    ok 36, my $db = new BerkeleyDB::Queue -Filename  => $Dfile, 
-				    	  -ArrayBase => 0,
-				    	  -Flags     => DB_CREATE ,
-				    	  -Len       => $rec_len;
-
-    # create some data
-    my @data =  (
-		"red"	,
-		"green"	,
-		"blue"	,
-		) ;
-
-    my $i ;
-    my %data ;
-    my $ret = 0 ;
-    for ($i = 0 ; $i < @data ; ++$i) {
-        $ret += $db->db_put($i, $data[$i]) ;
-	$data{$i} = $data[$i] ;
-    }
-    ok 37, $ret == 0 ;
-
-    # create the cursor
-    ok 38, my $cursor = $db->db_cursor() ;
-
-    $k = 0 ; $v = "" ;
-    my %copy = %data;
-    my $extras = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) 
-    {
-        if ( fillout($copy{$k}, $rec_len) eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-
-    ok 39, $cursor->status() == DB_NOTFOUND ;
-    ok 40, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 41, keys %copy == 0 ;
-    ok 42, $extras == 0 ;
-
-    # sequence backwards
-    %copy = %data ;
-    $extras = 0 ;
-    my $status ;
-    for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_PREV)) {
-        if ( fillout($copy{$k}, $rec_len) eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 43, $status == DB_NOTFOUND ;
-    ok 44, $status eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 45, $cursor->status() == $status ;
-    ok 46, $cursor->status() eq $status ;
-    ok 47, keys %copy == 0 ;
-    ok 48, $extras == 0 ;
-}
- 
-{
-    # Tied Array interface
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $db ;
-    my $rec_len = 10 ;
-    ok 49, $db = tie @array, 'BerkeleyDB::Queue', -Filename  => $Dfile,
-				    	    -ArrayBase => 0,
-                                            -Flags     => DB_CREATE ,
-				    	    -Len       => $rec_len;
-
-    ok 50, my $cursor = (tied @array)->db_cursor() ;
-    # check the database is empty
-    my $count = 0 ;
-    my ($k, $v) = (0,"") ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-	++ $count ;
-    }
-    ok 51, $cursor->status() == DB_NOTFOUND ;
-    ok 52, $count == 0 ;
-
-    ok 53, @array == 0 ;
-
-    # Add a k/v pair
-    my $value ;
-    $array[1] = "some value";
-    ok 54, (tied @array)->status() == 0 ;
-    ok 55, $array[1] eq fillout("some value", $rec_len);
-    ok 56, defined $array[1];
-    ok 57, (tied @array)->status() == 0 ;
-    ok 58, !defined $array[3];
-    ok 59, (tied @array)->status() == DB_NOTFOUND ;
-
-    $array[1] = 2 ;
-    $array[10] = 20 ;
-    $array[1000] = 2000 ;
-
-    my ($keys, $values) = (0,0);
-    $count = 0 ;
-    for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_NEXT)) {
-        $keys += $k ;
-	$values += $v ;
-	++ $count ;
-    }
-    ok 60, $count == 3 ;
-    ok 61, $keys == 1011 ;
-    ok 62, $values == 2022 ;
-
-    # unshift isn't allowed
-#    eval {
-#    	$FA ? unshift @array, "red", "green", "blue" 
-#        : $db->unshift("red", "green", "blue" ) ;
-#	} ;
-#    ok 64, $@ =~ /^unshift is unsupported with Queue databases/ ;	
-    $array[0] = "red" ;
-    $array[1] = "green" ;
-    $array[2] = "blue" ;
-    $array[4] = 2 ;
-    ok 63, $array[0] eq fillout("red", $rec_len) ;
-    ok 64, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
-    ok 65, $k == 0 ;
-    ok 66, $v eq fillout("red", $rec_len) ;
-    ok 67, $array[1] eq fillout("green", $rec_len) ;
-    ok 68, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 69, $k == 1 ;
-    ok 70, $v eq fillout("green", $rec_len) ;
-    ok 71, $array[2] eq fillout("blue", $rec_len) ;
-    ok 72, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 73, $k == 2 ;
-    ok 74, $v eq fillout("blue", $rec_len) ;
-    ok 75, $array[4] == 2 ;
-    ok 76, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 77, $k == 4 ;
-    ok 78, $v == 2 ;
-
-    # shift
-    ok 79, ($FA ? shift @array : $db->shift()) eq fillout("red", $rec_len) ;
-    ok 80, ($FA ? shift @array : $db->shift()) eq fillout("green", $rec_len) ;
-    ok 81, ($FA ? shift @array : $db->shift()) eq fillout("blue", $rec_len) ;
-    ok 82, ($FA ? shift @array : $db->shift()) == 2 ;
-
-    # push
-    $FA ? push @array, "the", "end" 
-        : $db->push("the", "end") ;
-    ok 83, $cursor->c_get($k, $v, DB_LAST) == 0 ;
-    ok 84, $k == 1002 ;
-    ok 85, $v eq fillout("end", $rec_len) ;
-    ok 86, $cursor->c_get($k, $v, DB_PREV) == 0 ;
-    ok 87, $k == 1001 ;
-    ok 88, $v eq fillout("the", $rec_len) ;
-    ok 89, $cursor->c_get($k, $v, DB_PREV) == 0 ;
-    ok 90, $k == 1000 ;
-    ok 91, $v == 2000 ;
-
-    # pop
-    ok 92, ( $FA ? pop @array : $db->pop ) eq fillout("end", $rec_len) ;
-    ok 93, ( $FA ? pop @array : $db->pop ) eq fillout("the", $rec_len) ;
-    ok 94, ( $FA ? pop @array : $db->pop ) == 2000  ;
-
-    # now clear the array 
-    $FA ? @array = () 
-        : $db->clear() ;
-    ok 95, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
-
-    undef $cursor ;
-    undef $db ;
-    untie @array ;
-}
-
-{
-    # in-memory file
-
-    my @array ;
-    my $fd ;
-    my $value ;
-    my $rec_len = 15 ;
-    ok 96, my $db = tie @array, 'BerkeleyDB::Queue',
-				    	    -Len       => $rec_len;
-
-    ok 97, $db->db_put(1, "some value") == 0  ;
-    ok 98, $db->db_get(1, $value) == 0 ;
-    ok 99, $value eq fillout("some value", $rec_len) ;
-
-}
- 
-{
-    # partial
-    # check works via API
-
-    my $lex = new LexFile $Dfile ;
-    my $value ;
-    my $rec_len = 8 ;
-    ok 100, my $db = new BerkeleyDB::Queue  -Filename => $Dfile,
-                                      	    -Flags    => DB_CREATE ,
-				    	    -Len      => $rec_len,
-				    	    -Pad      => " " ;
-
-    # create some data
-    my @data =  (
-		"",
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = 0 ; $i < @data ; ++$i) {
-        my $r = $db->db_put($i, $data[$i]) ;
-        $ret += $r ;
-    }
-    ok 101, $ret == 0 ;
-
-    # do a partial get
-    my ($pon, $off, $len) = $db->partial_set(0,2) ;
-    ok 102, ! $pon && $off == 0 && $len == 0 ;
-    ok 103, $db->db_get(1, $value) == 0 && $value eq "bo" ;
-    ok 104, $db->db_get(2, $value) == 0 && $value eq "ho" ;
-    ok 105, $db->db_get(3, $value) == 0 && $value eq "se" ;
-
-    # do a partial get, off end of data
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 106, $pon ;
-    ok 107, $off == 0 ;
-    ok 108, $len == 2 ;
-    ok 109, $db->db_get(1, $value) == 0 && $value eq fillout("t", 2) ;
-    ok 110, $db->db_get(2, $value) == 0 && $value eq "se" ;
-    ok 111, $db->db_get(3, $value) == 0 && $value eq "  " ;
-
-    # switch of partial mode
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 112, $pon ;
-    ok 113, $off == 3 ;
-    ok 114, $len == 2 ;
-    ok 115, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
-    ok 116, $db->db_get(2, $value) == 0 && $value eq fillout("house", $rec_len) ;
-    ok 117, $db->db_get(3, $value) == 0 && $value eq fillout("sea", $rec_len) ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 118, $db->db_put(1, "") != 0 ;
-    ok 119, $db->db_put(2, "AB") == 0 ;
-    ok 120, $db->db_put(3, "XY") == 0 ;
-    ok 121, $db->db_put(4, "KLM") != 0 ;
-    ok 122, $db->db_put(4, "KL") == 0 ;
-
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 123, $pon ;
-    ok 124, $off == 0 ;
-    ok 125, $len == 2 ;
-    ok 126, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
-    ok 127, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse", $rec_len) ;
-    ok 128, $db->db_get(3, $value) == 0 && $value eq fillout("XYa", $rec_len) ;
-    ok 129, $db->db_get(4, $value) == 0 && $value eq fillout("KL", $rec_len) ;
-
-    # now partial put
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 130, ! $pon ;
-    ok 131, $off == 0 ;
-    ok 132, $len == 0 ;
-    ok 133, $db->db_put(1, "PP") == 0 ;
-    ok 134, $db->db_put(2, "Q") != 0 ;
-    ok 135, $db->db_put(3, "XY") == 0 ;
-    ok 136, $db->db_put(4, "TU") == 0 ;
-
-    $db->partial_clear() ;
-    ok 137, $db->db_get(1, $value) == 0 && $value eq fillout("boaPP", $rec_len) ;
-    ok 138, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse",$rec_len) ;
-    ok 139, $db->db_get(3, $value) == 0 && $value eq fillout("XYaXY", $rec_len) ;
-    ok 140, $db->db_get(4, $value) == 0 && $value eq fillout("KL TU", $rec_len) ;
-}
-
-{
-    # partial
-    # check works via tied array 
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $value ;
-    my $rec_len = 8 ;
-    ok 141, my $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
-                                      	        -Flags    => DB_CREATE ,
-				    	        -Len       => $rec_len,
-				    	        -Pad       => " " ;
-
-    # create some data
-    my @data =  (
-		"",
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $i ;
-    my $status = 0 ;
-    for ($i = 1 ; $i < @data ; ++$i) {
-	$array[$i] = $data[$i] ;
-	$status += $db->status() ;
-    }
-
-    ok 142, $status == 0 ;
-
-    # do a partial get
-    $db->partial_set(0,2) ;
-    ok 143, $array[1] eq fillout("bo", 2) ;
-    ok 144, $array[2] eq fillout("ho", 2) ;
-    ok 145, $array[3]  eq fillout("se", 2) ;
-
-    # do a partial get, off end of data
-    $db->partial_set(3,2) ;
-    ok 146, $array[1] eq fillout("t", 2) ;
-    ok 147, $array[2] eq fillout("se", 2) ;
-    ok 148, $array[3] eq fillout("", 2) ;
-
-    # switch of partial mode
-    $db->partial_clear() ;
-    ok 149, $array[1] eq fillout("boat", $rec_len) ;
-    ok 150, $array[2] eq fillout("house", $rec_len) ;
-    ok 151, $array[3] eq fillout("sea", $rec_len) ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    $array[1] = "" ;
-    ok 152, $db->status() != 0 ;
-    $array[2] = "AB" ;
-    ok 153, $db->status() == 0 ;
-    $array[3] = "XY" ;
-    ok 154, $db->status() == 0 ;
-    $array[4] = "KL" ;
-    ok 155, $db->status() == 0 ;
-
-    $db->partial_clear() ;
-    ok 156, $array[1] eq fillout("boat", $rec_len) ;
-    ok 157, $array[2] eq fillout("ABuse", $rec_len) ;
-    ok 158, $array[3] eq fillout("XYa", $rec_len) ;
-    ok 159, $array[4] eq fillout("KL", $rec_len) ;
-
-    # now partial put
-    $db->partial_set(3,2) ;
-    $array[1] = "PP" ;
-    ok 160, $db->status() == 0 ;
-    $array[2] = "Q" ;
-    ok 161, $db->status() != 0 ;
-    $array[3] = "XY" ;
-    ok 162, $db->status() == 0 ;
-    $array[4] = "TU" ;
-    ok 163, $db->status() == 0 ;
-
-    $db->partial_clear() ;
-    ok 164, $array[1] eq fillout("boaPP", $rec_len) ;
-    ok 165, $array[2] eq fillout("ABuse", $rec_len) ;
-    ok 166, $array[3] eq fillout("XYaXY", $rec_len) ;
-    ok 167, $array[4] eq fillout("KL TU", $rec_len) ;
-}
-
-{
-    # transaction
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 168, my $lexD = new LexDir($home);
-    my $rec_len = 9 ;
-    ok 169, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 170, my $txn = $env->txn_begin() ;
-    ok 171, my $db1 = tie @array, 'BerkeleyDB::Queue', 
-				-Filename => $Dfile,
-				-ArrayBase => 0,
-                      		-Flags    =>  DB_CREATE ,
-		        	-Env 	  => $env,
-		        	-Txn	  => $txn ,
-				-Len      => $rec_len,
-				-Pad      => " " ;
-
-    
-    ok 172, $txn->txn_commit() == 0 ;
-    ok 173, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-
-    # create some data
-    my @data =  (
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = 0 ; $i < @data ; ++$i) {
-        $ret += $db1->db_put($i, $data[$i]) ;
-    }
-    ok 174, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 175, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = (0, "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 176, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 177, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 178, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 179, $count == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie @array ;
-}
-
-
-{
-    # db_stat
-
-    my $lex = new LexFile $Dfile ;
-    my $recs = ($BerkeleyDB::db_version >= 3.1 ? "qs_ndata" : "qs_nrecs") ;
-    my @array ;
-    my ($k, $v) ;
-    my $rec_len = 7 ;
-    ok 180, my $db = new BerkeleyDB::Queue -Filename 	=> $Dfile, 
-				     	   -Flags    	=> DB_CREATE,
-					   -Pagesize	=> 4 * 1024,
-				           -Len       => $rec_len,
-				           -Pad       => " " 
-					;
-
-    my $ref = $db->db_stat() ; 
-    ok 181, $ref->{$recs} == 0;
-    ok 182, $ref->{'qs_pagesize'} == 4 * 1024;
-
-    # create some data
-    my @data =  (
-		2,
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = $db->ArrayOffset ; @data ; ++$i) {
-        $ret += $db->db_put($i, shift @data) ;
-    }
-    ok 183, $ret == 0 ;
-
-    $ref = $db->db_stat() ; 
-    ok 184, $ref->{$recs} == 3;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use strict ;
-   use vars qw( @ISA @EXPORT) ;
-
-   require Exporter ;
-   use BerkeleyDB;
-   @ISA=qw(BerkeleyDB BerkeleyDB::Queue);
-   @EXPORT = @BerkeleyDB::EXPORT ;
-
-   sub db_put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::db_put($key, $value * 3) ;
-   }
-
-   sub db_get { 
-	my $self = shift ;
-        $self->SUPER::db_get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }    
-    eval 'use SubDB ; ';
-    main::ok 185, $@ eq "" ;
-    my @h ;
-    my $X ;
-    my $rec_len = 34 ;
-    eval '
-	$X = tie(@h, "SubDB", -Filename => "dbqueue.tmp", 
-			-Flags => DB_CREATE,
-			-Mode => 0640 ,
-	                -Len       => $rec_len,
-	                -Pad       => " " 
-			);		   
-	' ;
-
-    main::ok 186, $@ eq "" ;
-
-    my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
-    main::ok 187, $@ eq "" ;
-    main::ok 188, $ret == 7 ;
-
-    my $value = 0;
-    $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
-    main::ok 189, $@ eq "" ;
-    main::ok 190, $ret == 10 ;
-
-    $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
-    main::ok 191, $@ eq ""  ;
-    main::ok 192, $ret == 1 ;
-
-    $ret = eval '$X->A_new_method(1) ' ;
-    main::ok 193, $@ eq "" ;
-    main::ok 194, $ret eq "[[10]]" ;
-
-    undef $X ;
-    untie @h ;
-    unlink "SubDB.pm", "dbqueue.tmp" ;
-
-}
-
-{
-    # DB_APPEND
-
-    my $lex = new LexFile $Dfile;
-    my @array ;
-    my $value ;
-    my $rec_len = 21 ;
-    ok 195, my $db = tie @array, 'BerkeleyDB::Queue', 
-					-Filename  => $Dfile,
-                                       	-Flags     => DB_CREATE ,
-	                		-Len       => $rec_len,
-	                		-Pad       => " " ;
-
-    # create a few records
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-
-    my $k = 0 ;
-    ok 196, $db->db_put($k, "fred", DB_APPEND) == 0 ;
-    ok 197, $k == 4 ;
-    ok 198, $array[4] eq fillout("fred", $rec_len) ;
-
-    undef $db ;
-    untie @array ;
-}
-
-{
-    # 23 Sept 2001 -- push into an empty array
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $db ;
-    my $rec_len = 21 ;
-    ok 199, $db = tie @array, 'BerkeleyDB::Queue', 
-                                      	       	-Flags  => DB_CREATE ,
-				    	        -ArrayBase => 0,
-	                		        -Len       => $rec_len,
-	                		        -Pad       => " " ,
-						-Filename => $Dfile ;
-    $FA ? push @array, "first"
-        : $db->push("first") ;
-
-    ok 200, ($FA ? pop @array : $db->pop()) eq fillout("first", $rec_len) ;
-
-    undef $db;
-    untie @array ;
-
-}
-
-__END__
-
-
-# TODO
-#
-# DB_DELIMETER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/storage/bdb/perl/BerkeleyDB/t/recno.t b/storage/bdb/perl/BerkeleyDB/t/recno.t
deleted file mode 100644
index 7bbb50169fb..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/recno.t
+++ /dev/null
@@ -1,913 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..226\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' $db = new BerkeleyDB::Recno  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $db = new BerkeleyDB::Recno -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) /  ;
-
-    eval ' $db = new BerkeleyDB::Recno -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' $db = new BerkeleyDB::Recno -Txn => "x" ' ;
-    ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' $db = new BerkeleyDB::Recno -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-# Now check the interface to Recno
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    ok 6, my $db = new BerkeleyDB::Recno -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    my $status ;
-    ok 7, $db->db_put(1, "some value") == 0  ;
-    ok 8, $db->status() == 0 ;
-    ok 9, $db->db_get(1, $value) == 0 ;
-    ok 10, $value eq "some value" ;
-    ok 11, $db->db_put(2, "value") == 0  ;
-    ok 12, $db->db_get(2, $value) == 0 ;
-    ok 13, $value eq "value" ;
-    ok 14, $db->db_del(1) == 0 ;
-    ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
-    ok 16, $db->status() == DB_KEYEMPTY ;
-    ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
-
-    ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
-    ok 19, $db->status() == DB_NOTFOUND ;
-    ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
-
-    ok 21, $db->db_sync() == 0 ;
-
-    # Check NOOVERWRITE will make put fail when attempting to overwrite
-    # an existing record.
-
-    ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
-    ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
-    ok 24, $db->status() == DB_KEYEXIST ;
-
-
-    # check that the value of the key  has not been changed by the
-    # previous test
-    ok 25, $db->db_get(2, $value) == 0 ;
-    ok 26, $value eq "value" ;
-
-
-}
-
-
-{
-    # Check simple env works with a array.
-    my $lex = new LexFile $Dfile ;
-
-    my $home = "./fred" ;
-    ok 27, my $lexD = new LexDir($home);
-
-    ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,@StdErrFile,
-    					 -Home => $home ;
-
-    ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile, 
-				    -Env      => $env,
-				    -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my $value ;
-    ok 30, $db->db_put(1, "some value") == 0 ;
-    ok 31, $db->db_get(1, $value) == 0 ;
-    ok 32, $value eq "some value" ;
-    undef $db ;
-    undef $env ;
-}
-
- 
-{
-    # cursors
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my ($k, $v) ;
-    ok 33, my $db = new BerkeleyDB::Recno -Filename  => $Dfile, 
-				    	  -ArrayBase => 0,
-				    	  -Flags     => DB_CREATE ;
-
-    # create some data
-    my @data =  (
-		"red"	,
-		"green"	,
-		"blue"	,
-		) ;
-
-    my $i ;
-    my %data ;
-    my $ret = 0 ;
-    for ($i = 0 ; $i < @data ; ++$i) {
-        $ret += $db->db_put($i, $data[$i]) ;
-	$data{$i} = $data[$i] ;
-    }
-    ok 34, $ret == 0 ;
-
-    # create the cursor
-    ok 35, my $cursor = $db->db_cursor() ;
-
-    $k = 0 ; $v = "" ;
-    my %copy = %data;
-    my $extras = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) 
-    {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-
-    ok 36, $cursor->status() == DB_NOTFOUND ;
-    ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 38, keys %copy == 0 ;
-    ok 39, $extras == 0 ;
-
-    # sequence backwards
-    %copy = %data ;
-    $extras = 0 ;
-    my $status ;
-    for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_PREV)) {
-        if ( $copy{$k} eq $v ) 
-            { delete $copy{$k} }
-	else
-	    { ++ $extras }
-    }
-    ok 40, $status == DB_NOTFOUND ;
-    ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
-    ok 42, $cursor->status() == $status ;
-    ok 43, $cursor->status() eq $status ;
-    ok 44, keys %copy == 0 ;
-    ok 45, $extras == 0 ;
-}
- 
-{
-    # Tied Array interface
-
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $db ;
-    ok 46, $db = tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-				    	    -Property => DB_RENUMBER,
-				    	    -ArrayBase => 0,
-                                            -Flags     => DB_CREATE ;
-
-    ok 47, my $cursor = (tied @array)->db_cursor() ;
-    # check the database is empty
-    my $count = 0 ;
-    my ($k, $v) = (0,"") ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-	++ $count ;
-    }
-    ok 48, $cursor->status() == DB_NOTFOUND ;
-    ok 49, $count == 0 ;
-
-    ok 50, @array == 0 ;
-
-    # Add a k/v pair
-    my $value ;
-    $array[1] = "some value";
-    ok 51, (tied @array)->status() == 0 ;
-    ok 52, $array[1] eq "some value";
-    ok 53, defined $array[1];
-    ok 54, (tied @array)->status() == 0 ;
-    ok 55, !defined $array[3];
-    ok 56, (tied @array)->status() == DB_NOTFOUND ;
-
-    ok 57, (tied @array)->db_del(1) == 0 ;
-    ok 58, (tied @array)->status() == 0 ;
-    ok 59, ! defined $array[1];
-    ok 60, (tied @array)->status() == DB_NOTFOUND ;
-
-    $array[1] = 2 ;
-    $array[10] = 20 ;
-    $array[1000] = 2000 ;
-
-    my ($keys, $values) = (0,0);
-    $count = 0 ;
-    for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
-	  $status == 0 ;
-    	  $status = $cursor->c_get($k, $v, DB_NEXT)) {
-        $keys += $k ;
-	$values += $v ;
-	++ $count ;
-    }
-    ok 61, $count == 3 ;
-    ok 62, $keys == 1011 ;
-    ok 63, $values == 2022 ;
-
-    # unshift
-    $FA ? unshift @array, "red", "green", "blue" 
-        : $db->unshift("red", "green", "blue" ) ;
-    ok 64, $array[1] eq "red" ;
-    ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
-    ok 66, $k == 1 ;
-    ok 67, $v eq "red" ;
-    ok 68, $array[2] eq "green" ;
-    ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 70, $k == 2 ;
-    ok 71, $v eq "green" ;
-    ok 72, $array[3] eq "blue" ;
-    ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 74, $k == 3 ;
-    ok 75, $v eq "blue" ;
-    ok 76, $array[4] == 2 ;
-    ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
-    ok 78, $k == 4 ;
-    ok 79, $v == 2 ;
-
-    # shift
-    ok 80, ($FA ? shift @array : $db->shift()) eq "red" ;
-    ok 81, ($FA ? shift @array : $db->shift()) eq "green" ;
-    ok 82, ($FA ? shift @array : $db->shift()) eq "blue" ;
-    ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
-
-    # push
-    $FA ? push @array, "the", "end" 
-        : $db->push("the", "end") ;
-    ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
-    ok 85, $k == 1001 ;
-    ok 86, $v eq "end" ;
-    ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
-    ok 88, $k == 1000 ;
-    ok 89, $v eq "the" ;
-    ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
-    ok 91, $k == 999 ;
-    ok 92, $v == 2000 ;
-
-    # pop
-    ok 93, ( $FA ? pop @array : $db->pop ) eq "end" ;
-    ok 94, ( $FA ? pop @array : $db->pop ) eq "the" ;
-    ok 95, ( $FA ? pop @array : $db->pop ) == 2000  ;
-
-    # now clear the array 
-    $FA ? @array = () 
-        : $db->clear() ;
-    ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
-
-    undef $cursor ;
-    undef $db ;
-    untie @array ;
-}
-
-{
-    # in-memory file
-
-    my @array ;
-    my $fd ;
-    my $value ;
-    ok 97, my $db = tie @array, 'BerkeleyDB::Recno' ;
-
-    ok 98, $db->db_put(1, "some value") == 0  ;
-    ok 99, $db->db_get(1, $value) == 0 ;
-    ok 100, $value eq "some value" ;
-
-}
- 
-{
-    # partial
-    # check works via API
-
-    my $lex = new LexFile $Dfile ;
-    my $value ;
-    ok 101, my $db = new BerkeleyDB::Recno, -Filename => $Dfile,
-                                      	        -Flags    => DB_CREATE ;
-
-    # create some data
-    my @data =  (
-		"",
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = 1 ; $i < @data ; ++$i) {
-        $ret += $db->db_put($i, $data[$i]) ;
-    }
-    ok 102, $ret == 0 ;
-
-
-    # do a partial get
-    my ($pon, $off, $len) = $db->partial_set(0,2) ;
-    ok 103, ! $pon && $off == 0 && $len == 0 ;
-    ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
-    ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
-    ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
-
-    # do a partial get, off end of data
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 107, $pon ;
-    ok 108, $off == 0 ;
-    ok 109, $len == 2 ;
-    ok 110, $db->db_get(1, $value) == 0 && $value eq "t" ;
-    ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
-    ok 112, $db->db_get(3, $value) == 0 && $value eq "" ;
-
-    # switch of partial mode
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 113, $pon ;
-    ok 114, $off == 3 ;
-    ok 115, $len == 2 ;
-    ok 116, $db->db_get(1, $value) == 0 && $value eq "boat" ;
-    ok 117, $db->db_get(2, $value) == 0 && $value eq "house" ;
-    ok 118, $db->db_get(3, $value) == 0 && $value eq "sea" ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 119, $db->db_put(1, "") == 0 ;
-    ok 120, $db->db_put(2, "AB") == 0 ;
-    ok 121, $db->db_put(3, "XYZ") == 0 ;
-    ok 122, $db->db_put(4, "KLM") == 0 ;
-
-    ($pon, $off, $len) = $db->partial_clear() ;
-    ok 123, $pon ;
-    ok 124, $off == 0 ;
-    ok 125, $len == 2 ;
-    ok 126, $db->db_get(1, $value) == 0 && $value eq "at" ;
-    ok 127, $db->db_get(2, $value) == 0 && $value eq "ABuse" ;
-    ok 128, $db->db_get(3, $value) == 0 && $value eq "XYZa" ;
-    ok 129, $db->db_get(4, $value) == 0 && $value eq "KLM" ;
-
-    # now partial put
-    ($pon, $off, $len) = $db->partial_set(3,2) ;
-    ok 130, ! $pon ;
-    ok 131, $off == 0 ;
-    ok 132, $len == 0 ;
-    ok 133, $db->db_put(1, "PPP") == 0 ;
-    ok 134, $db->db_put(2, "Q") == 0 ;
-    ok 135, $db->db_put(3, "XYZ") == 0 ;
-    ok 136, $db->db_put(4, "TU") == 0 ;
-
-    $db->partial_clear() ;
-    ok 137, $db->db_get(1, $value) == 0 && $value eq "at\0PPP" ;
-    ok 138, $db->db_get(2, $value) == 0 && $value eq "ABuQ" ;
-    ok 139, $db->db_get(3, $value) == 0 && $value eq "XYZXYZ" ;
-    ok 140, $db->db_get(4, $value) == 0 && $value eq "KLMTU" ;
-}
-
-{
-    # partial
-    # check works via tied array 
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $value ;
-    ok 141, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-                                      	        -Flags    => DB_CREATE ;
-
-    # create some data
-    my @data =  (
-		"",
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $i ;
-    for ($i = 1 ; $i < @data ; ++$i) {
-	$array[$i] = $data[$i] ;
-    }
-
-
-    # do a partial get
-    $db->partial_set(0,2) ;
-    ok 142, $array[1] eq "bo" ;
-    ok 143, $array[2] eq "ho" ;
-    ok 144, $array[3]  eq "se" ;
-
-    # do a partial get, off end of data
-    $db->partial_set(3,2) ;
-    ok 145, $array[1] eq "t" ;
-    ok 146, $array[2] eq "se" ;
-    ok 147, $array[3] eq "" ;
-
-    # switch of partial mode
-    $db->partial_clear() ;
-    ok 148, $array[1] eq "boat" ;
-    ok 149, $array[2] eq "house" ;
-    ok 150, $array[3] eq "sea" ;
-
-    # now partial put
-    $db->partial_set(0,2) ;
-    ok 151, $array[1] = "" ;
-    ok 152, $array[2] = "AB" ;
-    ok 153, $array[3] = "XYZ" ;
-    ok 154, $array[4] = "KLM" ;
-
-    $db->partial_clear() ;
-    ok 155, $array[1] eq "at" ;
-    ok 156, $array[2] eq "ABuse" ;
-    ok 157, $array[3] eq "XYZa" ;
-    ok 158, $array[4] eq "KLM" ;
-
-    # now partial put
-    $db->partial_set(3,2) ;
-    ok 159, $array[1] = "PPP" ;
-    ok 160, $array[2] = "Q" ;
-    ok 161, $array[3] = "XYZ" ;
-    ok 162, $array[4] = "TU" ;
-
-    $db->partial_clear() ;
-    ok 163, $array[1] eq "at\0PPP" ;
-    ok 164, $array[2] eq "ABuQ" ;
-    ok 165, $array[3] eq "XYZXYZ" ;
-    ok 166, $array[4] eq "KLMTU" ;
-}
-
-{
-    # transaction
-
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 167, my $lexD = new LexDir($home);
-    ok 168, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 169, my $txn = $env->txn_begin() ;
-    ok 170, my $db1 = tie @array, 'BerkeleyDB::Recno', 
-				-Filename => $Dfile,
-				-ArrayBase => 0,
-                      		-Flags    =>  DB_CREATE ,
-		        	-Env 	  => $env,
-		        	-Txn	  => $txn ;
-
-    
-    ok 171, $txn->txn_commit() == 0 ;
-    ok 172, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-
-    # create some data
-    my @data =  (
-		"boat",
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = 0 ; $i < @data ; ++$i) {
-        $ret += $db1->db_put($i, $data[$i]) ;
-    }
-    ok 173, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 174, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = (0, "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 175, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 176, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 177, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 178, $count == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie @array ;
-}
-
-
-{
-    # db_stat
-
-    my $lex = new LexFile $Dfile ;
-    my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
-    my @array ;
-    my ($k, $v) ;
-    ok 179, my $db = new BerkeleyDB::Recno -Filename 	=> $Dfile, 
-				     	   -Flags    	=> DB_CREATE,
-					   -Pagesize	=> 4 * 1024,
-					;
-
-    my $ref = $db->db_stat() ; 
-    ok 180, $ref->{$recs} == 0;
-    ok 181, $ref->{'bt_pagesize'} == 4 * 1024;
-
-    # create some data
-    my @data =  (
-		2,
-		"house",
-		"sea",
-		) ;
-
-    my $ret = 0 ;
-    my $i ;
-    for ($i = $db->ArrayOffset ; @data ; ++$i) {
-        $ret += $db->db_put($i, shift @data) ;
-    }
-    ok 182, $ret == 0 ;
-
-    $ref = $db->db_stat() ; 
-    ok 183, $ref->{$recs} == 3;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use strict ;
-   use vars qw( @ISA @EXPORT) ;
-
-   require Exporter ;
-   use BerkeleyDB;
-   @ISA=qw(BerkeleyDB BerkeleyDB::Recno);
-   @EXPORT = @BerkeleyDB::EXPORT ;
-
-   sub db_put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::db_put($key, $value * 3) ;
-   }
-
-   sub db_get { 
-	my $self = shift ;
-        $self->SUPER::db_get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }    
-    eval 'use SubDB ; ';
-    main::ok 184, $@ eq "" ;
-    my @h ;
-    my $X ;
-    eval '
-	$X = tie(@h, "SubDB", -Filename => "dbrecno.tmp", 
-			-Flags => DB_CREATE,
-			-Mode => 0640 );
-	' ;
-
-    main::ok 185, $@ eq "" ;
-
-    my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
-    main::ok 186, $@ eq "" ;
-    main::ok 187, $ret == 7 ;
-
-    my $value = 0;
-    $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
-    main::ok 188, $@ eq "" ;
-    main::ok 189, $ret == 10 ;
-
-    $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
-    main::ok 190, $@ eq ""  ;
-    main::ok 191, $ret == 1 ;
-
-    $ret = eval '$X->A_new_method(1) ' ;
-    main::ok 192, $@ eq "" ;
-    main::ok 193, $ret eq "[[10]]" ;
-
-    undef $X;
-    untie @h;
-    unlink "SubDB.pm", "dbrecno.tmp" ;
-
-}
-
-{
-    # variable length records, DB_DELIMETER -- defaults to \n
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 194, tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-						-Source	=> $Dfile2 ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 195, $x eq "abc\ndef\n\nghi\n" ;
-}
-
-{
-    # variable length records, change DB_DELIMETER
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 196, tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-						-Source	=> $Dfile2 ,
-						-Delim	=> "-";
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 197, $x eq "abc-def--ghi-";
-}
-
-{
-    # fixed length records, default DB_PAD
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 198, tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-						-Len 	=> 5,
-						-Source	=> $Dfile2 ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 199, $x eq "abc  def       ghi  " ;
-}
-
-{
-    # fixed length records, change Pad
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 200, tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-						-Len	=> 5,
-						-Pad	=> "-",
-						-Source	=> $Dfile2 ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 201, $x eq "abc--def-------ghi--" ;
-}
-
-{
-    # DB_RENUMBER
-
-    my $lex = new LexFile $Dfile;
-    my @array ;
-    my $value ;
-    ok 202, my $db = tie @array, 'BerkeleyDB::Recno', -Filename  => $Dfile,
-				    	    	-Property => DB_RENUMBER,
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ;
-    # create a few records
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-
-    ok 203, my ($length, $joined) = joiner($db, "|") ;
-    ok 204, $length == 3 ;
-    ok 205, $joined eq "abc|def|ghi";
-
-    ok 206, $db->db_del(1) == 0 ;
-    ok 207, ($length, $joined) = joiner($db, "|") ;
-    ok 208, $length == 2 ;
-    ok 209, $joined eq "abc|ghi";
-
-    undef $db ;
-    untie @array ;
-
-}
-
-{
-    # DB_APPEND
-
-    my $lex = new LexFile $Dfile;
-    my @array ;
-    my $value ;
-    ok 210, my $db = tie @array, 'BerkeleyDB::Recno', 
-					-Filename  => $Dfile,
-                                       	-Flags     => DB_CREATE ;
-
-    # create a few records
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-
-    my $k = 0 ;
-    ok 211, $db->db_put($k, "fred", DB_APPEND) == 0 ;
-    ok 212, $k == 4 ;
-
-    undef $db ;
-    untie @array ;
-}
-
-{
-    # in-memory Btree with an associated text file
-
-    my $lex = new LexFile $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 213, tie @array, 'BerkeleyDB::Recno',    -Source => $Dfile2 ,
-						-ArrayBase => 0,
-				    	    	-Property => DB_RENUMBER,
-                                      	       	-Flags  => DB_CREATE ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 214, $x eq "abc\ndef\n\nghi\n" ;
-}
-
-{
-    # in-memory, variable length records, change DB_DELIMETER
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 215, tie @array, 'BerkeleyDB::Recno', 
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-						-Source	=> $Dfile2 ,
-				    	    	-Property => DB_RENUMBER,
-						-Delim	=> "-";
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 216, $x eq "abc-def--ghi-";
-}
-
-{
-    # in-memory, fixed length records, default DB_PAD
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 217, tie @array, 'BerkeleyDB::Recno', 	-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-				    	    	-Property => DB_RENUMBER,
-						-Len 	=> 5,
-						-Source	=> $Dfile2 ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 218, $x eq "abc  def       ghi  " ;
-}
-
-{
-    # in-memory, fixed length records, change Pad
-
-    my $lex = new LexFile $Dfile, $Dfile2 ;
-    touch $Dfile2 ;
-    my @array ;
-    my $value ;
-    ok 219, tie @array, 'BerkeleyDB::Recno', 
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-				    	    	-Property => DB_RENUMBER,
-						-Len	=> 5,
-						-Pad	=> "-",
-						-Source	=> $Dfile2 ;
-    $array[0] = "abc" ;
-    $array[1] = "def" ;
-    $array[3] = "ghi" ;
-    untie @array ;
-
-    my $x = docat($Dfile2) ;
-    ok 220, $x eq "abc--def-------ghi--" ;
-}
-
-{
-    # 23 Sept 2001 -- push into an empty array
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $db ;
-    ok 221, $db = tie @array, 'BerkeleyDB::Recno', 
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-				    	    	-Property => DB_RENUMBER,
-						-Filename => $Dfile ;
-    $FA ? push @array, "first"
-        : $db->push("first") ;
-
-    ok 222, $array[0] eq "first" ;
-    ok 223, $FA ? pop @array : $db->pop() eq "first" ;
-
-    undef $db;
-    untie @array ;
-
-}
-
-{
-    # 23 Sept 2001 -- unshift into an empty array
-    my $lex = new LexFile $Dfile ;
-    my @array ;
-    my $db ;
-    ok 224, $db = tie @array, 'BerkeleyDB::Recno', 
-						-ArrayBase => 0,
-                                      	       	-Flags  => DB_CREATE ,
-				    	    	-Property => DB_RENUMBER,
-						-Filename => $Dfile ;
-    $FA ? unshift @array, "first"
-        : $db->unshift("first") ;
-
-    ok 225, $array[0] eq "first" ;
-    ok 226, ($FA ? shift @array : $db->shift()) eq "first" ;
-
-    undef $db;
-    untie @array ;
-
-}
-__END__
-
-
-# TODO
-#
-# DB_DELIMETER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/storage/bdb/perl/BerkeleyDB/t/strict.t b/storage/bdb/perl/BerkeleyDB/t/strict.t
deleted file mode 100644
index 4774cd15dad..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/strict.t
+++ /dev/null
@@ -1,177 +0,0 @@
-#!./perl -w
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..44\n";
-
-my $Dfile = "dbhash.tmp";
-my $home = "./fred" ;
-
-umask(0);
-
-{
-    # closing a database & an environment in the correct order.
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $status ;
-
-    ok 1, my $lexD = new LexDir($home);
-    ok 2, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-                                     -Flags => DB_CREATE|DB_INIT_TXN|
-                                                DB_INIT_MPOOL|DB_INIT_LOCK ;
-					  	
-    ok 3, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env;
-
-    ok 4, $db1->db_close() == 0 ; 
-
-    eval { $status = $env->db_appexit() ; } ;
-    ok 5, $status == 0 ;
-    ok 6, $@ eq "" ;
-    #print "[$@]\n" ;
-
-}
-
-{
-    # closing an environment with an open database
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-
-    ok 7, my $lexD = new LexDir($home);
-    ok 8, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-                                     -Flags => DB_CREATE|DB_INIT_TXN|
-                                                DB_INIT_MPOOL|DB_INIT_LOCK ;
-					  	
-    ok 9, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env;
-
-    eval { $env->db_appexit() ; } ;
-    ok 10, $@ =~ /BerkeleyDB Aborting: attempted to close an environment with 1 open database/ ;
-    #print "[$@]\n" ;
-
-    undef $db1 ;
-    untie %hash ;
-    undef $env ;
-}
-
-{
-    # closing a transaction & a database 
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $status ;
-
-    ok 11, my $lexD = new LexDir($home);
-    ok 12, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-                                     -Flags => DB_CREATE|DB_INIT_TXN|
-                                                DB_INIT_MPOOL|DB_INIT_LOCK ;
-
-    ok 13, my $txn = $env->txn_begin() ;
-    ok 14, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                                -Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-                                                -Txn       => $txn  ;
-
-    ok 15, $txn->txn_commit()  == 0 ;
-    eval { $status = $db->db_close() ; } ;
-    ok 16, $status == 0 ;
-    ok 17, $@ eq "" ;
-    #print "[$@]\n" ;
-    eval { $status = $env->db_appexit() ; } ;
-    ok 18, $status == 0 ;
-    ok 19, $@ eq "" ;
-    #print "[$@]\n" ;
-}
-
-{
-    # closing a database with an open transaction
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-
-    ok 20, my $lexD = new LexDir($home);
-    ok 21, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-                                     -Flags => DB_CREATE|DB_INIT_TXN|
-                                                DB_INIT_MPOOL|DB_INIT_LOCK ;
-
-    ok 22, my $txn = $env->txn_begin() ;
-    ok 23, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                                -Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-                                                -Txn       => $txn  ;
-
-    eval { $db->db_close() ; } ;
-    ok 24, $@ =~ /BerkeleyDB Aborting: attempted to close a database while a transaction was still open at/ ;
-    #print "[$@]\n" ;
-    $txn->txn_abort();
-    $db->db_close();
-}
-
-{
-    # closing a cursor & a database 
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $status ;
-    ok 25, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                                -Flags     => DB_CREATE ;
-    ok 26, my $cursor = $db->db_cursor() ;
-    ok 27, $cursor->c_close() == 0 ;
-    eval { $status = $db->db_close() ; } ;
-    ok 28, $status == 0 ;
-    ok 29, $@ eq "" ;
-    #print "[$@]\n" ;
-}
-
-{
-    # closing a database with an open cursor
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    ok 30, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                                -Flags     => DB_CREATE ;
-    ok 31, my $cursor = $db->db_cursor() ;
-    eval { $db->db_close() ; } ;
-    ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
-    #print "[$@]\n" ;
-}
-
-{
-    # closing a transaction & a cursor 
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $status ;
-    my $home = 'fred1';
-
-    ok 33, my $lexD = new LexDir($home);
-    ok 34, my $env = new BerkeleyDB::Env -Home => $home,@StdErrFile,
-                                     -Flags => DB_CREATE|DB_INIT_TXN|
-                                                DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 35, my $txn = $env->txn_begin() ;
-    ok 36, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                                -Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-                                                -Txn       => $txn  ;
-    ok 37, my $cursor = $db->db_cursor() ;
-    eval { $status = $cursor->c_close() ; } ;
-    ok 38, $status == 0 ;
-    ok 39, ($status = $txn->txn_commit())  == 0 ;
-    ok 40, $@ eq "" ;
-    eval { $status = $db->db_close() ; } ;
-    ok 41, $status == 0 ;
-    ok 42, $@ eq "" ;
-    #print "[$@]\n" ;
-    eval { $status = $env->db_appexit() ; } ;
-    ok 43, $status == 0 ;
-    ok 44, $@ eq "" ;
-    #print "[$@]\n" ;
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/subdb.t b/storage/bdb/perl/BerkeleyDB/t/subdb.t
deleted file mode 100644
index 4e56332eba3..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/subdb.t
+++ /dev/null
@@ -1,243 +0,0 @@
-#!./perl -w
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-BEGIN 
-{
-    if ($BerkeleyDB::db_version < 3) {
-	print "1..0 # Skip: this needs Berkeley DB 3.x or better\n" ;
-	exit 0 ;
-    }
-}
-
-print "1..43\n";
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-# Berkeley DB 3.x specific functionality
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' BerkeleyDB::db_remove  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' BerkeleyDB::db_remove -Bad => 2, -Filename => "fred", -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/  ;
-
-    eval ' BerkeleyDB::db_remove -Filename => "a", -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' BerkeleyDB::db_remove -Subname => "a"' ;
-    ok 4, $@ =~ /^Must specify a filename/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' BerkeleyDB::db_remove -Filename => "x", -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-{
-    # subdatabases
-
-    # opening a subdatabse in an exsiting database that doesn't have
-    # subdatabases at all should fail
-
-    my $lex = new LexFile $Dfile ;
-
-    ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				        -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my %data = qw(
-    			red	sky
-			blue	sea
-			black	heart
-			yellow	belley
-			green	grass
-    		) ;
-
-    ok 7, addData($db, %data) ;
-
-    undef $db ;
-
-    $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-			       -Subname  => "fred" ;
-    ok 8, ! $db ;				    
-
-    ok 9, -e $Dfile ;
-    ok 10, ! BerkeleyDB::db_remove(-Filename => $Dfile)  ;
-}
-
-{
-    # subdatabases
-
-    # opening a subdatabse in an exsiting database that does have
-    # subdatabases at all, but not this one
-
-    my $lex = new LexFile $Dfile ;
-
-    ok 11, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				         -Subname  => "fred" ,
-				         -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my %data = qw(
-    			red	sky
-			blue	sea
-			black	heart
-			yellow	belley
-			green	grass
-    		) ;
-
-    ok 12, addData($db, %data) ;
-
-    undef $db ;
-
-    $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				    -Subname  => "joe" ;
-
-    ok 13, !$db ;				    
-
-}
-
-{
-    # subdatabases
-
-    my $lex = new LexFile $Dfile ;
-
-    ok 14, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				        -Subname  => "fred" ,
-				        -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my %data = qw(
-    			red	sky
-			blue	sea
-			black	heart
-			yellow	belley
-			green	grass
-    		) ;
-
-    ok 15, addData($db, %data) ;
-
-    undef $db ;
-
-    ok 16, $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				    -Subname  => "fred" ;
-
-    ok 17, my $cursor = $db->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $status ;
-    while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
-        if ($data{$k} eq $v) {
-	    delete $data{$k} ;
-	}
-    }
-    ok 18, $status == DB_NOTFOUND ;
-    ok 19, keys %data == 0 ;
-}
-
-{
-    # subdatabases
-
-    # opening a database with multiple subdatabases - handle should be a list
-    # of the subdatabase names
-
-    my $lex = new LexFile $Dfile ;
-  
-    ok 20, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile, 
-				        -Subname  => "fred" ,
-				        -Flags    => DB_CREATE ;
-
-    ok 21, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile, 
-				        -Subname  => "joe" ,
-				        -Flags    => DB_CREATE ;
-
-    # Add a k/v pair
-    my %data = qw(
-    			red	sky
-			blue	sea
-			black	heart
-			yellow	belley
-			green	grass
-    		) ;
-
-    ok 22, addData($db1, %data) ;
-    ok 23, addData($db2, %data) ;
-
-    undef $db1 ;
-    undef $db2 ;
-  
-    ok 24, my $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
-				         -Flags    => DB_RDONLY ;
-
-    #my $type = $db->type() ; print "type $type\n" ;
-    ok 25, my $cursor = $db->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $status ;
-    my @dbnames = () ;
-    while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
-        push @dbnames, $k ;
-    }
-    ok 26, $status == DB_NOTFOUND ;
-    ok 27, join(",", sort @dbnames) eq "fred,joe" ;
-    undef $db ;
-
-    ok 28, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "harry") != 0;
-    ok 29, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") == 0 ;
-    
-    # should only be one subdatabase
-    ok 30, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
-				         -Flags    => DB_RDONLY ;
-
-    ok 31, $cursor = $db->db_cursor() ;
-    @dbnames = () ;
-    while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
-        push @dbnames, $k ;
-    }
-    ok 32, $status == DB_NOTFOUND ;
-    ok 33, join(",", sort @dbnames) eq "joe" ;
-    undef $db ;
-
-    # can't delete an already deleted subdatabase
-    ok 34, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") != 0;
-    
-    ok 35, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "joe") == 0 ;
-    
-    # should only be one subdatabase
-    ok 36, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
-				         -Flags    => DB_RDONLY ;
-
-    ok 37, $cursor = $db->db_cursor() ;
-    @dbnames = () ;
-    while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
-        push @dbnames, $k ;
-    }
-    ok 38, $status == DB_NOTFOUND ;
-    ok 39, @dbnames == 0 ;
-    undef $db ;
-    undef $cursor ;
-
-    ok 40, -e $Dfile ;
-    ok 41, BerkeleyDB::db_remove(-Filename => $Dfile)  == 0 ;
-    ok 42, ! -e $Dfile ;
-    ok 43, BerkeleyDB::db_remove(-Filename => $Dfile) != 0 ;
-}
-
-# db_remove with env
diff --git a/storage/bdb/perl/BerkeleyDB/t/txn.t b/storage/bdb/perl/BerkeleyDB/t/txn.t
deleted file mode 100644
index f8fa2ceb897..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/txn.t
+++ /dev/null
@@ -1,320 +0,0 @@
-#!./perl -w
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..58\n";
-
-my $Dfile = "dbhash.tmp";
-
-umask(0);
-
-{
-    # error cases
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 1, my $lexD = new LexDir($home);
-    ok 2, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE| DB_INIT_MPOOL;
-    eval { $env->txn_begin() ; } ;
-    ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
-
-    eval { my $txn_mgr = $env->TxnMgr() ; } ;
-    ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
-    undef $env ;
-
-}
-
-{
-    # transaction - abort works
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 5, my $lexD = new LexDir($home);
-    ok 6, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 7, my $txn = $env->txn_begin() ;
-    ok 8, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    
-    ok 9, $txn->txn_commit() == 0 ;
-    ok 10, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 11, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 12, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 13, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 14, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 15, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 16, $count == 0 ;
-
-    my $stat = $env->txn_stat() ;
-    ok 17, $stat->{'st_naborts'} == 1 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie %hash ;
-}
-
-{
-    # transaction - abort works via txnmgr
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 18, my $lexD = new LexDir($home);
-    ok 19, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 20, my $txn_mgr = $env->TxnMgr() ;
-    ok 21, my $txn = $txn_mgr->txn_begin() ;
-    ok 22, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    ok 23, $txn->txn_commit() == 0 ;
-    ok 24, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-    
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 25, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 26, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 27, $count == 3 ;
-    undef $cursor ;
-
-    # now abort the transaction
-    ok 28, $txn->txn_abort() == 0 ;
-
-    # there shouldn't be any records in the database
-    $count = 0 ;
-    # sequence forwards
-    ok 29, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 30, $count == 0 ;
-
-    my $stat = $txn_mgr->txn_stat() ;
-    ok 31, $stat->{'st_naborts'} == 1 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $txn_mgr ;
-    undef $env ;
-    untie %hash ;
-}
-
-{
-    # transaction - commit works
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 32, my $lexD = new LexDir($home);
-    ok 33, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 34, my $txn = $env->txn_begin() ;
-    ok 35, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    
-    ok 36, $txn->txn_commit() == 0 ;
-    ok 37, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 38, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 39, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 40, $count == 3 ;
-    undef $cursor ;
-
-    # now commit the transaction
-    ok 41, $txn->txn_commit() == 0 ;
-
-    $count = 0 ;
-    # sequence forwards
-    ok 42, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 43, $count == 3 ;
-
-    my $stat = $env->txn_stat() ;
-    ok 44, $stat->{'st_naborts'} == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $env ;
-    untie %hash ;
-}
-
-{
-    # transaction - commit works via txnmgr
-
-    my $lex = new LexFile $Dfile ;
-    my %hash ;
-    my $value ;
-
-    my $home = "./fred" ;
-    ok 45, my $lexD = new LexDir($home);
-    ok 46, my $env = new BerkeleyDB::Env -Home => $home, @StdErrFile,
-				     -Flags => DB_CREATE|DB_INIT_TXN|
-					  	DB_INIT_MPOOL|DB_INIT_LOCK ;
-    ok 47, my $txn_mgr = $env->TxnMgr() ;
-    ok 48, my $txn = $txn_mgr->txn_begin() ;
-    ok 49, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-                                      	       	-Flags     => DB_CREATE ,
-					       	-Env 	   => $env,
-					    	-Txn	   => $txn  ;
-
-    ok 50, $txn->txn_commit() == 0 ;
-    ok 51, $txn = $env->txn_begin() ;
-    $db1->Txn($txn);
-    
-    # create some data
-    my %data =  (
-		"red"	=> "boat",
-		"green"	=> "house",
-		"blue"	=> "sea",
-		) ;
-
-    my $ret = 0 ;
-    while (my ($k, $v) = each %data) {
-        $ret += $db1->db_put($k, $v) ;
-    }
-    ok 52, $ret == 0 ;
-
-    # should be able to see all the records
-
-    ok 53, my $cursor = $db1->db_cursor() ;
-    my ($k, $v) = ("", "") ;
-    my $count = 0 ;
-    # sequence forwards
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 54, $count == 3 ;
-    undef $cursor ;
-
-    # now commit the transaction
-    ok 55, $txn->txn_commit() == 0 ;
-
-    $count = 0 ;
-    # sequence forwards
-    ok 56, $cursor = $db1->db_cursor() ;
-    while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
-        ++ $count ;
-    }
-    ok 57, $count == 3 ;
-
-    my $stat = $txn_mgr->txn_stat() ;
-    ok 58, $stat->{'st_naborts'} == 0 ;
-
-    undef $txn ;
-    undef $cursor ;
-    undef $db1 ;
-    undef $txn_mgr ;
-    undef $env ;
-    untie %hash ;
-}
-
diff --git a/storage/bdb/perl/BerkeleyDB/t/unknown.t b/storage/bdb/perl/BerkeleyDB/t/unknown.t
deleted file mode 100644
index f2630b585c0..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/unknown.t
+++ /dev/null
@@ -1,176 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%   
-
-use strict ;
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
-
-use BerkeleyDB; 
-use t::util ;
-
-print "1..41\n";
-
-my $Dfile = "dbhash.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-# Check for invalid parameters
-{
-    # Check for invalid parameters
-    my $db ;
-    eval ' $db = new BerkeleyDB::Unknown  -Stupid => 3 ; ' ;
-    ok 1, $@ =~ /unknown key value\(s\) Stupid/  ;
-
-    eval ' $db = new BerkeleyDB::Unknown -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
-    ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/  ;
-
-    eval ' $db = new BerkeleyDB::Unknown -Env => 2 ' ;
-    ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-
-    eval ' $db = new BerkeleyDB::Unknown -Txn => "fred" ' ;
-    ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
-
-    my $obj = bless [], "main" ;
-    eval ' $db = new BerkeleyDB::Unknown -Env => $obj ' ;
-    ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
-}
-
-# check the interface to a rubbish database
-{
-    # first an empty file
-    my $lex = new LexFile $Dfile ;
-    ok 6, writeFile($Dfile, "") ;
-
-    ok 7, ! (new BerkeleyDB::Unknown -Filename => $Dfile); 
-
-    # now a non-database file
-    writeFile($Dfile, "\x2af6") ;
-    ok 8, ! (new BerkeleyDB::Unknown -Filename => $Dfile); 
-}
-
-# check the interface to a Hash database
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    # create a hash database
-    ok 9, my $db = new BerkeleyDB::Hash -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a few k/v pairs
-    my $value ;
-    my $status ;
-    ok 10, $db->db_put("some key", "some value") == 0  ;
-    ok 11, $db->db_put("key", "value") == 0  ;
-
-    # close the database
-    undef $db ;
-
-    # now open it with Unknown
-    ok 12, $db = new BerkeleyDB::Unknown -Filename => $Dfile; 
-
-    ok 13, $db->type() == DB_HASH ;
-    ok 14, $db->db_get("some key", $value) == 0 ;
-    ok 15, $value eq "some value" ;
-    ok 16, $db->db_get("key", $value) == 0 ;
-    ok 17, $value eq "value" ;
-
-    my @array ;
-    eval { $db->Tie(\@array)} ;
-    ok 18, $@ =~ /^Tie needs a reference to a hash/ ;
-
-    my %hash ;
-    $db->Tie(\%hash) ;
-    ok 19, $hash{"some key"} eq "some value" ;
-
-}
-
-# check the interface to a Btree database
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    # create a hash database
-    ok 20, my $db = new BerkeleyDB::Btree -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a few k/v pairs
-    my $value ;
-    my $status ;
-    ok 21, $db->db_put("some key", "some value") == 0  ;
-    ok 22, $db->db_put("key", "value") == 0  ;
-
-    # close the database
-    undef $db ;
-
-    # now open it with Unknown
-    # create a hash database
-    ok 23, $db = new BerkeleyDB::Unknown -Filename => $Dfile; 
-
-    ok 24, $db->type() == DB_BTREE ;
-    ok 25, $db->db_get("some key", $value) == 0 ;
-    ok 26, $value eq "some value" ;
-    ok 27, $db->db_get("key", $value) == 0 ;
-    ok 28, $value eq "value" ;
-
-
-    my @array ;
-    eval { $db->Tie(\@array)} ;
-    ok 29, $@ =~ /^Tie needs a reference to a hash/ ;
-
-    my %hash ;
-    $db->Tie(\%hash) ;
-    ok 30, $hash{"some key"} eq "some value" ;
-
-
-}
-
-# check the interface to a Recno database
-
-{
-    my $lex = new LexFile $Dfile ;
-
-    # create a recno database
-    ok 31, my $db = new BerkeleyDB::Recno -Filename => $Dfile, 
-				    -Flags    => DB_CREATE ;
-
-    # Add a few k/v pairs
-    my $value ;
-    my $status ;
-    ok 32, $db->db_put(0, "some value") == 0  ;
-    ok 33, $db->db_put(1, "value") == 0  ;
-
-    # close the database
-    undef $db ;
-
-    # now open it with Unknown
-    # create a hash database
-    ok 34, $db = new BerkeleyDB::Unknown -Filename => $Dfile; 
-
-    ok 35, $db->type() == DB_RECNO ;
-    ok 36, $db->db_get(0, $value) == 0 ;
-    ok 37, $value eq "some value" ;
-    ok 38, $db->db_get(1, $value) == 0 ;
-    ok 39, $value eq "value" ;
-
-
-    my %hash ;
-    eval { $db->Tie(\%hash)} ;
-    ok 40, $@ =~ /^Tie needs a reference to an array/ ;
-
-    my @array ;
-    $db->Tie(\@array) ;
-    ok 41, $array[1] eq "value" ;
-
-
-}
-
-# check i/f to text
diff --git a/storage/bdb/perl/BerkeleyDB/t/util.pm b/storage/bdb/perl/BerkeleyDB/t/util.pm
deleted file mode 100644
index 9f55c40d8c2..00000000000
--- a/storage/bdb/perl/BerkeleyDB/t/util.pm
+++ /dev/null
@@ -1,326 +0,0 @@
-package util ;
-
-package main ;
-
-use strict ;
-use BerkeleyDB ;
-use File::Path qw(rmtree);
-use vars qw(%DB_errors $FA) ;
-
-use vars qw( @StdErrFile );
-
-@StdErrFile = ( -ErrFile => *STDERR, -ErrPrefix => "\n# " ) ;
-
-$| = 1;
-
-%DB_errors = (
-    'DB_INCOMPLETE'	=> "DB_INCOMPLETE: Sync was unable to complete",
-    'DB_KEYEMPTY'	=> "DB_KEYEMPTY: Non-existent key/data pair",
-    'DB_KEYEXIST'	=> "DB_KEYEXIST: Key/data pair already exists",
-    'DB_LOCK_DEADLOCK'  => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
-    'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
-    'DB_NOTFOUND'	=> "DB_NOTFOUND: No matching key/data pair found",
-    'DB_OLD_VERSION'	=> "DB_OLDVERSION: Database requires a version upgrade",
-    'DB_RUNRECOVERY'	=> "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-# full tied array support started in Perl 5.004_57
-# just double check.
-$FA = 0 ;
-{
-    sub try::TIEARRAY { bless [], "try" }
-    sub try::FETCHSIZE { $FA = 1 }
-    my @a ; 
-    tie @a, 'try' ;
-    my $a = @a ;
-}
-
-{
-    package LexFile ;
-
-    use vars qw( $basename @files ) ;
-    $basename = "db0000" ;
-
-    sub new
-    {
-	my $self = shift ;
-        #my @files = () ;
-        foreach (@_)
-        {
-            $_ = $basename ;
-            unlink $basename ;
-            push @files, $basename ;
-            ++ $basename ;
-        }
- 	bless [ @files ], $self ;
-    }
-
-    sub DESTROY
-    {
-	my $self = shift ;
-	#unlink @{ $self } ;
-    }
-
-    END
-    {
-        foreach (@files) { unlink $_ }
-    }
-}
-
-
-{
-    package LexDir ;
-
-    use File::Path qw(rmtree);
-
-    use vars qw( $basename %dirs ) ;
-
-    sub new
-    {
-        my $self = shift ;
-        my $dir = shift ;
-    
-        rmtree $dir if -e $dir ;
-    
-        mkdir $dir, 0777 or return undef ;
-
-        return bless [ $dir ], $self ;
-    }
-    
-    sub DESTROY 
-    {
-        my $self = shift ;
-        my $dir = $self->[0];
-        #rmtree $dir;
-        $dirs{$dir} ++ ;
-    }
-
-    END
-    {
-        foreach (keys %dirs) {
-            rmtree $_ if -d $_ ;
-        }
-    }
-
-}
-
-{
-    package Redirect ;
-    use Symbol ;
-
-    sub new
-    {
-        my $class = shift ;
-        my $filename = shift ;
-	my $fh = gensym ;
-	open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
-	my $real_stdout = select($fh) ;
-	return bless [$fh, $real_stdout ] ;
-
-    }
-    sub DESTROY
-    {
-        my $self = shift ;
-	close $self->[0] ;
-	select($self->[1]) ;
-    }
-}
-
-sub normalise
-{
-    my $data = shift ;
-    $data =~ s#\r\n#\n#g
-        if $^O eq 'cygwin' ;
-
-    return $data ;
-}
-
-
-sub docat
-{
-    my $file = shift;
-    local $/ = undef;
-    open(CAT,$file) || die "Cannot open $file:$!";
-    my $result = ;
-    close(CAT);
-    $result = normalise($result);
-    return $result;
-}
-
-sub docat_del
-{ 
-    my $file = shift;
-    local $/ = undef;
-    open(CAT,$file) || die "Cannot open $file: $!";
-    my $result =  || "" ;
-    close(CAT);
-    unlink $file ;
-    $result = normalise($result);
-    return $result;
-}   
-
-sub writeFile
-{
-    my $name = shift ;
-    open(FH, ">$name") or return 0 ;
-    print FH @_ ;
-    close FH ;
-    return 1 ;
-}
-
-sub touch
-{
-    my $file = shift ;
-    open(CAT,">$file") || die "Cannot open $file:$!";
-    close(CAT);
-}
-
-sub joiner
-{
-    my $db = shift ;
-    my $sep = shift ;
-    my ($k, $v) = (0, "") ;
-    my @data = () ;
-
-    my $cursor = $db->db_cursor()  or return () ;
-    for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
-          $status == 0 ;
-          $status = $cursor->c_get($k, $v, DB_NEXT)) {
-	push @data, $v ;
-    }
-
-    (scalar(@data), join($sep, @data)) ;
-}
-
-sub joinkeys
-{
-    my $db = shift ;
-    my $sep = shift || " " ;
-    my ($k, $v) = (0, "") ;
-    my @data = () ;
-
-    my $cursor = $db->db_cursor()  or return () ;
-    for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
-          $status == 0 ;
-          $status = $cursor->c_get($k, $v, DB_NEXT)) {
-	push @data, $k ;
-    }
-
-    return join($sep, @data) ;
-
-}
-
-sub dumpdb
-{
-    my $db = shift ;
-    my $sep = shift || " " ;
-    my ($k, $v) = (0, "") ;
-    my @data = () ;
-
-    my $cursor = $db->db_cursor()  or return () ;
-    for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
-          $status == 0 ;
-          $status = $cursor->c_get($k, $v, DB_NEXT)) {
-	print "  [$k][$v]\n" ;
-    }
-
-
-}
-
-sub countRecords
-{
-   my $db = shift ;
-   my ($k, $v) = (0,0) ;
-   my ($count) = 0 ;
-   my ($cursor) = $db->db_cursor() ;
-   #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
-#	$status == 0 ;
-#	$status = $cursor->c_get($k, $v, DB_NEXT) )
-   while ($cursor->c_get($k, $v, DB_NEXT) == 0)
-     { ++ $count }
-
-   return $count ;
-}
-
-sub addData
-{
-    my $db = shift ;
-    my @data = @_ ;
-    die "addData odd data\n" if @data % 2 != 0 ;
-    my ($k, $v) ;
-    my $ret = 0 ;
-    while (@data) {
-        $k = shift @data ;
-        $v = shift @data ;
-        $ret += $db->db_put($k, $v) ;
-    }
-
-    return ($ret == 0) ;
-}
-
-sub ok
-{
-    my $no = shift ;
-    my $result = shift ;
- 
-    print "not " unless $result ;
-    print "ok $no\n" ;
-}
-
-
-# These two subs lifted directly from MLDBM.pm
-#
-sub _compare {
-    use vars qw(%compared);
-    local %compared;
-    return _cmp(@_);
-}
-
-sub _cmp {
-    my($a, $b) = @_;
-
-    # catch circular loops
-    return(1) if $compared{$a.'&*&*&*&*&*'.$b}++;
-#    print "$a $b\n";
-#    print &Data::Dumper::Dumper($a, $b);
-
-    if(ref($a) and ref($a) eq ref($b)) {
-	if(eval { @$a }) {
-#	    print "HERE ".@$a." ".@$b."\n";
-	    @$a == @$b or return 0;
-#	    print @$a, ' ', @$b, "\n";
-#	    print "HERE2\n";
-
-	    for(0..@$a-1) {
-		&_cmp($a->[$_], $b->[$_]) or return 0;
-	    }
-	} elsif(eval { %$a }) {
-	    keys %$a == keys %$b or return 0;
-	    for (keys %$a) {
-		&_cmp($a->{$_}, $b->{$_}) or return 0;
-	    }
-	} elsif(eval { $$a }) {
-	    &_cmp($$a, $$b) or return 0;
-	} else {
-	    die("data $a $b not handled");
-	}
-	return 1;
-    } elsif(! ref($a) and ! ref($b)) {
-	return ($a eq $b);
-    } else {
-	return 0;
-    }
-
-}
-
-sub fillout
-{
-    my $var = shift ;
-    my $length = shift ;
-    my $pad = shift || " " ;
-    my $template = $pad x $length ;
-    substr($template, 0, length($var)) = $var ;
-    return $template ;
-}
-
-1;
diff --git a/storage/bdb/perl/BerkeleyDB/typemap b/storage/bdb/perl/BerkeleyDB/typemap
deleted file mode 100644
index 66c622bd048..00000000000
--- a/storage/bdb/perl/BerkeleyDB/typemap
+++ /dev/null
@@ -1,292 +0,0 @@
-# typemap for Perl 5 interface to Berkeley DB version 2 & 3
-#
-# SCCS: %I%, %G%     
-#
-# written by Paul Marquess 
-#
-#################################### DB SECTION
-#
-# 
-
-void *			T_PV
-u_int			T_U_INT
-u_int32_t		T_U_INT
-const char * 		T_PV_NULL
-PV_or_NULL		T_PV_NULL
-IO_or_NULL		T_IO_NULL
-
-AV *			T_AV
-
-BerkeleyDB		T_PTROBJ
-BerkeleyDB::Common	T_PTROBJ_AV
-BerkeleyDB::Hash	T_PTROBJ_AV
-BerkeleyDB::Btree	T_PTROBJ_AV
-BerkeleyDB::Recno	T_PTROBJ_AV
-BerkeleyDB::Queue	T_PTROBJ_AV
-BerkeleyDB::Cursor	T_PTROBJ_AV
-BerkeleyDB::TxnMgr	T_PTROBJ_AV
-BerkeleyDB::Txn		T_PTROBJ_AV
-BerkeleyDB::Log		T_PTROBJ_AV
-BerkeleyDB::Lock	T_PTROBJ_AV
-BerkeleyDB::Env		T_PTROBJ_AV
-
-BerkeleyDB::Raw		T_RAW
-BerkeleyDB::Common::Raw	T_RAW
-BerkeleyDB::Hash::Raw	T_RAW
-BerkeleyDB::Btree::Raw	T_RAW
-BerkeleyDB::Recno::Raw	T_RAW
-BerkeleyDB::Queue::Raw	T_RAW
-BerkeleyDB::Cursor::Raw	T_RAW
-BerkeleyDB::TxnMgr::Raw	T_RAW
-BerkeleyDB::Txn::Raw	T_RAW
-BerkeleyDB::Log::Raw	T_RAW
-BerkeleyDB::Lock::Raw	T_RAW
-BerkeleyDB::Env::Raw	T_RAW
-
-BerkeleyDB::Env::Inner	T_INNER
-BerkeleyDB::Common::Inner	T_INNER
-BerkeleyDB::Txn::Inner	T_INNER
-BerkeleyDB::TxnMgr::Inner	T_INNER
-# BerkeleyDB__Env 	T_PTR
-DBT			T_dbtdatum
-DBT_OPT			T_dbtdatum_opt
-DBT_B			T_dbtdatum_btree
-DBTKEY			T_dbtkeydatum
-DBTKEY_B		T_dbtkeydatum_btree
-DBTYPE			T_U_INT
-DualType		T_DUAL
-BerkeleyDB_type *	T_IV
-BerkeleyDB_ENV_type *	T_IV
-BerkeleyDB_TxnMgr_type * T_IV
-BerkeleyDB_Txn_type *	T_IV
-BerkeleyDB__Cursor_type * T_IV
-DB *			T_IV
-DB_ENV *		T_IV
-
-INPUT
-
-T_AV
-	if (SvROK($arg) && SvTYPE(SvRV($arg)) == SVt_PVAV)
-        /* if (sv_isa($arg, \"${ntype}\")) */
-            $var = (AV*)SvRV($arg);
-        else
-            croak(\"$var is not an array reference\")
-
-T_RAW
-        $var = INT2PTR($type,SvIV($arg)
-
-T_U_INT
-        $var = SvUV($arg)
-
-T_SV_REF_NULL
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ;
-        else if (sv_derived_from($arg, \"${ntype}\")) {
-            IV tmp = SvIV((SV *)GetInternalObject($arg));
-            $var =  INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-T_HV_REF_NULL
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ;
-        else if (sv_derived_from($arg, \"${ntype}\")) {
-            HV * hv = (HV *)GetInternalObject($arg);
-            SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
-            IV tmp = SvIV(*svp);
-            $var =  INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-T_HV_REF
-        if (sv_derived_from($arg, \"${ntype}\")) {
-            HV * hv = (HV *)GetInternalObject($arg);
-            SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
-            IV tmp = SvIV(*svp);
-            $var =  INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-
-T_P_REF
-        if (sv_derived_from($arg, \"${ntype}\")) {
-            IV tmp = SvIV((SV*)SvRV($arg));
-            $var = INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-
-T_INNER
-	{
-	    HV * hv = (HV *)SvRV($arg);
-            SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
-            IV tmp = SvIV(*svp);
-            $var =  INT2PTR($type, tmp);
-	}
-
-T_PV_NULL
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ;
-	else {
-            $var = ($type)SvPV($arg,PL_na) ;
-	    if (PL_na == 0)
-		$var = NULL ;
-	}
-
-T_IO_NULL
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ; 
-	else 
-            $var = IoOFP(sv_2io($arg))
-
-T_PTROBJ_NULL
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ;
-        else if (sv_derived_from($arg, \"${ntype}\")) {
-            IV tmp = SvIV((SV*)SvRV($arg));
-            $var = INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-T_PTROBJ_SELF
-	if ($arg == &PL_sv_undef)
-	    $var = NULL ;
-        else if (sv_derived_from($arg, \"${ntype}\")) {
-            IV tmp = SvIV((SV*)SvRV($arg));
-            $var = INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-T_PTROBJ_AV
-        if ($arg == &PL_sv_undef || $arg == NULL)
-            $var = NULL ;
-        else if (sv_derived_from($arg, \"${ntype}\")) {
-            IV tmp = SvIV(getInnerObject($arg)) ;
-            $var = INT2PTR($type, tmp);
-        }
-        else
-            croak(\"$var is not of type ${ntype}\")
-
-T_dbtkeydatum
-	{
-	    SV* my_sv = $arg ;
-	    DBM_ckFilter(my_sv, filter_store_key, \"filter_store_key\");
-	    DBT_clear($var) ;
-            SvGETMAGIC($arg) ;
-	    if (db->recno_or_queue) {
-	        Value = GetRecnoKey(db, SvIV(my_sv)) ; 
-	        $var.data = & Value; 
-	        $var.size = (int)sizeof(db_recno_t);
-	    }
-	    else {
-	        $var.data = SvPV(my_sv, PL_na);
-	        $var.size = (int)PL_na;
-	    }
-	}
-
-T_dbtkeydatum_btree
-	{
-	    SV* my_sv = $arg ;
-	    DBM_ckFilter(my_sv, filter_store_key, \"filter_store_key\");
-	    DBT_clear($var) ;
-            SvGETMAGIC($arg) ;
-	    if (db->recno_or_queue ||
-		    (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
-	        Value = GetRecnoKey(db, SvIV(my_sv)) ; 
-	        $var.data = & Value; 
-	        $var.size = (int)sizeof(db_recno_t);
-	    }
-	    else {
-	        $var.data = SvPV(my_sv, PL_na);
-	        $var.size = (int)PL_na;
-	    }
-	}
-
-T_dbtdatum
-	{
-	    SV* my_sv = $arg ;
-	    DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\");
-	    DBT_clear($var) ;
-            SvGETMAGIC($arg) ;
-	    $var.data = SvPV(my_sv, PL_na);
-	    $var.size = (int)PL_na;
-  	    $var.flags = db->partial ;
-    	    $var.dlen  = db->dlen ;
-	    $var.doff  = db->doff ;
-	}
-	
-T_dbtdatum_opt
-	DBT_clear($var) ;
-	if (flagSet(DB_GET_BOTH)) {
-	   SV* my_sv = $arg ;
-	   DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\");
-           SvGETMAGIC($arg) ;
-	   $var.data = SvPV(my_sv, PL_na);
-	   $var.size = (int)PL_na;
-  	   $var.flags = db->partial ;
-    	   $var.dlen  = db->dlen ;
-	   $var.doff  = db->doff ;
-	}
-	
-T_dbtdatum_btree
-	DBT_clear($var) ;
-	if (flagSet(DB_GET_BOTH)) {
-	    SV* my_sv = $arg ;
-	    DBM_ckFilter(my_sv, filter_store_value, \"filter_store_value\");
-            SvGETMAGIC($arg) ;
-	    $var.data = SvPV(my_sv, PL_na);
-	    $var.size = (int)PL_na;
-  	    $var.flags = db->partial ;
-    	    $var.dlen  = db->dlen ;
-	    $var.doff  = db->doff ;
-	}
-	
-
-OUTPUT
-
-T_RAW
-        sv_setiv($arg, PTR2IV($var));
-
-T_SV_REF_NULL
-	sv_setiv($arg, PTR2IV($var));
-
-T_HV_REF_NULL
-	sv_setiv($arg, PTR2IV($var));
-
-T_HV_REF
-	sv_setiv($arg, PTR2IV($var));
-
-T_P_REF
-	sv_setiv($arg, PTR2IV($var));
-
-T_DUAL
-	setDUALerrno($arg, $var) ;
-
-T_U_INT
-        sv_setuv($arg, (UV)$var);
-
-T_PV_NULL
-        sv_setpv((SV*)$arg, $var);
-
-T_dbtkeydatum_btree
-	OutputKey_B($arg, $var)
-T_dbtkeydatum
-	OutputKey($arg, $var)
-T_dbtdatum
-	OutputValue($arg, $var)
-T_dbtdatum_opt
-	OutputValue($arg, $var)
-T_dbtdatum_btree
-	OutputValue_B($arg, $var)
-
-T_PTROBJ_NULL
-        sv_setref_pv($arg, \"${ntype}\", (void*)$var);
-
-T_PTROBJ_SELF
-        sv_setref_pv($arg, self, (void*)$var);
diff --git a/storage/bdb/perl/DB_File/Changes b/storage/bdb/perl/DB_File/Changes
deleted file mode 100644
index 89027d13f60..00000000000
--- a/storage/bdb/perl/DB_File/Changes
+++ /dev/null
@@ -1,476 +0,0 @@
-
-
-1.810 7th August 2004
-
-   * Fixed db-hash.t for Cygwin
-
-   * Added substr tests to db-hast.t
-
-1.809 20th June 2004
-
-   * Merged core patch 22258
-
-   * Merged core patch 22741
-
-   * Fixed core bug 30237. 
-     Using substr to pass parameters to the low-level Berkeley DB interface
-     causes problems with Perl 5.8.1 or better.
-     typemap fix supplied by Marcus Holland-Moritz.
-
-1.808 22nd December 2003
-
-   * Added extra DBM Filter tests.
-
-   * Fixed a memory leak in ParseOpenInfo, which whould occur if the
-     opening of the database failed. Leak spotted by Adrian Enache.
-
-1.807 1st November 2003
-
-   * Fixed minor typos on pod documetation - reported by Jeremy Mates &
-     Mark Jason Dominus.
-
-   * dbinfo updated to report when a database is encrypted.
-
-1.806 22nd October 2002
-
-   * Fixed problem when trying to build with a multi-threaded perl.
-
-   * Tidied up the recursion detetion code.
-
-   * merged core patch 17844 - missing dTHX declarations.
-
-   * merged core patch 17838 
-
-1.805 1st September 2002
-
-   * Added support to allow DB_File to build with Berkeley DB 4.1.X
-
-   * Tightened up the test harness to test that calls to untie don't generate
-     the "untie attempted while %d inner references still exist" warning.
-
-   * added code to guard against calling the callbacks (compare,hash & prefix) 
-     recursively.
-
-   * pasing undef for the flags and/or mode when opening a database could cause
-     a "Use of uninitialized value in subroutine entry" warning. Now silenced.
-
-   * DBM filter code beefed up to cope with read-only $_.
-
-1.804 2nd June 2002
-
-   * Perl core patch 14939 added a new warning to "splice". This broke the
-     db-recno test harness. Fixed.
-
-   * merged core patches 16502 & 16540.
-
-1.803 1st March 2002
-
-   * Fixed a problem with db-btree.t where it complained about an "our"
-     variable redeclaation.
-
-   * FETCH, STORE & DELETE don't map the flags parameter into the
-     equivalent Berkeley DB function anymore.
-
-1.802 6th January 2002
-
-   * The message about some test failing in db-recno.t had the wrong test
-     numbers. Fixed.
-
-   * merged core patch 13942.
-
-1.801 26th November 2001
-
-   * Fixed typo in Makefile.PL
-
-   * Added "clean" attribute to Makefile.PL
- 
-1.800 23rd November 2001
-
-   * use pport.h for perl backward compatability code.
-
-   * use new  ExtUtils::Constant module to generate XS constants.
-
-   * upgrade Makefile.PL upgrade/downgrade code to toggle "our" with
-     "use vars"
-
-1.79 22nd October 2001
-
-   * Added a "local $SIG{__DIE__}" inside the eval that checks for
-     the presence of XSLoader s suggested by Andrew Hryckowin.
-
-   * merged core patch 12277.
-
-   * Changed NEXTKEY to not initialise the input key. It isn't used anyway.
-
-1.79 22nd October 2001
-
-   * Fixed test harness for cygwin
-
-1.78 30th July 2001
-
-   * the test in Makefile.PL for AIX used -plthreads. Should have been
-     -lpthreads
-
-   * merged Core patches 
-        10372, 10335, 10372, 10534, 10549, 10643, 11051, 11194, 11432
-
-   * added documentation patch regarding duplicate keys from Andrew Johnson
-
-
-1.77 26th April 2001
-
-   * AIX is reported to need -lpthreads, so Makefile.PL now checks for
-     AIX and adds it to the link options.
-
-   * Minor documentation updates.
-
-   * Merged Core patch 9176
-
-   * Added a patch from Edward Avis that adds support for splice with
-     recno databases.
-
-   * Modified Makefile.PL to only enable the warnings pragma if using perl
-     5.6.1 or better.    
-
-1.76 15th January 2001
-
-   * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
-     with DB_File on Linux. Thanks to Norbert Bollow for sending details of
-     this approach.
-
-
-1.75 17th December 2000
-
-   * Fixed perl core patch 7703
-
-   * Added suppport to allow DB_File to be built with Berkeley DB 3.2 --
-     btree_compare, btree_prefix and hash_cb needed to be changed.
-
-   * Updated dbinfo to support Berkeley DB 3.2 file format changes.
-
-
-1.74 10th December 2000
-
-   * A "close" call in DB_File.xs needed parenthesised to stop win32 from
-     thinking it was one of its macros.
-
-   * Updated dbinfo to support Berkeley DB 3.1 file format changes.
-
-   * DB_File.pm & the test hasness now use the warnings pragma (when
-     available).
-
-   * Included Perl core patch 7703 -- size argument for hash_cb is different
-     for Berkeley DB 3.x
-
-   * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
-     treatment.
-
-   * @a = () produced the warning 'Argument "" isn't numeric in entersub'
-     This has been fixed. Thanks to Edward Avis for spotting this bug.
-
-   * Added note about building under Linux. Included patches.
-
-   * Included Perl core patch 8068 -- fix for bug 20001013.009 
-     When run with warnings enabled "$hash{XX} = undef " produced an
-     "Uninitialized value" warning. This has been fixed.
-
-1.73 31st May 2000
-
-   * Added support in version.c for building with threaded Perl.
-
-   * Berkeley DB 3.1 has reenabled support for null keys. The test
-     harness has been updated to reflect this.
-
-1.72 16th January 2000
-
-   * Added hints/sco.pl
-
-   * The module will now use XSLoader when it is available. When it
-     isn't it will use DynaLoader.
-
-   * The locking section in DB_File.pm has been discredited. Many thanks
-     to David Harris for spotting the underlying problem, contributing
-     the updates to the documentation and writing DB_File::Lock (available
-     on CPAN).
-
-1.71 7th September 1999
-
-   * Fixed a bug that prevented 1.70 from compiling under win32
-
-   * Updated to support Berkeley DB 3.x
-
-   * Updated dbinfo for Berkeley DB 3.x file formats.
-
-1.70 4th August 1999
-
-   * Initialise $DB_File::db_ver and $DB_File::db_version with
-     GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
-
-   * Added a BOOT check to test for equivalent versions of db.h &
-     libdb.a/so.
-
-1.69 3rd August 1999
-
-   * fixed a bug in push -- DB_APPEND wasn't working properly.
-
-   * Fixed the R_SETCURSOR bug introduced in 1.68
-
-   * Added a new Perl variable $DB_File::db_ver
-   
-1.68 22nd July 1999
-
-   * Merged changes from 5.005_58 
-
-   * Fixed a bug in R_IBEFORE & R_IAFTER procesing in Berkeley DB
-     2 databases.
-
-   * Added some of the examples in the POD into the test harness.
-
-1.67 6th June 1999
-
-   * Added DBM Filter documentation to DB_File.pm
-
-   * Fixed DBM Filter code to work with 5.004
-
-   * A few instances of newSVpvn were used in 1.66. This isn't available in
-     Perl 5.004_04 or earlier. Replaced with newSVpv.
-
-1.66 15th March 1999
-
-   * Added DBM Filter code
-
-1.65 6th March 1999
-
-   * Fixed a bug in the recno PUSH logic.
-   * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
-
-1.64 21st February 1999
-
-   * Tidied the 1.x to 2.x flag mapping code.
-   * Added a patch from Mark Kettenis  to fix a flag
-     mapping problem with O_RDONLY on the Hurd
-   * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
-
-1.63 19th December 1998
-
-   * Fix to allow DB 2.6.x to build with DB_File
-   * Documentation updated to use push,pop etc in the RECNO example &
-     to include the find_dup & del_dup methods.
-
-1.62 30th November 1998
-
-   Added hints/dynixptx.pl.
-   Fixed typemap -- 1.61 used PL_na instead of na
-
-1.61 19th November 1998
-
-   Added a note to README about how to build Berkeley DB 2.x when
-   using HP-UX.
-   Minor modifications to get the module to build with DB 2.5.x
-   Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
-
-1.60
-   Changed the test to check for full tied array support
-
-1.59
-   Updated the license section.
-
-   Berkeley DB 2.4.10 disallows zero length keys. Tests 32 & 42 in
-   db-btree.t and test 27 in db-hash.t failed because of this change.
-   Those tests have been zapped.
-
-   Added dbinfo to the distribution.
-
-1.58
-   Tied Array support was enhanced in Perl 5.004_57. DB_File now
-   supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
-
-   Fixed a problem with the use of sv_setpvn. When the size is
-   specified as 0, it does a strlen on the data.  This was ok for DB
-   1.x, but isn't for DB 2.x.
-
-1.57
-   If Perl has been compiled with Threads support,the symbol op will be
-   defined. This clashes with a field name in db.h, so it needs to be
-   #undef'ed before db.h is included.
-
-1.56
-   Documented the Solaris 2.5 mutex bug
-
-1.55
-   Merged 1.16 changes.
-
-1.54
-
-   Fixed a small bug in the test harness when run under win32
-   The emulation of fd when useing DB 2.x was busted.
-
-1.53
-
-   Added DB_RENUMBER to flags for recno.
-
-1.52
-
-   Patch from Nick Ing-Simmons now allows DB_File to build on NT.
-   Merged 1.15 patch.
-
-1.51
-
-    Fixed the test harness so that it doesn't expect DB_File to have
-    been installed by the main Perl build.
-
-
-    Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
-
-1.50
-
-    DB_File can now build with either DB 1.x or 2.x, but not both at
-    the same time.
-
-1.16
-
-   A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
-
-    Small fix for the AIX strict C compiler XLC which doesn't like
-    __attribute__ being defined via proto.h and redefined via db.h. Fix
-    courtesy of Jarkko Hietaniemi.
-
-1.15
-
-    Patch from Gisle Aas  to suppress "use of undefined
-    value" warning with db_get and db_seq.
-
-    Patch from Gisle Aas  to make DB_File export only the
-    O_* constants from Fcntl.
-
-    Removed the DESTROY method from the DB_File::HASHINFO module.
-
-    Previously DB_File hard-wired the class name of any object that it
-    created to "DB_File". This makes sub-classing difficult. Now
-    DB_File creats objects in the namespace of the package it has been
-    inherited into.
-
-
-1.14
-
-    Made it illegal to tie an associative array to a RECNO database and
-    an ordinary array to a HASH or BTREE database.
-
-1.13
-
-    Minor changes to DB_FIle.xs and DB_File.pm
-
-1.12
-
-    Documented the incompatibility with version 2 of Berkeley DB.
-
-1.11
-
-    Documented the untie gotcha.
-
-1.10
-
-    Fixed fd method so that it still returns -1 for in-memory files
-    when db 1.86 is used.
-
-1.09
-
-    Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
-    DB_File::BTREEINFO.
-
-    Changed default mode to 0666.
-
-1.08
-
-    Documented operation of bval.
-
-1.07
-
-    Fixed bug with RECNO, where bval wasn't defaulting to "\n".
-
-1.06
-
-    Minor namespace cleanup: Localized PrintBtree.
-
-1.05
-
-    Made all scripts in the documentation strict and -w clean.
-
-    Added logic to DB_File.xs to allow the module to be built after
-    Perl is installed.
-
-1.04
-
-    Minor documentation changes.
-
-    Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
-    .
-
-    Fixed a bug with the constructors for DB_File::HASHINFO,
-    DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
-    constructors to make them -w clean.
-
-    Reworked part of the test harness to be more locale friendly.
-
-1.03
-
-    Documentation update.
-
-    DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
-    automatically.
-
-    The standard hash function exists is now supported.
-
-    Modified the behavior of get_dup. When it returns an associative
-    array, the value is the count of the number of matching BTREE
-    values.
-
-1.02
-
-    Merged OS/2 specific code into DB_File.xs
-
-    Removed some redundant code in DB_File.xs.
-
-    Documentation update.
-
-    Allow negative subscripts with RECNO interface.
-
-    Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
-
-    The example code which showed how to lock a database needed a call
-    to sync added. Without it the resultant database file was empty.
-
-    Added get_dup method.
-
-1.01
-
-    Fixed a core dump problem with SunOS.
-
-    The return value from TIEHASH wasn't set to NULL when dbopen
-    returned an error.
-
-1.0
-
-    DB_File has been in use for over a year. To reflect that, the
-    version number has been incremented to 1.0.
-
-    Added complete support for multiple concurrent callbacks.
-
-    Using the push method on an empty list didn't work properly. This
-    has been fixed.
-
-0.3
-
-    Added prototype support for multiple btree compare callbacks.
-
-0.2
-
-    When DB_File is opening a database file it no longer terminates the
-    process if dbopen returned an error. This allows file protection
-    errors to be caught at run time. Thanks to Judith Grass
-     for spotting the bug.
-
-0.1
-
-    First Release.
-
diff --git a/storage/bdb/perl/DB_File/DB_File.pm b/storage/bdb/perl/DB_File/DB_File.pm
deleted file mode 100644
index 5ddac46c963..00000000000
--- a/storage/bdb/perl/DB_File/DB_File.pm
+++ /dev/null
@@ -1,2292 +0,0 @@
-# DB_File.pm -- Perl 5 interface to Berkeley DB 
-#
-# written by Paul Marquess (pmqs@cpan.org)
-# last modified 7th August 2004
-# version 1.810
-#
-#     Copyright (c) 1995-2004 Paul Marquess. All rights reserved.
-#     This program is free software; you can redistribute it and/or
-#     modify it under the same terms as Perl itself.
-
-
-package DB_File::HASHINFO ;
-
-require 5.00404;
-
-use warnings;
-use strict;
-use Carp;
-require Tie::Hash;
-@DB_File::HASHINFO::ISA = qw(Tie::Hash);
-
-sub new
-{
-    my $pkg = shift ;
-    my %x ;
-    tie %x, $pkg ;
-    bless \%x, $pkg ;
-}
-
-
-sub TIEHASH
-{
-    my $pkg = shift ;
-
-    bless { VALID => { 
-		       	bsize	  => 1,
-			ffactor	  => 1,
-			nelem	  => 1,
-			cachesize => 1,
-			hash	  => 2,
-			lorder	  => 1,
-		     }, 
-	    GOT   => {}
-          }, $pkg ;
-}
-
-
-sub FETCH 
-{  
-    my $self  = shift ;
-    my $key   = shift ;
-
-    return $self->{GOT}{$key} if exists $self->{VALID}{$key}  ;
-
-    my $pkg = ref $self ;
-    croak "${pkg}::FETCH - Unknown element '$key'" ;
-}
-
-
-sub STORE 
-{
-    my $self  = shift ;
-    my $key   = shift ;
-    my $value = shift ;
-
-    my $type = $self->{VALID}{$key};
-
-    if ( $type )
-    {
-    	croak "Key '$key' not associated with a code reference" 
-	    if $type == 2 && !ref $value && ref $value ne 'CODE';
-        $self->{GOT}{$key} = $value ;
-        return ;
-    }
-    
-    my $pkg = ref $self ;
-    croak "${pkg}::STORE - Unknown element '$key'" ;
-}
-
-sub DELETE 
-{
-    my $self = shift ;
-    my $key  = shift ;
-
-    if ( exists $self->{VALID}{$key} )
-    {
-        delete $self->{GOT}{$key} ;
-        return ;
-    }
-    
-    my $pkg = ref $self ;
-    croak "DB_File::HASHINFO::DELETE - Unknown element '$key'" ;
-}
-
-sub EXISTS
-{
-    my $self = shift ;
-    my $key  = shift ;
-
-    exists $self->{VALID}{$key} ;
-}
-
-sub NotHere
-{
-    my $self = shift ;
-    my $method = shift ;
-
-    croak ref($self) . " does not define the method ${method}" ;
-}
-
-sub FIRSTKEY { my $self = shift ; $self->NotHere("FIRSTKEY") }
-sub NEXTKEY  { my $self = shift ; $self->NotHere("NEXTKEY") }
-sub CLEAR    { my $self = shift ; $self->NotHere("CLEAR") }
-
-package DB_File::RECNOINFO ;
-
-use warnings;
-use strict ;
-
-@DB_File::RECNOINFO::ISA = qw(DB_File::HASHINFO) ;
-
-sub TIEHASH
-{
-    my $pkg = shift ;
-
-    bless { VALID => { map {$_, 1} 
-		       qw( bval cachesize psize flags lorder reclen bfname )
-		     },
-	    GOT   => {},
-          }, $pkg ;
-}
-
-package DB_File::BTREEINFO ;
-
-use warnings;
-use strict ;
-
-@DB_File::BTREEINFO::ISA = qw(DB_File::HASHINFO) ;
-
-sub TIEHASH
-{
-    my $pkg = shift ;
-
-    bless { VALID => { 
-		      	flags	   => 1,
-			cachesize  => 1,
-			maxkeypage => 1,
-			minkeypage => 1,
-			psize	   => 1,
-			compare	   => 2,
-			prefix	   => 2,
-			lorder	   => 1,
-	    	     },
-	    GOT   => {},
-          }, $pkg ;
-}
-
-
-package DB_File ;
-
-use warnings;
-use strict;
-our ($VERSION, @ISA, @EXPORT, $AUTOLOAD, $DB_BTREE, $DB_HASH, $DB_RECNO);
-our ($db_version, $use_XSLoader, $splice_end_array);
-use Carp;
-
-
-$VERSION = "1.810" ;
-
-{
-    local $SIG{__WARN__} = sub {$splice_end_array = "@_";};
-    my @a =(1); splice(@a, 3);
-    $splice_end_array = 
-        ($splice_end_array =~ /^splice\(\) offset past end of array at /);
-}      
-
-#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
-$DB_BTREE = new DB_File::BTREEINFO ;
-$DB_HASH  = new DB_File::HASHINFO ;
-$DB_RECNO = new DB_File::RECNOINFO ;
-
-require Tie::Hash;
-require Exporter;
-use AutoLoader;
-BEGIN {
-    $use_XSLoader = 1 ;
-    { local $SIG{__DIE__} ; eval { require XSLoader } ; }
-
-    if ($@) {
-        $use_XSLoader = 0 ;
-        require DynaLoader;
-        @ISA = qw(DynaLoader);
-    }
-}
-
-push @ISA, qw(Tie::Hash Exporter);
-@EXPORT = qw(
-        $DB_BTREE $DB_HASH $DB_RECNO 
-
-	BTREEMAGIC
-	BTREEVERSION
-	DB_LOCK
-	DB_SHMEM
-	DB_TXN
-	HASHMAGIC
-	HASHVERSION
-	MAX_PAGE_NUMBER
-	MAX_PAGE_OFFSET
-	MAX_REC_NUMBER
-	RET_ERROR
-	RET_SPECIAL
-	RET_SUCCESS
-	R_CURSOR
-	R_DUP
-	R_FIRST
-	R_FIXEDLEN
-	R_IAFTER
-	R_IBEFORE
-	R_LAST
-	R_NEXT
-	R_NOKEY
-	R_NOOVERWRITE
-	R_PREV
-	R_RECNOSYNC
-	R_SETCURSOR
-	R_SNAPSHOT
-	__R_UNUSED
-
-);
-
-sub AUTOLOAD {
-    my($constname);
-    ($constname = $AUTOLOAD) =~ s/.*:://;
-    my ($error, $val) = constant($constname);
-    Carp::croak $error if $error;
-    no strict 'refs';
-    *{$AUTOLOAD} = sub { $val };
-    goto &{$AUTOLOAD};
-}           
-
-
-eval {
-    # Make all Fcntl O_XXX constants available for importing
-    require Fcntl;
-    my @O = grep /^O_/, @Fcntl::EXPORT;
-    Fcntl->import(@O);  # first we import what we want to export
-    push(@EXPORT, @O);
-};
-
-if ($use_XSLoader)
-  { XSLoader::load("DB_File", $VERSION)}
-else
-  { bootstrap DB_File $VERSION }
-
-# Preloaded methods go here.  Autoload methods go after __END__, and are
-# processed by the autosplit program.
-
-sub tie_hash_or_array
-{
-    my (@arg) = @_ ;
-    my $tieHASH = ( (caller(1))[3] =~ /TIEHASH/ ) ;
-
-    $arg[4] = tied %{ $arg[4] } 
-	if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
-
-    $arg[2] = O_CREAT()|O_RDWR() if @arg >=3 && ! defined $arg[2];
-    $arg[3] = 0666               if @arg >=4 && ! defined $arg[3];
-
-    # make recno in Berkeley DB version 2 (or better) work like 
-    # recno in version 1.
-    if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and 
-	$arg[1] and ! -e $arg[1]) {
-	open(FH, ">$arg[1]") or return undef ;
-	close FH ;
-	chmod $arg[3] ? $arg[3] : 0666 , $arg[1] ;
-    }
-
-    DoTie_($tieHASH, @arg) ;
-}
-
-sub TIEHASH
-{
-    tie_hash_or_array(@_) ;
-}
-
-sub TIEARRAY
-{
-    tie_hash_or_array(@_) ;
-}
-
-sub CLEAR 
-{
-    my $self = shift;
-    my $key = 0 ;
-    my $value = "" ;
-    my $status = $self->seq($key, $value, R_FIRST());
-    my @keys;
- 
-    while ($status == 0) {
-        push @keys, $key;
-        $status = $self->seq($key, $value, R_NEXT());
-    }
-    foreach $key (reverse @keys) {
-        my $s = $self->del($key); 
-    }
-}
-
-sub EXTEND { }
-
-sub STORESIZE
-{
-    my $self = shift;
-    my $length = shift ;
-    my $current_length = $self->length() ;
-
-    if ($length < $current_length) {
-	my $key ;
-        for ($key = $current_length - 1 ; $key >= $length ; -- $key)
-	  { $self->del($key) }
-    }
-    elsif ($length > $current_length) {
-        $self->put($length-1, "") ;
-    }
-}
- 
-
-sub SPLICE
-{
-    my $self = shift;
-    my $offset = shift;
-    if (not defined $offset) {
-	warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
-	$offset = 0;
-    }
-
-    my $length = @_ ? shift : 0;
-    # Carping about definedness comes _after_ the OFFSET sanity check.
-    # This is so we get the same error messages as Perl's splice().
-    # 
-
-    my @list = @_;
-
-    my $size = $self->FETCHSIZE();
-    
-    # 'If OFFSET is negative then it start that far from the end of
-    # the array.'
-    # 
-    if ($offset < 0) {
-	my $new_offset = $size + $offset;
-	if ($new_offset < 0) {
-	    die "Modification of non-creatable array value attempted, "
-	      . "subscript $offset";
-	}
-	$offset = $new_offset;
-    }
-
-    if (not defined $length) {
-	warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
-	$length = 0;
-    }
-
-    if ($offset > $size) {
- 	$offset = $size;
-	warnings::warnif('misc', 'splice() offset past end of array')
-            if $splice_end_array;
-    }
-
-    # 'If LENGTH is omitted, removes everything from OFFSET onward.'
-    if (not defined $length) {
-	$length = $size - $offset;
-    }
-
-    # 'If LENGTH is negative, leave that many elements off the end of
-    # the array.'
-    # 
-    if ($length < 0) {
-	$length = $size - $offset + $length;
-
-	if ($length < 0) {
-	    # The user must have specified a length bigger than the
-	    # length of the array passed in.  But perl's splice()
-	    # doesn't catch this, it just behaves as for length=0.
-	    # 
-	    $length = 0;
-	}
-    }
-
-    if ($length > $size - $offset) {
-	$length = $size - $offset;
-    }
-
-    # $num_elems holds the current number of elements in the database.
-    my $num_elems = $size;
-
-    # 'Removes the elements designated by OFFSET and LENGTH from an
-    # array,'...
-    # 
-    my @removed = ();
-    foreach (0 .. $length - 1) {
-	my $old;
-	my $status = $self->get($offset, $old);
-	if ($status != 0) {
-	    my $msg = "error from Berkeley DB on get($offset, \$old)";
-	    if ($status == 1) {
-		$msg .= ' (no such element?)';
-	    }
-	    else {
-		$msg .= ": error status $status";
-		if (defined $! and $! ne '') {
-		    $msg .= ", message $!";
-		}
-	    }
-	    die $msg;
-	}
-	push @removed, $old;
-
-	$status = $self->del($offset);
-	if ($status != 0) {
-	    my $msg = "error from Berkeley DB on del($offset)";
-	    if ($status == 1) {
-		$msg .= ' (no such element?)';
-	    }
-	    else {
-		$msg .= ": error status $status";
-		if (defined $! and $! ne '') {
-		    $msg .= ", message $!";
-		}
-	    }
-	    die $msg;
-	}
-
-	-- $num_elems;
-    }
-
-    # ...'and replaces them with the elements of LIST, if any.'
-    my $pos = $offset;
-    while (defined (my $elem = shift @list)) {
-	my $old_pos = $pos;
-	my $status;
-	if ($pos >= $num_elems) {
-	    $status = $self->put($pos, $elem);
-	}
-	else {
-	    $status = $self->put($pos, $elem, $self->R_IBEFORE);
-	}
-
-	if ($status != 0) {
-	    my $msg = "error from Berkeley DB on put($pos, $elem, ...)";
-	    if ($status == 1) {
-		$msg .= ' (no such element?)';
-	    }
-	    else {
-		$msg .= ", error status $status";
-		if (defined $! and $! ne '') {
-		    $msg .= ", message $!";
-		}
-	    }
-	    die $msg;
-	}
-
-	die "pos unexpectedly changed from $old_pos to $pos with R_IBEFORE"
-	  if $old_pos != $pos;
-
-	++ $pos;
-	++ $num_elems;
-    }
-
-    if (wantarray) {
-	# 'In list context, returns the elements removed from the
-	# array.'
-	# 
-	return @removed;
-    }
-    elsif (defined wantarray and not wantarray) {
-	# 'In scalar context, returns the last element removed, or
-	# undef if no elements are removed.'
-	# 
-	if (@removed) {
-	    my $last = pop @removed;
-	    return "$last";
-	}
-	else {
-	    return undef;
-	}
-    }
-    elsif (not defined wantarray) {
-	# Void context
-    }
-    else { die }
-}
-sub ::DB_File::splice { &SPLICE }
-
-sub find_dup
-{
-    croak "Usage: \$db->find_dup(key,value)\n"
-        unless @_ == 3 ;
- 
-    my $db        = shift ;
-    my ($origkey, $value_wanted) = @_ ;
-    my ($key, $value) = ($origkey, 0);
-    my ($status) = 0 ;
-
-    for ($status = $db->seq($key, $value, R_CURSOR() ) ;
-         $status == 0 ;
-         $status = $db->seq($key, $value, R_NEXT() ) ) {
-
-        return 0 if $key eq $origkey and $value eq $value_wanted ;
-    }
-
-    return $status ;
-}
-
-sub del_dup
-{
-    croak "Usage: \$db->del_dup(key,value)\n"
-        unless @_ == 3 ;
- 
-    my $db        = shift ;
-    my ($key, $value) = @_ ;
-    my ($status) = $db->find_dup($key, $value) ;
-    return $status if $status != 0 ;
-
-    $status = $db->del($key, R_CURSOR() ) ;
-    return $status ;
-}
-
-sub get_dup
-{
-    croak "Usage: \$db->get_dup(key [,flag])\n"
-        unless @_ == 2 or @_ == 3 ;
- 
-    my $db        = shift ;
-    my $key       = shift ;
-    my $flag	  = shift ;
-    my $value 	  = 0 ;
-    my $origkey   = $key ;
-    my $wantarray = wantarray ;
-    my %values	  = () ;
-    my @values    = () ;
-    my $counter   = 0 ;
-    my $status    = 0 ;
- 
-    # iterate through the database until either EOF ($status == 0)
-    # or a different key is encountered ($key ne $origkey).
-    for ($status = $db->seq($key, $value, R_CURSOR()) ;
-	 $status == 0 and $key eq $origkey ;
-         $status = $db->seq($key, $value, R_NEXT()) ) {
- 
-        # save the value or count number of matches
-        if ($wantarray) {
-	    if ($flag)
-                { ++ $values{$value} }
-	    else
-                { push (@values, $value) }
-	}
-        else
-            { ++ $counter }
-     
-    }
- 
-    return ($wantarray ? ($flag ? %values : @values) : $counter) ;
-}
-
-
-1;
-__END__
-
-=head1 NAME
-
-DB_File - Perl5 access to Berkeley DB version 1.x
-
-=head1 SYNOPSIS
-
- use DB_File;
-
- [$X =] tie %hash,  'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
- [$X =] tie %hash,  'DB_File', $filename, $flags, $mode, $DB_BTREE ;
- [$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
-
- $status = $X->del($key [, $flags]) ;
- $status = $X->put($key, $value [, $flags]) ;
- $status = $X->get($key, $value [, $flags]) ;
- $status = $X->seq($key, $value, $flags) ;
- $status = $X->sync([$flags]) ;
- $status = $X->fd ;
-
- # BTREE only
- $count = $X->get_dup($key) ;
- @list  = $X->get_dup($key) ;
- %list  = $X->get_dup($key, 1) ;
- $status = $X->find_dup($key, $value) ;
- $status = $X->del_dup($key, $value) ;
-
- # RECNO only
- $a = $X->length;
- $a = $X->pop ;
- $X->push(list);
- $a = $X->shift;
- $X->unshift(list);
- @r = $X->splice(offset, length, elements);
-
- # DBM Filters
- $old_filter = $db->filter_store_key  ( sub { ... } ) ;
- $old_filter = $db->filter_store_value( sub { ... } ) ;
- $old_filter = $db->filter_fetch_key  ( sub { ... } ) ;
- $old_filter = $db->filter_fetch_value( sub { ... } ) ;
-
- untie %hash ;
- untie @array ;
-
-=head1 DESCRIPTION
-
-B is a module which allows Perl programs to make use of the
-facilities provided by Berkeley DB version 1.x (if you have a newer
-version of DB, see L).
-It is assumed that you have a copy of the Berkeley DB manual pages at
-hand when reading this documentation. The interface defined here
-mirrors the Berkeley DB interface closely.
-
-Berkeley DB is a C library which provides a consistent interface to a
-number of database formats.  B provides an interface to all
-three of the database types currently supported by Berkeley DB.
-
-The file types are:
-
-=over 5
-
-=item B
-
-This database type allows arbitrary key/value pairs to be stored in data
-files. This is equivalent to the functionality provided by other
-hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
-the files created using DB_HASH are not compatible with any of the
-other packages mentioned.
-
-A default hashing algorithm, which will be adequate for most
-applications, is built into Berkeley DB. If you do need to use your own
-hashing algorithm it is possible to write your own in Perl and have
-B use it instead.
-
-=item B
-
-The btree format allows arbitrary key/value pairs to be stored in a
-sorted, balanced binary tree.
-
-As with the DB_HASH format, it is possible to provide a user defined
-Perl routine to perform the comparison of keys. By default, though, the
-keys are stored in lexical order.
-
-=item B
-
-DB_RECNO allows both fixed-length and variable-length flat text files
-to be manipulated using the same key/value pair interface as in DB_HASH
-and DB_BTREE.  In this case the key will consist of a record (line)
-number.
-
-=back
-
-=head2 Using DB_File with Berkeley DB version 2 or greater
-
-Although B is intended to be used with Berkeley DB version 1,
-it can also be used with version 2, 3 or 4. In this case the interface is
-limited to the functionality provided by Berkeley DB 1.x. Anywhere the
-version 2 or greater interface differs, B arranges for it to work
-like version 1. This feature allows B scripts that were built
-with version 1 to be migrated to version 2 or greater without any changes.
-
-If you want to make use of the new features available in Berkeley DB
-2.x or greater, use the Perl module B instead.
-
-B The database file format has changed multiple times in Berkeley
-DB version 2, 3 and 4. If you cannot recreate your databases, you
-must dump any existing databases with either the C or the
-C utility that comes with Berkeley DB.
-Once you have rebuilt DB_File to use Berkeley DB version 2 or greater,
-your databases can be recreated using C. Refer to the Berkeley DB
-documentation for further details.
-
-Please read L<"COPYRIGHT"> before using version 2.x or greater of Berkeley
-DB with DB_File.
-
-=head2 Interface to Berkeley DB
-
-B allows access to Berkeley DB files using the tie() mechanism
-in Perl 5 (for full details, see L). This facility
-allows B to access Berkeley DB files using either an
-associative array (for DB_HASH & DB_BTREE file types) or an ordinary
-array (for the DB_RECNO file type).
-
-In addition to the tie() interface, it is also possible to access most
-of the functions provided in the Berkeley DB API directly.
-See L.
-
-=head2 Opening a Berkeley DB Database File
-
-Berkeley DB uses the function dbopen() to open or create a database.
-Here is the C prototype for dbopen():
-
-      DB*
-      dbopen (const char * file, int flags, int mode, 
-              DBTYPE type, const void * openinfo)
-
-The parameter C is an enumeration which specifies which of the 3
-interface methods (DB_HASH, DB_BTREE or DB_RECNO) is to be used.
-Depending on which of these is actually chosen, the final parameter,
-I points to a data structure which allows tailoring of the
-specific interface method.
-
-This interface is handled slightly differently in B. Here is
-an equivalent call using B:
-
-        tie %array, 'DB_File', $filename, $flags, $mode, $DB_HASH ;
-
-The C, C and C parameters are the direct
-equivalent of their dbopen() counterparts. The final parameter $DB_HASH
-performs the function of both the C and C parameters in
-dbopen().
-
-In the example above $DB_HASH is actually a pre-defined reference to a
-hash object. B has three of these pre-defined references.
-Apart from $DB_HASH, there is also $DB_BTREE and $DB_RECNO.
-
-The keys allowed in each of these pre-defined references is limited to
-the names used in the equivalent C structure. So, for example, the
-$DB_HASH reference will only allow keys called C, C,
-C, C, C and C. 
-
-To change one of these elements, just assign to it like this:
-
-	$DB_HASH->{'cachesize'} = 10000 ;
-
-The three predefined variables $DB_HASH, $DB_BTREE and $DB_RECNO are
-usually adequate for most applications.  If you do need to create extra
-instances of these objects, constructors are available for each file
-type.
-
-Here are examples of the constructors and the valid options available
-for DB_HASH, DB_BTREE and DB_RECNO respectively.
-
-     $a = new DB_File::HASHINFO ;
-     $a->{'bsize'} ;
-     $a->{'cachesize'} ;
-     $a->{'ffactor'};
-     $a->{'hash'} ;
-     $a->{'lorder'} ;
-     $a->{'nelem'} ;
-
-     $b = new DB_File::BTREEINFO ;
-     $b->{'flags'} ;
-     $b->{'cachesize'} ;
-     $b->{'maxkeypage'} ;
-     $b->{'minkeypage'} ;
-     $b->{'psize'} ;
-     $b->{'compare'} ;
-     $b->{'prefix'} ;
-     $b->{'lorder'} ;
-
-     $c = new DB_File::RECNOINFO ;
-     $c->{'bval'} ;
-     $c->{'cachesize'} ;
-     $c->{'psize'} ;
-     $c->{'flags'} ;
-     $c->{'lorder'} ;
-     $c->{'reclen'} ;
-     $c->{'bfname'} ;
-
-The values stored in the hashes above are mostly the direct equivalent
-of their C counterpart. Like their C counterparts, all are set to a
-default values - that means you don't have to set I of the
-values when you only want to change one. Here is an example:
-
-     $a = new DB_File::HASHINFO ;
-     $a->{'cachesize'} =  12345 ;
-     tie %y, 'DB_File', "filename", $flags, 0777, $a ;
-
-A few of the options need extra discussion here. When used, the C
-equivalent of the keys C, C and C store pointers
-to C functions. In B these keys are used to store references
-to Perl subs. Below are templates for each of the subs:
-
-    sub hash
-    {
-        my ($data) = @_ ;
-        ...
-        # return the hash value for $data
-	return $hash ;
-    }
-
-    sub compare
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return  0 if $key1 eq $key2
-        #        -1 if $key1 lt $key2
-        #         1 if $key1 gt $key2
-        return (-1 , 0 or 1) ;
-    }
-
-    sub prefix
-    {
-	my ($key, $key2) = @_ ;
-        ...
-        # return number of bytes of $key2 which are 
-        # necessary to determine that it is greater than $key1
-        return $bytes ;
-    }
-
-See L for an example of using the
-C template.
-
-If you are using the DB_RECNO interface and you intend making use of
-C, you should check out L.
-
-=head2 Default Parameters
-
-It is possible to omit some or all of the final 4 parameters in the
-call to C and let them take default values. As DB_HASH is the most
-common file format used, the call:
-
-    tie %A, "DB_File", "filename" ;
-
-is equivalent to:
-
-    tie %A, "DB_File", "filename", O_CREAT|O_RDWR, 0666, $DB_HASH ;
-
-It is also possible to omit the filename parameter as well, so the
-call:
-
-    tie %A, "DB_File" ;
-
-is equivalent to:
-
-    tie %A, "DB_File", undef, O_CREAT|O_RDWR, 0666, $DB_HASH ;
-
-See L for a discussion on the use of C
-in place of a filename.
-
-=head2 In Memory Databases
-
-Berkeley DB allows the creation of in-memory databases by using NULL
-(that is, a C<(char *)0> in C) in place of the filename.  B
-uses C instead of NULL to provide this functionality.
-
-=head1 DB_HASH
-
-The DB_HASH file format is probably the most commonly used of the three
-file formats that B supports. It is also very straightforward
-to use.
-
-=head2 A Simple Example
-
-This example shows how to create a database, add key/value pairs to the
-database, delete keys/value pairs and finally how to enumerate the
-contents of the database.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    our (%h, $k, $v) ;
-
-    unlink "fruit" ;
-    tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0666, $DB_HASH 
-        or die "Cannot open file 'fruit': $!\n";
-
-    # Add a few key/value pairs to the file
-    $h{"apple"} = "red" ;
-    $h{"orange"} = "orange" ;
-    $h{"banana"} = "yellow" ;
-    $h{"tomato"} = "red" ;
-
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $h{"banana"} ;
-
-    # Delete a key/value pair.
-    delete $h{"apple"} ;
-
-    # print the contents of the file
-    while (($k, $v) = each %h)
-      { print "$k -> $v\n" }
-
-    untie %h ;
-
-here is the output:
-
-    Banana Exists
-
-    orange -> orange
-    tomato -> red
-    banana -> yellow
-
-Note that the like ordinary associative arrays, the order of the keys
-retrieved is in an apparently random order.
-
-=head1 DB_BTREE
-
-The DB_BTREE format is useful when you want to store data in a given
-order. By default the keys will be stored in lexical order, but as you
-will see from the example shown in the next section, it is very easy to
-define your own sorting function.
-
-=head2 Changing the BTREE sort order
-
-This script shows how to override the default sorting algorithm that
-BTREE uses. Instead of using the normal lexical ordering, a case
-insensitive compare function will be used.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my %h ;
-
-    sub Compare
-    {
-        my ($key1, $key2) = @_ ;
-        "\L$key1" cmp "\L$key2" ;
-    }
-
-    # specify the Perl sub that will do the comparison
-    $DB_BTREE->{'compare'} = \&Compare ;
-
-    unlink "tree" ;
-    tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE 
-        or die "Cannot open file 'tree': $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-
-Here is the output from the code above.
-
-    mouse
-    Smith
-    Wall
-
-There are a few point to bear in mind if you want to change the
-ordering in a BTREE database:
-
-=over 5
-
-=item 1.
-
-The new compare function must be specified when you create the database.
-
-=item 2.
-
-You cannot change the ordering once the database has been created. Thus
-you must use the same compare function every time you access the
-database.
-
-=item 3
-
-Duplicate keys are entirely defined by the comparison function.
-In the case-insensitive example above, the keys: 'KEY' and 'key'
-would be considered duplicates, and assigning to the second one
-would overwrite the first. If duplicates are allowed for (with the
-R_DUP flag discussed below), only a single copy of duplicate keys
-is stored in the database --- so (again with example above) assigning
-three values to the keys: 'KEY', 'Key', and 'key' would leave just
-the first key: 'KEY' in the database with three values. For some
-situations this results in information loss, so care should be taken
-to provide fully qualified comparison functions when necessary.
-For example, the above comparison routine could be modified to
-additionally compare case-sensitively if two keys are equal in the
-case insensitive comparison:
-
-    sub compare {
-        my($key1, $key2) = @_;
-        lc $key1 cmp lc $key2 ||
-        $key1 cmp $key2;
-    }
-
-And now you will only have duplicates when the keys themselves
-are truly the same. (note: in versions of the db library prior to
-about November 1996, such duplicate keys were retained so it was
-possible to recover the original keys in sets of keys that
-compared as equal).
-
-
-=back 
-
-=head2 Handling Duplicate Keys 
-
-The BTREE file type optionally allows a single key to be associated
-with an arbitrary number of values. This option is enabled by setting
-the flags element of C<$DB_BTREE> to R_DUP when creating the database.
-
-There are some difficulties in using the tied hash interface if you
-want to manipulate a BTREE database with duplicate keys. Consider this
-code:
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, %h) ;
-
-    $filename = "tree" ;
-    unlink $filename ;
-
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
-
-    tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    # Add some key/value pairs to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-
-    # iterate through the associative array
-    # and print each key/value pair.
-    foreach (sort keys %h)
-      { print "$_  -> $h{$_}\n" }
-
-    untie %h ;
-
-Here is the output:
-
-    Smith   -> John
-    Wall    -> Larry
-    Wall    -> Larry
-    Wall    -> Larry
-    mouse   -> mickey
-
-As you can see 3 records have been successfully created with key C
-- the only thing is, when they are retrieved from the database they
-I to have the same value, namely C. The problem is caused
-by the way that the associative array interface works. Basically, when
-the associative array interface is used to fetch the value associated
-with a given key, it will only ever retrieve the first value.
-
-Although it may not be immediately obvious from the code above, the
-associative array interface can be used to write values with duplicate
-keys, but it cannot be used to read them back from the database.
-
-The way to get around this problem is to use the Berkeley DB API method
-called C.  This method allows sequential access to key/value
-pairs. See L for details of both the C method
-and the API in general.
-
-Here is the script above rewritten using the C API method.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, $x, %h, $status, $key, $value) ;
-
-    $filename = "tree" ;
-    unlink $filename ;
-
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    # Add some key/value pairs to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-
-    # iterate through the btree using seq
-    # and print each key/value pair.
-    $key = $value = 0 ;
-    for ($status = $x->seq($key, $value, R_FIRST) ;
-         $status == 0 ;
-         $status = $x->seq($key, $value, R_NEXT) )
-      {  print "$key -> $value\n" }
-
-    undef $x ;
-    untie %h ;
-
-that prints:
-
-    Smith   -> John
-    Wall    -> Brick
-    Wall    -> Brick
-    Wall    -> Larry
-    mouse   -> mickey
-
-This time we have got all the key/value pairs, including the multiple
-values associated with the key C.
-
-To make life easier when dealing with duplicate keys, B comes with 
-a few utility methods.
-
-=head2 The get_dup() Method
-
-The C method assists in
-reading duplicate values from BTREE databases. The method can take the
-following forms:
-
-    $count = $x->get_dup($key) ;
-    @list  = $x->get_dup($key) ;
-    %list  = $x->get_dup($key, 1) ;
-
-In a scalar context the method returns the number of values associated
-with the key, C<$key>.
-
-In list context, it returns all the values which match C<$key>. Note
-that the values will be returned in an apparently random order.
-
-In list context, if the second parameter is present and evaluates
-TRUE, the method returns an associative array. The keys of the
-associative array correspond to the values that matched in the BTREE
-and the values of the array are a count of the number of times that
-particular value occurred in the BTREE.
-
-So assuming the database created above, we can use C like
-this:
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, $x, %h) ;
-
-    $filename = "tree" ;
-
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    my $cnt  = $x->get_dup("Wall") ;
-    print "Wall occurred $cnt times\n" ;
-
-    my %hash = $x->get_dup("Wall", 1) ;
-    print "Larry is there\n" if $hash{'Larry'} ;
-    print "There are $hash{'Brick'} Brick Walls\n" ;
-
-    my @list = sort $x->get_dup("Wall") ;
-    print "Wall =>	[@list]\n" ;
-
-    @list = $x->get_dup("Smith") ;
-    print "Smith =>	[@list]\n" ;
-
-    @list = $x->get_dup("Dog") ;
-    print "Dog =>	[@list]\n" ;
-
-
-and it will print:
-
-    Wall occurred 3 times
-    Larry is there
-    There are 2 Brick Walls
-    Wall =>	[Brick Brick Larry]
-    Smith =>	[John]
-    Dog =>	[]
-
-=head2 The find_dup() Method
-
-    $status = $X->find_dup($key, $value) ;
-
-This method checks for the existence of a specific key/value pair. If the
-pair exists, the cursor is left pointing to the pair and the method 
-returns 0. Otherwise the method returns a non-zero value.
-
-Assuming the database from the previous example:
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, $x, %h, $found) ;
-
-    $filename = "tree" ;
-
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ; 
-    print "Larry Wall is $found there\n" ;
-
-    $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ; 
-    print "Harry Wall is $found there\n" ;
-
-    undef $x ;
-    untie %h ;
-
-prints this
-
-    Larry Wall is  there
-    Harry Wall is not there
-
-
-=head2 The del_dup() Method
-
-    $status = $X->del_dup($key, $value) ;
-
-This method deletes a specific key/value pair. It returns
-0 if they exist and have been deleted successfully.
-Otherwise the method returns a non-zero value.
-
-Again assuming the existence of the C database
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, $x, %h, $found) ;
-
-    $filename = "tree" ;
-
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    $x->del_dup("Wall", "Larry") ;
-
-    $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ; 
-    print "Larry Wall is $found there\n" ;
-
-    undef $x ;
-    untie %h ;
-
-prints this
-
-    Larry Wall is not there
-
-=head2 Matching Partial Keys 
-
-The BTREE interface has a feature which allows partial keys to be
-matched. This functionality is I available when the C method
-is used along with the R_CURSOR flag.
-
-    $x->seq($key, $value, R_CURSOR) ;
-
-Here is the relevant quote from the dbopen man page where it defines
-the use of the R_CURSOR flag with seq:
-
-    Note, for the DB_BTREE access method, the returned key is not
-    necessarily an exact match for the specified key. The returned key
-    is the smallest key greater than or equal to the specified key,
-    permitting partial key matches and range searches.
-
-In the example script below, the C sub uses this feature to find
-and print the first matching key/value pair given a partial key.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    use Fcntl ;
-
-    my ($filename, $x, %h, $st, $key, $value) ;
-
-    sub match
-    {
-        my $key = shift ;
-        my $value = 0;
-        my $orig_key = $key ;
-        $x->seq($key, $value, R_CURSOR) ;
-        print "$orig_key\t-> $key\t-> $value\n" ;
-    }
-
-    $filename = "tree" ;
-    unlink $filename ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
-        or die "Cannot open $filename: $!\n";
-
-    # Add some key/value pairs to the file
-    $h{'mouse'} = 'mickey' ;
-    $h{'Wall'} = 'Larry' ;
-    $h{'Walls'} = 'Brick' ; 
-    $h{'Smith'} = 'John' ;
-
-
-    $key = $value = 0 ;
-    print "IN ORDER\n" ;
-    for ($st = $x->seq($key, $value, R_FIRST) ;
-	 $st == 0 ;
-         $st = $x->seq($key, $value, R_NEXT) )
-
-      {  print "$key	-> $value\n" }
-
-    print "\nPARTIAL MATCH\n" ;
-
-    match "Wa" ;
-    match "A" ;
-    match "a" ;
-
-    undef $x ;
-    untie %h ;
-
-Here is the output:
-
-    IN ORDER
-    Smith -> John
-    Wall  -> Larry
-    Walls -> Brick
-    mouse -> mickey
-
-    PARTIAL MATCH
-    Wa -> Wall  -> Larry
-    A  -> Smith -> John
-    a  -> mouse -> mickey
-
-=head1 DB_RECNO
-
-DB_RECNO provides an interface to flat text files. Both variable and
-fixed length records are supported.
-
-In order to make RECNO more compatible with Perl, the array offset for
-all RECNO arrays begins at 0 rather than 1 as in Berkeley DB.
-
-As with normal Perl arrays, a RECNO array can be accessed using
-negative indexes. The index -1 refers to the last element of the array,
--2 the second last, and so on. Attempting to access an element before
-the start of the array will raise a fatal run-time error.
-
-=head2 The 'bval' Option
-
-The operation of the bval option warrants some discussion. Here is the
-definition of bval from the Berkeley DB 1.85 recno manual page:
-
-    The delimiting byte to be used to mark  the  end  of  a
-    record for variable-length records, and the pad charac-
-    ter for fixed-length records.  If no  value  is  speci-
-    fied,  newlines  (``\n'')  are  used to mark the end of
-    variable-length records and  fixed-length  records  are
-    padded with spaces.
-
-The second sentence is wrong. In actual fact bval will only default to
-C<"\n"> when the openinfo parameter in dbopen is NULL. If a non-NULL
-openinfo parameter is used at all, the value that happens to be in bval
-will be used. That means you always have to specify bval when making
-use of any of the options in the openinfo parameter. This documentation
-error will be fixed in the next release of Berkeley DB.
-
-That clarifies the situation with regards Berkeley DB itself. What
-about B? Well, the behavior defined in the quote above is
-quite useful, so B conforms to it.
-
-That means that you can specify other options (e.g. cachesize) and
-still have bval default to C<"\n"> for variable length records, and
-space for fixed length records.
-
-Also note that the bval option only allows you to specify a single byte
-as a delimiter.
-
-=head2 A Simple Example
-
-Here is a simple example that uses RECNO (if you are using a version 
-of Perl earlier than 5.004_57 this example won't work -- see 
-L for a workaround).
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_RECNO 
-        or die "Cannot open file 'text': $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    push @h, "green", "black" ;
-
-    my $elements = scalar @h ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = pop @h ;
-    print "popped $last\n" ;
-
-    unshift @h, "white" ;
-    my $first = shift @h ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    # use a negative index
-    print "The last element is $h[-1]\n" ;
-    print "The 2nd last element is $h[-2]\n" ;
-
-    untie @h ;
-
-Here is the output from the script:
-
-    The array contains 5 entries
-    popped black
-    shifted white
-    Element 1 Exists with value blue
-    The last element is green
-    The 2nd last element is yellow
-
-=head2 Extra RECNO Methods
-
-If you are using a version of Perl earlier than 5.004_57, the tied
-array interface is quite limited. In the example script above
-C, C, C, C
-or determining the array length will not work with a tied array.
-
-To make the interface more useful for older versions of Perl, a number
-of methods are supplied with B to simulate the missing array
-operations. All these methods are accessed via the object returned from
-the tie call.
-
-Here are the methods:
-
-=over 5
-
-=item B<$X-Epush(list) ;>
-
-Pushes the elements of C to the end of the array.
-
-=item B<$value = $X-Epop ;>
-
-Removes and returns the last element of the array.
-
-=item B<$X-Eshift>
-
-Removes and returns the first element of the array.
-
-=item B<$X-Eunshift(list) ;>
-
-Pushes the elements of C to the start of the array.
-
-=item B<$X-Elength>
-
-Returns the number of elements in the array.
-
-=item B<$X-Esplice(offset, length, elements);>
-
-Returns a splice of the array.
-
-=back
-
-=head2 Another Example
-
-Here is a more complete example that makes use of some of the methods
-described above. It also makes use of the API interface directly (see 
-L).
-
-    use warnings ;
-    use strict ;
-    my (@h, $H, $file, $i) ;
-    use DB_File ;
-    use Fcntl ;
-
-    $file = "text" ;
-
-    unlink $file ;
-
-    $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0666, $DB_RECNO 
-        or die "Cannot open file $file: $!\n" ;
-
-    # first create a text file to play with
-    $h[0] = "zero" ;
-    $h[1] = "one" ;
-    $h[2] = "two" ;
-    $h[3] = "three" ;
-    $h[4] = "four" ;
-
-
-    # Print the records in order.
-    #
-    # The length method is needed here because evaluating a tied
-    # array in a scalar context does not return the number of
-    # elements in the array.  
-
-    print "\nORIGINAL\n" ;
-    foreach $i (0 .. $H->length - 1) {
-        print "$i: $h[$i]\n" ;
-    }
-
-    # use the push & pop methods
-    $a = $H->pop ;
-    $H->push("last") ;
-    print "\nThe last record was [$a]\n" ;
-
-    # and the shift & unshift methods
-    $a = $H->shift ;
-    $H->unshift("first") ;
-    print "The first record was [$a]\n" ;
-
-    # Use the API to add a new record after record 2.
-    $i = 2 ;
-    $H->put($i, "Newbie", R_IAFTER) ;
-
-    # and a new record before record 1.
-    $i = 1 ;
-    $H->put($i, "New One", R_IBEFORE) ;
-
-    # delete record 3
-    $H->del(3) ;
-
-    # now print the records in reverse order
-    print "\nREVERSE\n" ;
-    for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
-      { print "$i: $h[$i]\n" }
-
-    # same again, but use the API functions instead
-    print "\nREVERSE again\n" ;
-    my ($s, $k, $v)  = (0, 0, 0) ;
-    for ($s = $H->seq($k, $v, R_LAST) ; 
-             $s == 0 ; 
-             $s = $H->seq($k, $v, R_PREV))
-      { print "$k: $v\n" }
-
-    undef $H ;
-    untie @h ;
-
-and this is what it outputs:
-
-    ORIGINAL
-    0: zero
-    1: one
-    2: two
-    3: three
-    4: four
-
-    The last record was [four]
-    The first record was [zero]
-
-    REVERSE
-    5: last
-    4: three
-    3: Newbie
-    2: one
-    1: New One
-    0: first
-
-    REVERSE again
-    5: last
-    4: three
-    3: Newbie
-    2: one
-    1: New One
-    0: first
-
-Notes:
-
-=over 5
-
-=item 1.
-
-Rather than iterating through the array, C<@h> like this:
-
-    foreach $i (@h)
-
-it is necessary to use either this:
-
-    foreach $i (0 .. $H->length - 1) 
-
-or this:
-
-    for ($a = $H->get($k, $v, R_FIRST) ;
-         $a == 0 ;
-         $a = $H->get($k, $v, R_NEXT) )
-
-=item 2.
-
-Notice that both times the C method was used the record index was
-specified using a variable, C<$i>, rather than the literal value
-itself. This is because C will return the record number of the
-inserted line via that parameter.
-
-=back
-
-=head1 THE API INTERFACE
-
-As well as accessing Berkeley DB using a tied hash or array, it is also
-possible to make direct use of most of the API functions defined in the
-Berkeley DB documentation.
-
-To do this you need to store a copy of the object returned from the tie.
-
-	$db = tie %hash, "DB_File", "filename" ;
-
-Once you have done that, you can access the Berkeley DB API functions
-as B methods directly like this:
-
-	$db->put($key, $value, R_NOOVERWRITE) ;
-
-B If you have saved a copy of the object returned from
-C, the underlying database file will I be closed until both
-the tied variable is untied and all copies of the saved object are
-destroyed. 
-
-    use DB_File ;
-    $db = tie %hash, "DB_File", "filename" 
-        or die "Cannot tie filename: $!" ;
-    ...
-    undef $db ;
-    untie %hash ;
-
-See L for more details.
-
-All the functions defined in L are available except for
-close() and dbopen() itself. The B method interface to the
-supported functions have been implemented to mirror the way Berkeley DB
-works whenever possible. In particular note that:
-
-=over 5
-
-=item *
-
-The methods return a status value. All return 0 on success.
-All return -1 to signify an error and set C<$!> to the exact
-error code. The return code 1 generally (but not always) means that the
-key specified did not exist in the database.
-
-Other return codes are defined. See below and in the Berkeley DB
-documentation for details. The Berkeley DB documentation should be used
-as the definitive source.
-
-=item *
-
-Whenever a Berkeley DB function returns data via one of its parameters,
-the equivalent B method does exactly the same.
-
-=item *
-
-If you are careful, it is possible to mix API calls with the tied
-hash/array interface in the same piece of code. Although only a few of
-the methods used to implement the tied interface currently make use of
-the cursor, you should always assume that the cursor has been changed
-any time the tied hash/array interface is used. As an example, this
-code will probably not do what you expect:
-
-    $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
-        or die "Cannot tie $filename: $!" ;
-
-    # Get the first key/value pair and set  the cursor
-    $X->seq($key, $value, R_FIRST) ;
-
-    # this line will modify the cursor
-    $count = scalar keys %x ; 
-
-    # Get the second key/value pair.
-    # oops, it didn't, it got the last key/value pair!
-    $X->seq($key, $value, R_NEXT) ;
-
-The code above can be rearranged to get around the problem, like this:
-
-    $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
-        or die "Cannot tie $filename: $!" ;
-
-    # this line will modify the cursor
-    $count = scalar keys %x ; 
-
-    # Get the first key/value pair and set  the cursor
-    $X->seq($key, $value, R_FIRST) ;
-
-    # Get the second key/value pair.
-    # worked this time.
-    $X->seq($key, $value, R_NEXT) ;
-
-=back
-
-All the constants defined in L for use in the flags parameters
-in the methods defined below are also available. Refer to the Berkeley
-DB documentation for the precise meaning of the flags values.
-
-Below is a list of the methods available.
-
-=over 5
-
-=item B<$status = $X-Eget($key, $value [, $flags]) ;>
-
-Given a key (C<$key>) this method reads the value associated with it
-from the database. The value read from the database is returned in the
-C<$value> parameter.
-
-If the key does not exist the method returns 1.
-
-No flags are currently defined for this method.
-
-=item B<$status = $X-Eput($key, $value [, $flags]) ;>
-
-Stores the key/value pair in the database.
-
-If you use either the R_IAFTER or R_IBEFORE flags, the C<$key> parameter
-will have the record number of the inserted key/value pair set.
-
-Valid flags are R_CURSOR, R_IAFTER, R_IBEFORE, R_NOOVERWRITE and
-R_SETCURSOR.
-
-=item B<$status = $X-Edel($key [, $flags]) ;>
-
-Removes all key/value pairs with key C<$key> from the database.
-
-A return code of 1 means that the requested key was not in the
-database.
-
-R_CURSOR is the only valid flag at present.
-
-=item B<$status = $X-Efd ;>
-
-Returns the file descriptor for the underlying database.
-
-See L for an explanation for why you should
-not use C to lock your database.
-
-=item B<$status = $X-Eseq($key, $value, $flags) ;>
-
-This interface allows sequential retrieval from the database. See
-L for full details.
-
-Both the C<$key> and C<$value> parameters will be set to the key/value
-pair read from the database.
-
-The flags parameter is mandatory. The valid flag values are R_CURSOR,
-R_FIRST, R_LAST, R_NEXT and R_PREV.
-
-=item B<$status = $X-Esync([$flags]) ;>
-
-Flushes any cached buffers to disk.
-
-R_RECNOSYNC is the only valid flag at present.
-
-=back
-
-=head1 DBM FILTERS
-
-A DBM Filter is a piece of code that is be used when you I
-want to make the same transformation to all keys and/or values in a
-DBM database.
-
-There are four methods associated with DBM Filters. All work identically,
-and each is used to install (or uninstall) a single DBM Filter. Each
-expects a single parameter, namely a reference to a sub. The only
-difference between them is the place that the filter is installed.
-
-To summarise:
-
-=over 5
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a key to a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you write a value to a DBM database.
-
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a key from a DBM database.
-
-=item B
-
-If a filter has been installed with this method, it will be invoked
-every time you read a value from a DBM database.
-
-=back
-
-You can use any combination of the methods, from none, to all four.
-
-All filter methods return the existing filter, if present, or C
-in not.
-
-To delete a filter pass C to it.
-
-=head2 The Filter
-
-When each filter is called by Perl, a local copy of C<$_> will contain
-the key or value to be filtered. Filtering is achieved by modifying
-the contents of C<$_>. The return code from the filter is ignored.
-
-=head2 An Example -- the NULL termination problem.
-
-Consider the following scenario. You have a DBM database
-that you need to share with a third-party C application. The C application
-assumes that I keys and values are NULL terminated. Unfortunately
-when Perl writes to DBM databases it doesn't use NULL termination, so
-your Perl application will have to manage NULL termination itself. When
-you write to the database you will have to use something like this:
-
-    $hash{"$key\0"} = "$value\0" ;
-
-Similarly the NULL needs to be taken into account when you are considering
-the length of existing keys/values.
-
-It would be much better if you could ignore the NULL terminations issue
-in the main application code and have a mechanism that automatically
-added the terminating NULL to all keys and values whenever you write to
-the database and have them removed when you read from the database. As I'm
-sure you have already guessed, this is a problem that DBM Filters can
-fix very easily.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    my %hash ;
-    my $filename = "filt" ;
-    unlink $filename ;
-
-    my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH 
-      or die "Cannot open $filename: $!\n" ;
-
-    # Install DBM Filters
-    $db->filter_fetch_key  ( sub { s/\0$//    } ) ;
-    $db->filter_store_key  ( sub { $_ .= "\0" } ) ;
-    $db->filter_fetch_value( sub { s/\0$//    } ) ;
-    $db->filter_store_value( sub { $_ .= "\0" } ) ;
-
-    $hash{"abc"} = "def" ;
-    my $a = $hash{"ABC"} ;
-    # ...
-    undef $db ;
-    untie %hash ;
-
-Hopefully the contents of each of the filters should be
-self-explanatory. Both "fetch" filters remove the terminating NULL,
-and both "store" filters add a terminating NULL.
-
-
-=head2 Another Example -- Key is a C int.
-
-Here is another real-life example. By default, whenever Perl writes to
-a DBM database it always writes the key and value as strings. So when
-you use this:
-
-    $hash{12345} = "soemthing" ;
-
-the key 12345 will get stored in the DBM database as the 5 byte string
-"12345". If you actually want the key to be stored in the DBM database
-as a C int, you will have to use C when writing, and C
-when reading.
-
-Here is a DBM Filter that does it:
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    my %hash ;
-    my $filename = "filt" ;
-    unlink $filename ;
-
-
-    my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH 
-      or die "Cannot open $filename: $!\n" ;
-
-    $db->filter_fetch_key  ( sub { $_ = unpack("i", $_) } ) ;
-    $db->filter_store_key  ( sub { $_ = pack ("i", $_) } ) ;
-    $hash{123} = "def" ;
-    # ...
-    undef $db ;
-    untie %hash ;
-
-This time only two filters have been used -- we only need to manipulate
-the contents of the key, so it wasn't necessary to install any value
-filters.
-
-=head1 HINTS AND TIPS 
-
-
-=head2 Locking: The Trouble with fd
-
-Until version 1.72 of this module, the recommended technique for locking
-B databases was to flock the filehandle returned from the "fd"
-function. Unfortunately this technique has been shown to be fundamentally
-flawed (Kudos to David Harris for tracking this down). Use it at your own
-peril!
-
-The locking technique went like this. 
-
-    $db = tie(%db, 'DB_File', 'foo.db', O_CREAT|O_RDWR, 0644)
-        || die "dbcreat foo.db $!";
-    $fd = $db->fd;
-    open(DB_FH, "+<&=$fd") || die "dup $!";
-    flock (DB_FH, LOCK_EX) || die "flock: $!";
-    ...
-    $db{"Tom"} = "Jerry" ;
-    ...
-    flock(DB_FH, LOCK_UN);
-    undef $db;
-    untie %db;
-    close(DB_FH);
-
-In simple terms, this is what happens:
-
-=over 5
-
-=item 1.
-
-Use "tie" to open the database.
-
-=item 2.
-
-Lock the database with fd & flock.
-
-=item 3.
-
-Read & Write to the database.
-
-=item 4.
-
-Unlock and close the database.
-
-=back
-
-Here is the crux of the problem. A side-effect of opening the B
-database in step 2 is that an initial block from the database will get
-read from disk and cached in memory.
-
-To see why this is a problem, consider what can happen when two processes,
-say "A" and "B", both want to update the same B database
-using the locking steps outlined above. Assume process "A" has already
-opened the database and has a write lock, but it hasn't actually updated
-the database yet (it has finished step 2, but not started step 3 yet). Now
-process "B" tries to open the same database - step 1 will succeed,
-but it will block on step 2 until process "A" releases the lock. The
-important thing to notice here is that at this point in time both
-processes will have cached identical initial blocks from the database.
-
-Now process "A" updates the database and happens to change some of the
-data held in the initial buffer. Process "A" terminates, flushing
-all cached data to disk and releasing the database lock. At this point
-the database on disk will correctly reflect the changes made by process
-"A".
-
-With the lock released, process "B" can now continue. It also updates the
-database and unfortunately it too modifies the data that was in its
-initial buffer. Once that data gets flushed to disk it will overwrite
-some/all of the changes process "A" made to the database.
-
-The result of this scenario is at best a database that doesn't contain
-what you expect. At worst the database will corrupt.
-
-The above won't happen every time competing process update the same
-B database, but it does illustrate why the technique should
-not be used.
-
-=head2 Safe ways to lock a database
-
-Starting with version 2.x, Berkeley DB  has internal support for locking.
-The companion module to this one, B, provides an interface
-to this locking functionality. If you are serious about locking
-Berkeley DB databases, I strongly recommend using B.
-
-If using B isn't an option, there are a number of modules
-available on CPAN that can be used to implement locking. Each one
-implements locking differently and has different goals in mind. It is
-therefore worth knowing the difference, so that you can pick the right
-one for your application. Here are the three locking wrappers:
-
-=over 5
-
-=item B
-
-A B wrapper which creates copies of the database file for
-read access, so that you have a kind of a multiversioning concurrent read
-system. However, updates are still serial. Use for databases where reads
-may be lengthy and consistency problems may occur.
-
-=item B 
-
-A B wrapper that has the ability to lock and unlock the database
-while it is being used. Avoids the tie-before-flock problem by simply
-re-tie-ing the database when you get or drop a lock.  Because of the
-flexibility in dropping and re-acquiring the lock in the middle of a
-session, this can be massaged into a system that will work with long
-updates and/or reads if the application follows the hints in the POD
-documentation.
-
-=item B 
-
-An extremely lightweight B wrapper that simply flocks a lockfile
-before tie-ing the database and drops the lock after the untie. Allows
-one to use the same lockfile for multiple databases to avoid deadlock
-problems, if desired. Use for databases where updates are reads are
-quick and simple flock locking semantics are enough.
-
-=back
-
-=head2 Sharing Databases With C Applications
-
-There is no technical reason why a Berkeley DB database cannot be
-shared by both a Perl and a C application.
-
-The vast majority of problems that are reported in this area boil down
-to the fact that C strings are NULL terminated, whilst Perl strings are
-not. See L for a generic way to work around this problem.
-
-Here is a real example. Netscape 2.0 keeps a record of the locations you
-visit along with the time you last visited them in a DB_HASH database.
-This is usually stored in the file F<~/.netscape/history.db>. The key
-field in the database is the location string and the value field is the
-time the location was last visited stored as a 4 byte binary value.
-
-If you haven't already guessed, the location string is stored with a
-terminating NULL. This means you need to be careful when accessing the
-database.
-
-Here is a snippet of code that is loosely based on Tom Christiansen's
-I script (available from your nearest CPAN archive in
-F).
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    use Fcntl ;
-
-    my ($dotdir, $HISTORY, %hist_db, $href, $binary_time, $date) ;
-    $dotdir = $ENV{HOME} || $ENV{LOGNAME};
-
-    $HISTORY = "$dotdir/.netscape/history.db";
-
-    tie %hist_db, 'DB_File', $HISTORY
-        or die "Cannot open $HISTORY: $!\n" ;;
-
-    # Dump the complete database
-    while ( ($href, $binary_time) = each %hist_db ) {
-
-        # remove the terminating NULL
-        $href =~ s/\x00$// ;
-
-        # convert the binary time into a user friendly string
-        $date = localtime unpack("V", $binary_time);
-        print "$date $href\n" ;
-    }
-
-    # check for the existence of a specific key
-    # remember to add the NULL
-    if ( $binary_time = $hist_db{"http://mox.perl.com/\x00"} ) {
-        $date = localtime unpack("V", $binary_time) ;
-        print "Last visited mox.perl.com on $date\n" ;
-    }
-    else {
-        print "Never visited mox.perl.com\n"
-    }
-
-    untie %hist_db ;
-
-=head2 The untie() Gotcha
-
-If you make use of the Berkeley DB API, it is I strongly
-recommended that you read L. 
-
-Even if you don't currently make use of the API interface, it is still
-worth reading it.
-
-Here is an example which illustrates the problem from a B
-perspective:
-
-    use DB_File ;
-    use Fcntl ;
-
-    my %x ;
-    my $X ;
-
-    $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_TRUNC
-        or die "Cannot tie first time: $!" ;
-
-    $x{123} = 456 ;
-
-    untie %x ;
-
-    tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
-        or die "Cannot tie second time: $!" ;
-
-    untie %x ;
-
-When run, the script will produce this error message:
-
-    Cannot tie second time: Invalid argument at bad.file line 14.
-
-Although the error message above refers to the second tie() statement
-in the script, the source of the problem is really with the untie()
-statement that precedes it.
-
-Having read L you will probably have already guessed that the
-error is caused by the extra copy of the tied object stored in C<$X>.
-If you haven't, then the problem boils down to the fact that the
-B destructor, DESTROY, will not be called until I
-references to the tied object are destroyed. Both the tied variable,
-C<%x>, and C<$X> above hold a reference to the object. The call to
-untie() will destroy the first, but C<$X> still holds a valid
-reference, so the destructor will not get called and the database file
-F will remain open. The fact that Berkeley DB then reports the
-attempt to open a database that is already open via the catch-all
-"Invalid argument" doesn't help.
-
-If you run the script with the C<-w> flag the error message becomes:
-
-    untie attempted while 1 inner references still exist at bad.file line 12.
-    Cannot tie second time: Invalid argument at bad.file line 14.
-
-which pinpoints the real problem. Finally the script can now be
-modified to fix the original problem by destroying the API object
-before the untie:
-
-    ...
-    $x{123} = 456 ;
-
-    undef $X ;
-    untie %x ;
-
-    $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
-    ...
-
-
-=head1 COMMON QUESTIONS
-
-=head2 Why is there Perl source in my database?
-
-If you look at the contents of a database file created by DB_File,
-there can sometimes be part of a Perl script included in it.
-
-This happens because Berkeley DB uses dynamic memory to allocate
-buffers which will subsequently be written to the database file. Being
-dynamic, the memory could have been used for anything before DB
-malloced it. As Berkeley DB doesn't clear the memory once it has been
-allocated, the unused portions will contain random junk. In the case
-where a Perl script gets written to the database, the random junk will
-correspond to an area of dynamic memory that happened to be used during
-the compilation of the script.
-
-Unless you don't like the possibility of there being part of your Perl
-scripts embedded in a database file, this is nothing to worry about.
-
-=head2 How do I store complex data structures with DB_File?
-
-Although B cannot do this directly, there is a module which
-can layer transparently over B to accomplish this feat.
-
-Check out the MLDBM module, available on CPAN in the directory
-F.
-
-=head2 What does "Invalid Argument" mean?
-
-You will get this error message when one of the parameters in the
-C call is wrong. Unfortunately there are quite a few parameters to
-get wrong, so it can be difficult to figure out which one it is.
-
-Here are a couple of possibilities:
-
-=over 5
-
-=item 1.
-
-Attempting to reopen a database without closing it. 
-
-=item 2.
-
-Using the O_WRONLY flag.
-
-=back
-
-=head2 What does "Bareword 'DB_File' not allowed" mean? 
-
-You will encounter this particular error message when you have the
-C pragma (or the full strict pragma) in your script.
-Consider this script:
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    my %x ;
-    tie %x, DB_File, "filename" ;
-
-Running it produces the error in question:
-
-    Bareword "DB_File" not allowed while "strict subs" in use 
-
-To get around the error, place the word C in either single or
-double quotes, like this:
-
-    tie %x, "DB_File", "filename" ;
-
-Although it might seem like a real pain, it is really worth the effort
-of having a C in all your scripts.
-
-=head1 REFERENCES
-
-Articles that are either about B or make use of it.
-
-=over 5
-
-=item 1.
-
-I, Tim Kientzle (tkientzle@ddj.com),
-Dr. Dobb's Journal, Issue 295, January 1999, pp 34-41
-
-=back
-
-=head1 HISTORY
-
-Moved to the Changes file.
-
-=head1 BUGS
-
-Some older versions of Berkeley DB had problems with fixed length
-records using the RECNO file format. This problem has been fixed since
-version 1.85 of Berkeley DB.
-
-I am sure there are bugs in the code. If you do find any, or can
-suggest any enhancements, I would welcome your comments.
-
-=head1 AVAILABILITY
-
-B comes with the standard Perl source distribution. Look in
-the directory F. Given the amount of time between releases
-of Perl the version that ships with Perl is quite likely to be out of
-date, so the most recent version can always be found on CPAN (see
-L for details), in the directory
-F.
-
-This version of B will work with either version 1.x, 2.x or
-3.x of Berkeley DB, but is limited to the functionality provided by
-version 1.
-
-The official web site for Berkeley DB is F.
-All versions of Berkeley DB are available there.
-
-Alternatively, Berkeley DB version 1 is available at your nearest CPAN
-archive in F.
-
-If you are running IRIX, then get Berkeley DB version 1 from
-F. It has the patches necessary to
-compile properly on IRIX 5.3.
-
-=head1 COPYRIGHT
-
-Copyright (c) 1995-2004 Paul Marquess. All rights reserved. This program
-is free software; you can redistribute it and/or modify it under the
-same terms as Perl itself.
-
-Although B is covered by the Perl license, the library it
-makes use of, namely Berkeley DB, is not. Berkeley DB has its own
-copyright and its own license. Please take the time to read it.
-
-Here are are few words taken from the Berkeley DB FAQ (at
-F) regarding the license:
-
-    Do I have to license DB to use it in Perl scripts? 
-
-    No. The Berkeley DB license requires that software that uses
-    Berkeley DB be freely redistributable. In the case of Perl, that
-    software is Perl, and not your scripts. Any Perl scripts that you
-    write are your property, including scripts that make use of
-    Berkeley DB. Neither the Perl license nor the Berkeley DB license
-    place any restriction on what you may do with them.
-
-If you are in any doubt about the license situation, contact either the
-Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details.
-
-
-=head1 SEE ALSO
-
-L, L, L, L, L,
-L
-
-=head1 AUTHOR
-
-The DB_File interface was written by Paul Marquess
-Epmqs@cpan.orgE.
-Questions about the DB system itself may be addressed to
-Edb@sleepycat.comE.
-
-=cut
diff --git a/storage/bdb/perl/DB_File/DB_File.xs b/storage/bdb/perl/DB_File/DB_File.xs
deleted file mode 100644
index 8f6cec1cc39..00000000000
--- a/storage/bdb/perl/DB_File/DB_File.xs
+++ /dev/null
@@ -1,1969 +0,0 @@
-/* 
-
- DB_File.xs -- Perl 5 interface to Berkeley DB 
-
- written by Paul Marquess 
- last modified 7th August 2004
- version 1.810
-
- All comments/suggestions/problems are welcome
-
-     Copyright (c) 1995-2004 Paul Marquess. All rights reserved.
-     This program is free software; you can redistribute it and/or
-     modify it under the same terms as Perl itself.
-
- Changes:
-	0.1 - 	Initial Release
-	0.2 - 	No longer bombs out if dbopen returns an error.
-	0.3 - 	Added some support for multiple btree compares
-	1.0 - 	Complete support for multiple callbacks added.
-	      	Fixed a problem with pushing a value onto an empty list.
-	1.01 - 	Fixed a SunOS core dump problem.
-		The return value from TIEHASH wasn't set to NULL when
-		dbopen returned an error.
-	1.02 - 	Use ALIAS to define TIEARRAY.
-		Removed some redundant commented code.
-		Merged OS2 code into the main distribution.
-		Allow negative subscripts with RECNO interface.
-		Changed the default flags to O_CREAT|O_RDWR
-	1.03 - 	Added EXISTS
-	1.04 -  fixed a couple of bugs in hash_cb. Patches supplied by
-		Dave Hammen, hammen@gothamcity.jsc.nasa.gov
-	1.05 -  Added logic to allow prefix & hash types to be specified via
-		Makefile.PL
-	1.06 -  Minor namespace cleanup: Localized PrintBtree.
-	1.07 -  Fixed bug with RECNO, where bval wasn't defaulting to "\n". 
-	1.08 -  No change to DB_File.xs
-	1.09 -  Default mode for dbopen changed to 0666
-	1.10 -  Fixed fd method so that it still returns -1 for
-		in-memory files when db 1.86 is used.
-	1.11 -  No change to DB_File.xs
-	1.12 -  No change to DB_File.xs
-	1.13 -  Tidied up a few casts.     
-	1.14 -	Made it illegal to tie an associative array to a RECNO
-		database and an ordinary array to a HASH or BTREE database.
-	1.50 -  Make work with both DB 1.x or DB 2.x
-	1.51 -  Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
-	1.52 -  Patch from Gisle Aas  to suppress "use of 
-		undefined value" warning with db_get and db_seq.
-	1.53 -  Added DB_RENUMBER to flags for recno.
-	1.54 -  Fixed bug in the fd method
-        1.55 -  Fix for AIX from Jarkko Hietaniemi
-        1.56 -  No change to DB_File.xs
-        1.57 -  added the #undef op to allow building with Threads support.
-	1.58 -  Fixed a problem with the use of sv_setpvn. When the
-		size is specified as 0, it does a strlen on the data.
-		This was ok for DB 1.x, but isn't for DB 2.x.
-        1.59 -  No change to DB_File.xs
-        1.60 -  Some code tidy up
-        1.61 -  added flagSet macro for DB 2.5.x
-		fixed typo in O_RDONLY test.
-        1.62 -  No change to DB_File.xs
-        1.63 -  Fix to alllow DB 2.6.x to build.
-        1.64 -  Tidied up the 1.x to 2.x flags mapping code.
-		Added a patch from Mark Kettenis 
-		to fix a flag mapping problem with O_RDONLY on the Hurd
-        1.65 -  Fixed a bug in the PUSH logic.
-		Added BOOT check that using 2.3.4 or greater
-        1.66 -  Added DBM filter code
-        1.67 -  Backed off the use of newSVpvn.
-		Fixed DBM Filter code for Perl 5.004.
-		Fixed a small memory leak in the filter code.
-        1.68 -  fixed backward compatability bug with R_IAFTER & R_IBEFORE
-		merged in the 5.005_58 changes
-        1.69 -  fixed a bug in push -- DB_APPEND wasn't working properly.
-		Fixed the R_SETCURSOR bug introduced in 1.68
-		Added a new Perl variable $DB_File::db_ver 
-        1.70 -  Initialise $DB_File::db_ver and $DB_File::db_version with 
-		GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
-		Added a BOOT check to test for equivalent versions of db.h &
-		libdb.a/so.
-        1.71 -  Support for Berkeley DB version 3.
-		Support for Berkeley DB 2/3's backward compatability mode.
-		Rewrote push
-        1.72 -  No change to DB_File.xs
-        1.73 -  No change to DB_File.xs
-        1.74 -  A call to open needed parenthesised to stop it clashing
-                with a win32 macro.
-		Added Perl core patches 7703 & 7801.
-        1.75 -  Fixed Perl core patch 7703.
-		Added suppport to allow DB_File to be built with 
-		Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
-		needed to be changed.
-        1.76 -  No change to DB_File.xs
-        1.77 -  Tidied up a few types used in calling newSVpvn.
-        1.78 -  Core patch 10335, 10372, 10534, 10549, 11051 included.
-        1.79 -  NEXTKEY ignores the input key.
-                Added lots of casts
-        1.800 - Moved backward compatability code into ppport.h.
-                Use the new constants code.
-        1.801 - No change to DB_File.xs
-        1.802 - No change to DB_File.xs
-        1.803 - FETCH, STORE & DELETE don't map the flags parameter
-                into the equivalent Berkeley DB function anymore.
-        1.804 - no change.
-        1.805 - recursion detection added to the callbacks
-                Support for 4.1.X added.
-                Filter code can now cope with read-only $_
-        1.806 - recursion detection beefed up.
-        1.807 - no change
-        1.808 - leak fixed in ParseOpenInfo
-        1.809 - no change
-        1.810 - no change
-
-*/
-
-#define PERL_NO_GET_CONTEXT
-#include "EXTERN.h"  
-#include "perl.h"
-#include "XSUB.h"
-
-#ifdef _NOT_CORE
-#  include "ppport.h"
-#endif
-
-/* Mention DB_VERSION_MAJOR_CFG, DB_VERSION_MINOR_CFG, and
-   DB_VERSION_PATCH_CFG here so that Configure pulls them all in. */
-
-/* Being the Berkeley DB we prefer the  (which will be
- * shortly #included by the ) __attribute__ to the possibly
- * already defined __attribute__, for example by GNUC or by Perl. */
-
-/* #if DB_VERSION_MAJOR_CFG < 2  */
-#ifndef DB_VERSION_MAJOR
-#    undef __attribute__
-#endif
-
-#ifdef COMPAT185
-#    include 
-#else
-#    include 
-#endif
-
-/* Wall starts with 5.7.x */
-
-#if PERL_REVISION > 5 || (PERL_REVISION == 5 && PERL_VERSION >= 7)
-
-/* Since we dropped the gccish definition of __attribute__ we will want
- * to redefine dNOOP, however (so that dTHX continues to work).  Yes,
- * all this means that we can't do attribute checking on the DB_File,
- * boo, hiss. */
-#  ifndef DB_VERSION_MAJOR
-
-#    undef  dNOOP
-#    define dNOOP extern int Perl___notused
-
-    /* Ditto for dXSARGS. */
-#    undef  dXSARGS
-#    define dXSARGS				\
-	dSP; dMARK;			\
-	I32 ax = mark - PL_stack_base + 1;	\
-	I32 items = sp - mark
-
-#  endif
-
-/* avoid -Wall; DB_File xsubs never make use of `ix' setup for ALIASes */
-#  undef dXSI32
-#  define dXSI32 dNOOP
-
-#endif /* Perl >= 5.7 */
-
-#include  
-
-/* #define TRACE */
-
-#ifdef TRACE
-#    define Trace(x)        printf x
-#else
-#    define Trace(x)
-#endif
-
-
-#define DBT_clear(x)	Zero(&x, 1, DBT) ;
-
-#ifdef DB_VERSION_MAJOR
-
-#if DB_VERSION_MAJOR == 2
-#    define BERKELEY_DB_1_OR_2
-#endif
-
-#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
-#    define AT_LEAST_DB_3_2
-#endif
-
-#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
-#    define AT_LEAST_DB_4_1
-#endif
-
-/* map version 2 features & constants onto their version 1 equivalent */
-
-#ifdef DB_Prefix_t
-#    undef DB_Prefix_t
-#endif
-#define DB_Prefix_t	size_t
-
-#ifdef DB_Hash_t
-#    undef DB_Hash_t
-#endif
-#define DB_Hash_t	u_int32_t
-
-/* DBTYPE stays the same */
-/* HASHINFO, RECNOINFO and BTREEINFO  map to DB_INFO */
-#if DB_VERSION_MAJOR == 2
-    typedef DB_INFO	INFO ;
-#else /* DB_VERSION_MAJOR > 2 */
-#    define DB_FIXEDLEN	(0x8000)
-#endif /* DB_VERSION_MAJOR == 2 */
-
-/* version 2 has db_recno_t in place of recno_t	*/
-typedef db_recno_t	recno_t;
-
-
-#define R_CURSOR        DB_SET_RANGE
-#define R_FIRST         DB_FIRST
-#define R_IAFTER        DB_AFTER
-#define R_IBEFORE       DB_BEFORE
-#define R_LAST          DB_LAST
-#define R_NEXT          DB_NEXT
-#define R_NOOVERWRITE   DB_NOOVERWRITE
-#define R_PREV          DB_PREV
-
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
-#  define R_SETCURSOR	0x800000
-#else
-#  define R_SETCURSOR	(-100)
-#endif
-
-#define R_RECNOSYNC     0
-#define R_FIXEDLEN	DB_FIXEDLEN
-#define R_DUP		DB_DUP
-
-
-#define db_HA_hash 	h_hash
-#define db_HA_ffactor	h_ffactor
-#define db_HA_nelem	h_nelem
-#define db_HA_bsize	db_pagesize
-#define db_HA_cachesize	db_cachesize
-#define db_HA_lorder	db_lorder
-
-#define db_BT_compare	bt_compare
-#define db_BT_prefix	bt_prefix
-#define db_BT_flags	flags
-#define db_BT_psize	db_pagesize
-#define db_BT_cachesize	db_cachesize
-#define db_BT_lorder	db_lorder
-#define db_BT_maxkeypage
-#define db_BT_minkeypage
-
-
-#define db_RE_reclen	re_len
-#define db_RE_flags	flags
-#define db_RE_bval	re_pad
-#define db_RE_bfname	re_source
-#define db_RE_psize	db_pagesize
-#define db_RE_cachesize	db_cachesize
-#define db_RE_lorder	db_lorder
-
-#define TXN	NULL,
-
-#define do_SEQ(db, key, value, flag)	(db->cursor->c_get)(db->cursor, &key, &value, flag)
-
-
-#define DBT_flags(x)	x.flags = 0
-#define DB_flags(x, v)	x |= v 
-
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
-#    define flagSet(flags, bitmask)	((flags) & (bitmask))
-#else
-#    define flagSet(flags, bitmask)	(((flags) & DB_OPFLAGS_MASK) == (bitmask))
-#endif
-
-#else /* db version 1.x */
-
-#define BERKELEY_DB_1
-#define BERKELEY_DB_1_OR_2
-
-typedef union INFO {
-        HASHINFO 	hash ;
-        RECNOINFO 	recno ;
-        BTREEINFO 	btree ;
-      } INFO ;
-
-
-#ifdef mDB_Prefix_t 
-#  ifdef DB_Prefix_t
-#    undef DB_Prefix_t
-#  endif
-#  define DB_Prefix_t	mDB_Prefix_t 
-#endif
-
-#ifdef mDB_Hash_t
-#  ifdef DB_Hash_t
-#    undef DB_Hash_t
-#  endif
-#  define DB_Hash_t	mDB_Hash_t
-#endif
-
-#define db_HA_hash 	hash.hash
-#define db_HA_ffactor	hash.ffactor
-#define db_HA_nelem	hash.nelem
-#define db_HA_bsize	hash.bsize
-#define db_HA_cachesize	hash.cachesize
-#define db_HA_lorder	hash.lorder
-
-#define db_BT_compare	btree.compare
-#define db_BT_prefix	btree.prefix
-#define db_BT_flags	btree.flags
-#define db_BT_psize	btree.psize
-#define db_BT_cachesize	btree.cachesize
-#define db_BT_lorder	btree.lorder
-#define db_BT_maxkeypage btree.maxkeypage
-#define db_BT_minkeypage btree.minkeypage
-
-#define db_RE_reclen	recno.reclen
-#define db_RE_flags	recno.flags
-#define db_RE_bval	recno.bval
-#define db_RE_bfname	recno.bfname
-#define db_RE_psize	recno.psize
-#define db_RE_cachesize	recno.cachesize
-#define db_RE_lorder	recno.lorder
-
-#define TXN	
-
-#define do_SEQ(db, key, value, flag)	(db->dbp->seq)(db->dbp, &key, &value, flag)
-#define DBT_flags(x)	
-#define DB_flags(x, v)	
-#define flagSet(flags, bitmask)        ((flags) & (bitmask))
-
-#endif /* db version 1 */
-
-
-
-#define db_DELETE(db, key, flags)       ((db->dbp)->del)(db->dbp, TXN &key, 0)
-#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, 0)
-#define db_FETCH(db, key, flags)        ((db->dbp)->get)(db->dbp, TXN &key, &value, 0)
-
-#define db_sync(db, flags)              ((db->dbp)->sync)(db->dbp, flags)
-#define db_get(db, key, value, flags)   ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
-
-#ifdef DB_VERSION_MAJOR
-#define db_DESTROY(db)                  (!db->aborted && ( db->cursor->c_close(db->cursor),\
-					  (db->dbp->close)(db->dbp, 0) ))
-#define db_close(db)			((db->dbp)->close)(db->dbp, 0)
-#define db_del(db, key, flags)          (flagSet(flags, R_CURSOR) 					\
-						? ((db->cursor)->c_del)(db->cursor, 0)		\
-						: ((db->dbp)->del)(db->dbp, NULL, &key, flags) )
-
-#else /* ! DB_VERSION_MAJOR */
-
-#define db_DESTROY(db)                  (!db->aborted && ((db->dbp)->close)(db->dbp))
-#define db_close(db)			((db->dbp)->close)(db->dbp)
-#define db_del(db, key, flags)          ((db->dbp)->del)(db->dbp, &key, flags)
-#define db_put(db, key, value, flags)   ((db->dbp)->put)(db->dbp, &key, &value, flags)
-
-#endif /* ! DB_VERSION_MAJOR */
-
-
-#define db_seq(db, key, value, flags)   do_SEQ(db, key, value, flags)
-
-typedef struct {
-	DBTYPE	type ;
-	DB * 	dbp ;
-	SV *	compare ;
-	bool	in_compare ;
-	SV *	prefix ;
-	bool	in_prefix ;
-	SV *	hash ;
-	bool	in_hash ;
-	bool	aborted ;
-	int	in_memory ;
-#ifdef BERKELEY_DB_1_OR_2
-	INFO 	info ;
-#endif	
-#ifdef DB_VERSION_MAJOR
-	DBC *	cursor ;
-#endif
-	SV *    filter_fetch_key ;
-	SV *    filter_store_key ;
-	SV *    filter_fetch_value ;
-	SV *    filter_store_value ;
-	int     filtering ;
-
-	} DB_File_type;
-
-typedef DB_File_type * DB_File ;
-typedef DBT DBTKEY ;
-
-#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
-
-#define OutputValue(arg, name)  					\
-	{ if (RETVAL == 0) {						\
-	      SvGETMAGIC(arg) ;          				\
-	      my_sv_setpvn(arg, name.data, name.size) ;			\
-	      TAINT;                                       		\
-	      SvTAINTED_on(arg);                                       	\
-	      SvUTF8_off(arg);                                       	\
-	      DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; 	\
-	  }								\
-	}
-
-#define OutputKey(arg, name)	 					\
-	{ if (RETVAL == 0) 						\
-	  { 								\
-		SvGETMAGIC(arg) ;          				\
-		if (db->type != DB_RECNO) {				\
-		    my_sv_setpvn(arg, name.data, name.size); 		\
-		}							\
-		else 							\
-		    sv_setiv(arg, (I32)*(I32*)name.data - 1); 		\
-	      TAINT;                                       		\
-	      SvTAINTED_on(arg);                                       	\
-	      SvUTF8_off(arg);                                       	\
-	      DBM_ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; 	\
-	  } 								\
-	}
-
-#define my_SvUV32(sv) ((u_int32_t)SvUV(sv))
-
-#ifdef CAN_PROTOTYPE
-extern void __getBerkeleyDBInfo(void);
-#endif
-
-/* Internal Global Data */
-
-#define MY_CXT_KEY "DB_File::_guts" XS_VERSION
-
-typedef struct {
-    recno_t	x_Value; 
-    recno_t	x_zero;
-    DB_File	x_CurrentDB;
-    DBTKEY	x_empty;
-} my_cxt_t;
-
-START_MY_CXT
-
-#define Value		(MY_CXT.x_Value)
-#define zero		(MY_CXT.x_zero)
-#define CurrentDB	(MY_CXT.x_CurrentDB)
-#define empty		(MY_CXT.x_empty)
-
-#define ERR_BUFF "DB_File::Error"
-
-#ifdef DB_VERSION_MAJOR
-
-static int
-#ifdef CAN_PROTOTYPE
-db_put(DB_File db, DBTKEY key, DBT value, u_int flags)
-#else
-db_put(db, key, value, flags)
-DB_File		db ;
-DBTKEY		key ;
-DBT		value ;
-u_int		flags ;
-#endif
-{
-    int status ;
-
-    if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) {
-        DBC * temp_cursor ;
-	DBT l_key, l_value;
-        
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
-        if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor) != 0)
-#else
-        if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor, 0) != 0)
-#endif
-	    return (-1) ;
-
-	memset(&l_key, 0, sizeof(l_key));
-	l_key.data = key.data;
-	l_key.size = key.size;
-	memset(&l_value, 0, sizeof(l_value));
-	l_value.data = value.data;
-	l_value.size = value.size;
-
-	if ( temp_cursor->c_get(temp_cursor, &l_key, &l_value, DB_SET) != 0) {
-	    (void)temp_cursor->c_close(temp_cursor);
-	    return (-1);
-	}
-
-	status = temp_cursor->c_put(temp_cursor, &key, &value, flags);
-	(void)temp_cursor->c_close(temp_cursor);
-	    
-        return (status) ;
-    }	
-    
-    
-    if (flagSet(flags, R_CURSOR)) {
-	return ((db->cursor)->c_put)(db->cursor, &key, &value, DB_CURRENT);
-    }
-
-    if (flagSet(flags, R_SETCURSOR)) {
-	if ((db->dbp)->put(db->dbp, NULL, &key, &value, 0) != 0)
-		return -1 ;
-        return ((db->cursor)->c_get)(db->cursor, &key, &value, DB_SET_RANGE);
-    
-    }
-
-    return ((db->dbp)->put)(db->dbp, NULL, &key, &value, flags) ;
-
-}
-
-#endif /* DB_VERSION_MAJOR */
-
-static void
-tidyUp(DB_File db)
-{
-    db->aborted = TRUE ;
-}
-
-
-static int
-#ifdef AT_LEAST_DB_3_2
-
-#ifdef CAN_PROTOTYPE
-btree_compare(DB * db, const DBT *key1, const DBT *key2)
-#else
-btree_compare(db, key1, key2)
-DB * db ;
-const DBT * key1 ;
-const DBT * key2 ;
-#endif /* CAN_PROTOTYPE */
-
-#else /* Berkeley DB < 3.2 */
-
-#ifdef CAN_PROTOTYPE
-btree_compare(const DBT *key1, const DBT *key2)
-#else
-btree_compare(key1, key2)
-const DBT * key1 ;
-const DBT * key2 ;
-#endif
-
-#endif
-
-{
-#ifdef dTHX
-    dTHX;
-#endif    
-    dSP ;
-    dMY_CXT ;
-    void * data1, * data2 ;
-    int retval ;
-    int count ;
-    
-
-    if (CurrentDB->in_compare) {
-        tidyUp(CurrentDB);
-        croak ("DB_File btree_compare: recursion detected\n") ;
-    }
-
-    data1 = (char *) key1->data ;
-    data2 = (char *) key2->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C 
-       string if the size parameter is 0, make sure that data points to an 
-       empty string if the length is 0
-    */
-    if (key1->size == 0)
-        data1 = "" ; 
-    if (key2->size == 0)
-        data2 = "" ;
-#endif	
-
-    ENTER ;
-    SAVETMPS;
-    SAVESPTR(CurrentDB);
-    CurrentDB->in_compare = FALSE;
-    SAVEINT(CurrentDB->in_compare);
-    CurrentDB->in_compare = TRUE;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
-    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
-    PUTBACK ;
-
-    count = perl_call_sv(CurrentDB->compare, G_SCALAR); 
-
-    SPAGAIN ;
-
-    if (count != 1){
-        tidyUp(CurrentDB);
-        croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
-    }
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-
-    return (retval) ;
-
-}
-
-static DB_Prefix_t
-#ifdef AT_LEAST_DB_3_2
-
-#ifdef CAN_PROTOTYPE
-btree_prefix(DB * db, const DBT *key1, const DBT *key2)
-#else
-btree_prefix(db, key1, key2)
-Db * db ;
-const DBT * key1 ;
-const DBT * key2 ;
-#endif
-
-#else /* Berkeley DB < 3.2 */
-
-#ifdef CAN_PROTOTYPE
-btree_prefix(const DBT *key1, const DBT *key2)
-#else
-btree_prefix(key1, key2)
-const DBT * key1 ;
-const DBT * key2 ;
-#endif
-
-#endif
-{
-#ifdef dTHX
-    dTHX;
-#endif    
-    dSP ;
-    dMY_CXT ;
-    char * data1, * data2 ;
-    int retval ;
-    int count ;
-    
-    if (CurrentDB->in_prefix){
-        tidyUp(CurrentDB);
-        croak ("DB_File btree_prefix: recursion detected\n") ;
-    }
-
-    data1 = (char *) key1->data ;
-    data2 = (char *) key2->data ;
-
-#ifndef newSVpvn
-    /* As newSVpv will assume that the data pointer is a null terminated C 
-       string if the size parameter is 0, make sure that data points to an 
-       empty string if the length is 0
-    */
-    if (key1->size == 0)
-        data1 = "" ;
-    if (key2->size == 0)
-        data2 = "" ;
-#endif	
-
-    ENTER ;
-    SAVETMPS;
-    SAVESPTR(CurrentDB);
-    CurrentDB->in_prefix = FALSE;
-    SAVEINT(CurrentDB->in_prefix);
-    CurrentDB->in_prefix = TRUE;
-
-    PUSHMARK(SP) ;
-    EXTEND(SP,2) ;
-    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
-    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
-    PUTBACK ;
-
-    count = perl_call_sv(CurrentDB->prefix, G_SCALAR); 
-
-    SPAGAIN ;
-
-    if (count != 1){
-        tidyUp(CurrentDB);
-        croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
-    }
- 
-    retval = POPi ;
- 
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-
-    return (retval) ;
-}
-
-
-#ifdef BERKELEY_DB_1
-#    define HASH_CB_SIZE_TYPE size_t
-#else
-#    define HASH_CB_SIZE_TYPE u_int32_t
-#endif
-
-static DB_Hash_t
-#ifdef AT_LEAST_DB_3_2
-
-#ifdef CAN_PROTOTYPE
-hash_cb(DB * db, const void *data, u_int32_t size)
-#else
-hash_cb(db, data, size)
-DB * db ;
-const void * data ;
-HASH_CB_SIZE_TYPE size ;
-#endif
-
-#else /* Berkeley DB < 3.2 */
-
-#ifdef CAN_PROTOTYPE
-hash_cb(const void *data, HASH_CB_SIZE_TYPE size)
-#else
-hash_cb(data, size)
-const void * data ;
-HASH_CB_SIZE_TYPE size ;
-#endif
-
-#endif
-{
-#ifdef dTHX
-    dTHX;
-#endif    
-    dSP ;
-    dMY_CXT;
-    int retval = 0;
-    int count ;
-
-    if (CurrentDB->in_hash){
-        tidyUp(CurrentDB);
-        croak ("DB_File hash callback: recursion detected\n") ;
-    }
-
-#ifndef newSVpvn
-    if (size == 0)
-        data = "" ;
-#endif	
-
-     /* DGH - Next two lines added to fix corrupted stack problem */
-    ENTER ;
-    SAVETMPS;
-    SAVESPTR(CurrentDB);
-    CurrentDB->in_hash = FALSE;
-    SAVEINT(CurrentDB->in_hash);
-    CurrentDB->in_hash = TRUE;
-
-    PUSHMARK(SP) ;
-
-
-    XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
-    PUTBACK ;
-
-    count = perl_call_sv(CurrentDB->hash, G_SCALAR); 
-
-    SPAGAIN ;
-
-    if (count != 1){
-        tidyUp(CurrentDB);
-        croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
-    }
-
-    retval = POPi ;
-
-    PUTBACK ;
-    FREETMPS ;
-    LEAVE ;
-
-    return (retval) ;
-}
-
-#if 0
-static void
-#ifdef CAN_PROTOTYPE
-db_errcall_cb(const char * db_errpfx, char * buffer)
-#else
-db_errcall_cb(db_errpfx, buffer)
-const char * db_errpfx;
-char * buffer;
-#endif
-{
-#ifdef dTHX
-    dTHX;
-#endif    
-    SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
-    if (sv) {
-        if (db_errpfx)
-            sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
-        else
-            sv_setpv(sv, buffer) ;
-    }
-} 
-#endif
-
-#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
-
-static void
-#ifdef CAN_PROTOTYPE
-PrintHash(INFO *hash)
-#else
-PrintHash(hash)
-INFO * hash ;
-#endif
-{
-    printf ("HASH Info\n") ;
-    printf ("  hash      = %s\n", 
-		(hash->db_HA_hash != NULL ? "redefined" : "default")) ;
-    printf ("  bsize     = %d\n", hash->db_HA_bsize) ;
-    printf ("  ffactor   = %d\n", hash->db_HA_ffactor) ;
-    printf ("  nelem     = %d\n", hash->db_HA_nelem) ;
-    printf ("  cachesize = %d\n", hash->db_HA_cachesize) ;
-    printf ("  lorder    = %d\n", hash->db_HA_lorder) ;
-
-}
-
-static void
-#ifdef CAN_PROTOTYPE
-PrintRecno(INFO *recno)
-#else
-PrintRecno(recno)
-INFO * recno ;
-#endif
-{
-    printf ("RECNO Info\n") ;
-    printf ("  flags     = %d\n", recno->db_RE_flags) ;
-    printf ("  cachesize = %d\n", recno->db_RE_cachesize) ;
-    printf ("  psize     = %d\n", recno->db_RE_psize) ;
-    printf ("  lorder    = %d\n", recno->db_RE_lorder) ;
-    printf ("  reclen    = %lu\n", (unsigned long)recno->db_RE_reclen) ;
-    printf ("  bval      = %d 0x%x\n", recno->db_RE_bval, recno->db_RE_bval) ;
-    printf ("  bfname    = %d [%s]\n", recno->db_RE_bfname, recno->db_RE_bfname) ;
-}
-
-static void
-#ifdef CAN_PROTOTYPE
-PrintBtree(INFO *btree)
-#else
-PrintBtree(btree)
-INFO * btree ;
-#endif
-{
-    printf ("BTREE Info\n") ;
-    printf ("  compare    = %s\n", 
-		(btree->db_BT_compare ? "redefined" : "default")) ;
-    printf ("  prefix     = %s\n", 
-		(btree->db_BT_prefix ? "redefined" : "default")) ;
-    printf ("  flags      = %d\n", btree->db_BT_flags) ;
-    printf ("  cachesize  = %d\n", btree->db_BT_cachesize) ;
-    printf ("  psize      = %d\n", btree->db_BT_psize) ;
-#ifndef DB_VERSION_MAJOR
-    printf ("  maxkeypage = %d\n", btree->db_BT_maxkeypage) ;
-    printf ("  minkeypage = %d\n", btree->db_BT_minkeypage) ;
-#endif
-    printf ("  lorder     = %d\n", btree->db_BT_lorder) ;
-}
-
-#else
-
-#define PrintRecno(recno)
-#define PrintHash(hash)
-#define PrintBtree(btree)
-
-#endif /* TRACE */
-
-
-static I32
-#ifdef CAN_PROTOTYPE
-GetArrayLength(pTHX_ DB_File db)
-#else
-GetArrayLength(db)
-DB_File db ;
-#endif
-{
-    DBT		key ;
-    DBT		value ;
-    int		RETVAL ;
-
-    DBT_clear(key) ;
-    DBT_clear(value) ;
-    RETVAL = do_SEQ(db, key, value, R_LAST) ;
-    if (RETVAL == 0)
-        RETVAL = *(I32 *)key.data ;
-    else /* No key means empty file */
-        RETVAL = 0 ;
-
-    return ((I32)RETVAL) ;
-}
-
-static recno_t
-#ifdef CAN_PROTOTYPE
-GetRecnoKey(pTHX_ DB_File db, I32 value)
-#else
-GetRecnoKey(db, value)
-DB_File  db ;
-I32      value ;
-#endif
-{
-    if (value < 0) {
-	/* Get the length of the array */
-	I32 length = GetArrayLength(aTHX_ db) ;
-
-	/* check for attempt to write before start of array */
-	if (length + value + 1 <= 0) {
-            tidyUp(db);
-	    croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
-	}
-
-	value = length + value + 1 ;
-    }
-    else
-        ++ value ;
-
-    return value ;
-}
-
-
-static DB_File
-#ifdef CAN_PROTOTYPE
-ParseOpenInfo(pTHX_ int isHASH, char *name, int flags, int mode, SV *sv)
-#else
-ParseOpenInfo(isHASH, name, flags, mode, sv)
-int    isHASH ;
-char * name ;
-int    flags ;
-int    mode ;
-SV *   sv ;
-#endif
-{
-
-#ifdef BERKELEY_DB_1_OR_2 /* Berkeley DB Version 1  or 2 */
-
-    SV **	svp;
-    HV *	action ;
-    DB_File	RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
-    void *	openinfo = NULL ;
-    INFO	* info  = &RETVAL->info ;
-    STRLEN	n_a;
-    dMY_CXT;
-
-#ifdef TRACE    
-    printf("In ParseOpenInfo name=[%s] flags=[%d] mode=[%d] SV NULL=[%d]\n", 
-		    name, flags, mode, sv == NULL) ;  
-#endif
-    Zero(RETVAL, 1, DB_File_type) ;
-
-    /* Default to HASH */
-    RETVAL->filtering = 0 ;
-    RETVAL->filter_fetch_key = RETVAL->filter_store_key = 
-    RETVAL->filter_fetch_value = RETVAL->filter_store_value =
-    RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
-    RETVAL->type = DB_HASH ;
-
-     /* DGH - Next line added to avoid SEGV on existing hash DB */
-    CurrentDB = RETVAL; 
-
-    /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
-    RETVAL->in_memory = (name == NULL) ;
-
-    if (sv)
-    {
-        if (! SvROK(sv) )
-            croak ("type parameter is not a reference") ;
-
-        svp  = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
-        if (svp && SvOK(*svp))
-            action  = (HV*) SvRV(*svp) ;
-	else
-	    croak("internal error") ;
-
-        if (sv_isa(sv, "DB_File::HASHINFO"))
-        {
-
-	    if (!isHASH)
-	        croak("DB_File can only tie an associative array to a DB_HASH database") ;
-
-            RETVAL->type = DB_HASH ;
-            openinfo = (void*)info ;
-  
-            svp = hv_fetch(action, "hash", 4, FALSE); 
-
-            if (svp && SvOK(*svp))
-            {
-                info->db_HA_hash = hash_cb ;
-		RETVAL->hash = newSVsv(*svp) ;
-            }
-            else
-	        info->db_HA_hash = NULL ;
-
-           svp = hv_fetch(action, "ffactor", 7, FALSE);
-           info->db_HA_ffactor = svp ? SvIV(*svp) : 0;
-         
-           svp = hv_fetch(action, "nelem", 5, FALSE);
-           info->db_HA_nelem = svp ? SvIV(*svp) : 0;
-         
-           svp = hv_fetch(action, "bsize", 5, FALSE);
-           info->db_HA_bsize = svp ? SvIV(*svp) : 0;
-           
-           svp = hv_fetch(action, "cachesize", 9, FALSE);
-           info->db_HA_cachesize = svp ? SvIV(*svp) : 0;
-         
-           svp = hv_fetch(action, "lorder", 6, FALSE);
-           info->db_HA_lorder = svp ? SvIV(*svp) : 0;
-
-           PrintHash(info) ; 
-        }
-        else if (sv_isa(sv, "DB_File::BTREEINFO"))
-        {
-	    if (!isHASH)
-	        croak("DB_File can only tie an associative array to a DB_BTREE database");
-
-            RETVAL->type = DB_BTREE ;
-            openinfo = (void*)info ;
-   
-            svp = hv_fetch(action, "compare", 7, FALSE);
-            if (svp && SvOK(*svp))
-            {
-                info->db_BT_compare = btree_compare ;
-		RETVAL->compare = newSVsv(*svp) ;
-            }
-            else
-                info->db_BT_compare = NULL ;
-
-            svp = hv_fetch(action, "prefix", 6, FALSE);
-            if (svp && SvOK(*svp))
-            {
-                info->db_BT_prefix = btree_prefix ;
-		RETVAL->prefix = newSVsv(*svp) ;
-            }
-            else
-                info->db_BT_prefix = NULL ;
-
-            svp = hv_fetch(action, "flags", 5, FALSE);
-            info->db_BT_flags = svp ? SvIV(*svp) : 0;
-   
-            svp = hv_fetch(action, "cachesize", 9, FALSE);
-            info->db_BT_cachesize = svp ? SvIV(*svp) : 0;
-         
-#ifndef DB_VERSION_MAJOR
-            svp = hv_fetch(action, "minkeypage", 10, FALSE);
-            info->btree.minkeypage = svp ? SvIV(*svp) : 0;
-        
-            svp = hv_fetch(action, "maxkeypage", 10, FALSE);
-            info->btree.maxkeypage = svp ? SvIV(*svp) : 0;
-#endif
-
-            svp = hv_fetch(action, "psize", 5, FALSE);
-            info->db_BT_psize = svp ? SvIV(*svp) : 0;
-         
-            svp = hv_fetch(action, "lorder", 6, FALSE);
-            info->db_BT_lorder = svp ? SvIV(*svp) : 0;
-
-            PrintBtree(info) ;
-         
-        }
-        else if (sv_isa(sv, "DB_File::RECNOINFO"))
-        {
-	    if (isHASH)
-	        croak("DB_File can only tie an array to a DB_RECNO database");
-
-            RETVAL->type = DB_RECNO ;
-            openinfo = (void *)info ;
-
-	    info->db_RE_flags = 0 ;
-
-            svp = hv_fetch(action, "flags", 5, FALSE);
-            info->db_RE_flags = (u_long) (svp ? SvIV(*svp) : 0);
-         
-            svp = hv_fetch(action, "reclen", 6, FALSE);
-            info->db_RE_reclen = (size_t) (svp ? SvIV(*svp) : 0);
-         
-            svp = hv_fetch(action, "cachesize", 9, FALSE);
-            info->db_RE_cachesize = (u_int) (svp ? SvIV(*svp) : 0);
-         
-            svp = hv_fetch(action, "psize", 5, FALSE);
-            info->db_RE_psize = (u_int) (svp ? SvIV(*svp) : 0);
-         
-            svp = hv_fetch(action, "lorder", 6, FALSE);
-            info->db_RE_lorder = (int) (svp ? SvIV(*svp) : 0);
-
-#ifdef DB_VERSION_MAJOR
-	    info->re_source = name ;
-	    name = NULL ;
-#endif
-            svp = hv_fetch(action, "bfname", 6, FALSE); 
-            if (svp && SvOK(*svp)) {
-		char * ptr = SvPV(*svp,n_a) ;
-#ifdef DB_VERSION_MAJOR
-		name = (char*) n_a ? ptr : NULL ;
-#else
-                info->db_RE_bfname = (char*) (n_a ? ptr : NULL) ;
-#endif
-	    }
-	    else
-#ifdef DB_VERSION_MAJOR
-		name = NULL ;
-#else
-                info->db_RE_bfname = NULL ;
-#endif
-         
-	    svp = hv_fetch(action, "bval", 4, FALSE);
-#ifdef DB_VERSION_MAJOR
-            if (svp && SvOK(*svp))
-            {
-		int value ;
-                if (SvPOK(*svp))
-		    value = (int)*SvPV(*svp, n_a) ;
-		else
-		    value = SvIV(*svp) ;
-
-		if (info->flags & DB_FIXEDLEN) {
-		    info->re_pad = value ;
-		    info->flags |= DB_PAD ;
-		}
-		else {
-		    info->re_delim = value ;
-		    info->flags |= DB_DELIMITER ;
-		}
-
-            }
-#else
-            if (svp && SvOK(*svp))
-            {
-                if (SvPOK(*svp))
-		    info->db_RE_bval = (u_char)*SvPV(*svp, n_a) ;
-		else
-		    info->db_RE_bval = (u_char)(unsigned long) SvIV(*svp) ;
-		DB_flags(info->flags, DB_DELIMITER) ;
-
-            }
-            else
- 	    {
-		if (info->db_RE_flags & R_FIXEDLEN)
-                    info->db_RE_bval = (u_char) ' ' ;
-		else
-                    info->db_RE_bval = (u_char) '\n' ;
-		DB_flags(info->flags, DB_DELIMITER) ;
-	    }
-#endif
-
-#ifdef DB_RENUMBER
-	    info->flags |= DB_RENUMBER ;
-#endif
-         
-            PrintRecno(info) ;
-        }
-        else
-            croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
-    }
-
-
-    /* OS2 Specific Code */
-#ifdef OS2
-#ifdef __EMX__
-    flags |= O_BINARY;
-#endif /* __EMX__ */
-#endif /* OS2 */
-
-#ifdef DB_VERSION_MAJOR
-
-    {
-        int	 	Flags = 0 ;
-        int		status ;
-
-        /* Map 1.x flags to 2.x flags */
-        if ((flags & O_CREAT) == O_CREAT)
-            Flags |= DB_CREATE ;
-
-#if O_RDONLY == 0
-        if (flags == O_RDONLY)
-#else
-        if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
-#endif
-            Flags |= DB_RDONLY ;
-
-#ifdef O_TRUNC
-        if ((flags & O_TRUNC) == O_TRUNC)
-            Flags |= DB_TRUNCATE ;
-#endif
-
-        status = db_open(name, RETVAL->type, Flags, mode, NULL, openinfo, &RETVAL->dbp) ; 
-        if (status == 0)
-#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
-            status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor) ;
-#else
-            status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
-			0) ;
-#endif
-
-        if (status)
-	    RETVAL->dbp = NULL ;
-
-    }
-#else
-
-#if defined(DB_LIBRARY_COMPATIBILITY_API) && DB_VERSION_MAJOR > 2
-    RETVAL->dbp = __db185_open(name, flags, mode, RETVAL->type, openinfo) ; 
-#else    
-    RETVAL->dbp = dbopen(name, flags, mode, RETVAL->type, openinfo) ; 
-#endif /* DB_LIBRARY_COMPATIBILITY_API */
-
-#endif
-
-    return (RETVAL) ;
-
-#else /* Berkeley DB Version > 2 */
-
-    SV **	svp;
-    HV *	action ;
-    DB_File	RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
-    DB *	dbp ;
-    STRLEN	n_a;
-    int		status ;
-    dMY_CXT;
-
-/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ;  */
-    Zero(RETVAL, 1, DB_File_type) ;
-
-    /* Default to HASH */
-    RETVAL->filtering = 0 ;
-    RETVAL->filter_fetch_key = RETVAL->filter_store_key = 
-    RETVAL->filter_fetch_value = RETVAL->filter_store_value =
-    RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
-    RETVAL->type = DB_HASH ;
-
-     /* DGH - Next line added to avoid SEGV on existing hash DB */
-    CurrentDB = RETVAL; 
-
-    /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
-    RETVAL->in_memory = (name == NULL) ;
-
-    status = db_create(&RETVAL->dbp, NULL,0) ;
-    /* printf("db_create returned %d %s\n", status, db_strerror(status)) ; */
-    if (status) {
-	RETVAL->dbp = NULL ;
-        return (RETVAL) ;
-    }	
-    dbp = RETVAL->dbp ;
-
-    if (sv)
-    {
-        if (! SvROK(sv) )
-            croak ("type parameter is not a reference") ;
-
-        svp  = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
-        if (svp && SvOK(*svp))
-            action  = (HV*) SvRV(*svp) ;
-	else
-	    croak("internal error") ;
-
-        if (sv_isa(sv, "DB_File::HASHINFO"))
-        {
-
-	    if (!isHASH)
-	        croak("DB_File can only tie an associative array to a DB_HASH database") ;
-
-            RETVAL->type = DB_HASH ;
-  
-            svp = hv_fetch(action, "hash", 4, FALSE); 
-
-            if (svp && SvOK(*svp))
-            {
-		(void)dbp->set_h_hash(dbp, hash_cb) ;
-		RETVAL->hash = newSVsv(*svp) ;
-            }
-
-           svp = hv_fetch(action, "ffactor", 7, FALSE);
-	   if (svp)
-	       (void)dbp->set_h_ffactor(dbp, my_SvUV32(*svp)) ;
-         
-           svp = hv_fetch(action, "nelem", 5, FALSE);
-	   if (svp)
-               (void)dbp->set_h_nelem(dbp, my_SvUV32(*svp)) ;
-         
-           svp = hv_fetch(action, "bsize", 5, FALSE);
-	   if (svp)
-               (void)dbp->set_pagesize(dbp, my_SvUV32(*svp));
-           
-           svp = hv_fetch(action, "cachesize", 9, FALSE);
-	   if (svp)
-               (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
-         
-           svp = hv_fetch(action, "lorder", 6, FALSE);
-	   if (svp)
-               (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
-
-           PrintHash(info) ; 
-        }
-        else if (sv_isa(sv, "DB_File::BTREEINFO"))
-        {
-	    if (!isHASH)
-	        croak("DB_File can only tie an associative array to a DB_BTREE database");
-
-            RETVAL->type = DB_BTREE ;
-   
-            svp = hv_fetch(action, "compare", 7, FALSE);
-            if (svp && SvOK(*svp))
-            {
-                (void)dbp->set_bt_compare(dbp, btree_compare) ;
-		RETVAL->compare = newSVsv(*svp) ;
-            }
-
-            svp = hv_fetch(action, "prefix", 6, FALSE);
-            if (svp && SvOK(*svp))
-            {
-                (void)dbp->set_bt_prefix(dbp, btree_prefix) ;
-		RETVAL->prefix = newSVsv(*svp) ;
-            }
-
-           svp = hv_fetch(action, "flags", 5, FALSE);
-	   if (svp)
-	       (void)dbp->set_flags(dbp, my_SvUV32(*svp)) ;
-   
-           svp = hv_fetch(action, "cachesize", 9, FALSE);
-	   if (svp)
-               (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
-         
-           svp = hv_fetch(action, "psize", 5, FALSE);
-	   if (svp)
-               (void)dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
-         
-           svp = hv_fetch(action, "lorder", 6, FALSE);
-	   if (svp)
-               (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
-
-            PrintBtree(info) ;
-         
-        }
-        else if (sv_isa(sv, "DB_File::RECNOINFO"))
-        {
-	    int fixed = FALSE ;
-
-	    if (isHASH)
-	        croak("DB_File can only tie an array to a DB_RECNO database");
-
-            RETVAL->type = DB_RECNO ;
-
-           svp = hv_fetch(action, "flags", 5, FALSE);
-	   if (svp) {
-		int flags = SvIV(*svp) ;
-		/* remove FIXDLEN, if present */
-		if (flags & DB_FIXEDLEN) {
-		    fixed = TRUE ;
-		    flags &= ~DB_FIXEDLEN ;
-	   	}
-	   }
-
-           svp = hv_fetch(action, "cachesize", 9, FALSE);
-	   if (svp) {
-               status = dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
-	   }
-         
-           svp = hv_fetch(action, "psize", 5, FALSE);
-	   if (svp) {
-               status = dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
-	    }
-         
-           svp = hv_fetch(action, "lorder", 6, FALSE);
-	   if (svp) {
-               status = dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
-	   }
-
-	    svp = hv_fetch(action, "bval", 4, FALSE);
-            if (svp && SvOK(*svp))
-            {
-		int value ;
-                if (SvPOK(*svp))
-		    value = (int)*SvPV(*svp, n_a) ;
-		else
-		    value = (int)SvIV(*svp) ;
-
-		if (fixed) {
-		    status = dbp->set_re_pad(dbp, value) ;
-		}
-		else {
-		    status = dbp->set_re_delim(dbp, value) ;
-		}
-
-            }
-
-	   if (fixed) {
-               svp = hv_fetch(action, "reclen", 6, FALSE);
-	       if (svp) {
-		   u_int32_t len =  my_SvUV32(*svp) ;
-                   status = dbp->set_re_len(dbp, len) ;
-	       }    
-	   }
-         
-	    if (name != NULL) {
-	        status = dbp->set_re_source(dbp, name) ;
-	        name = NULL ;
-	    }	
-
-            svp = hv_fetch(action, "bfname", 6, FALSE); 
-            if (svp && SvOK(*svp)) {
-		char * ptr = SvPV(*svp,n_a) ;
-		name = (char*) n_a ? ptr : NULL ;
-	    }
-	    else
-		name = NULL ;
-         
-
-	    status = dbp->set_flags(dbp, (u_int32_t)DB_RENUMBER) ;
-         
-		if (flags){
-	            (void)dbp->set_flags(dbp, (u_int32_t)flags) ;
-		}
-            PrintRecno(info) ;
-        }
-        else
-            croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
-    }
-
-    {
-        u_int32_t 	Flags = 0 ;
-        int		status ;
-
-        /* Map 1.x flags to 3.x flags */
-        if ((flags & O_CREAT) == O_CREAT)
-            Flags |= DB_CREATE ;
-
-#if O_RDONLY == 0
-        if (flags == O_RDONLY)
-#else
-        if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
-#endif
-            Flags |= DB_RDONLY ;
-
-#ifdef O_TRUNC
-        if ((flags & O_TRUNC) == O_TRUNC)
-            Flags |= DB_TRUNCATE ;
-#endif
-
-#ifdef AT_LEAST_DB_4_1
-        status = (RETVAL->dbp->open)(RETVAL->dbp, NULL, name, NULL, RETVAL->type, 
-	    			Flags, mode) ; 
-#else
-        status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type, 
-	    			Flags, mode) ; 
-#endif
-	/* printf("open returned %d %s\n", status, db_strerror(status)) ; */
-
-        if (status == 0) {
-	    /* RETVAL->dbp->set_errcall(RETVAL->dbp, db_errcall_cb) ;*/
-
-            status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
-			0) ;
-	    /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
-	}
-
-        if (status)
-	    RETVAL->dbp = NULL ;
-
-    }
-
-    return (RETVAL) ;
-
-#endif /* Berkeley DB Version > 2 */
-
-} /* ParseOpenInfo */
-
-
-#include "constants.h"   
-
-MODULE = DB_File	PACKAGE = DB_File	PREFIX = db_
-
-INCLUDE: constants.xs
-
-BOOT:
-  {
-#ifdef dTHX
-    dTHX;
-#endif    
-    /* SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;  */
-    MY_CXT_INIT;
-    __getBerkeleyDBInfo() ;
- 
-    DBT_clear(empty) ; 
-    empty.data = &zero ;
-    empty.size =  sizeof(recno_t) ;
-  }
-
-
-
-DB_File
-db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_HASH)
-	int		isHASH
-	char *		dbtype
-	int		flags
-	int		mode
-	CODE:
-	{
-	    char *	name = (char *) NULL ; 
-	    SV *	sv = (SV *) NULL ; 
-	    STRLEN	n_a;
-
-	    if (items >= 3 && SvOK(ST(2))) 
-	        name = (char*) SvPV(ST(2), n_a) ; 
-
-            if (items == 6)
-	        sv = ST(5) ;
-
-	    RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ;
-	    if (RETVAL->dbp == NULL) {
-	        Safefree(RETVAL);
-	        RETVAL = NULL ;
-	    }
-	}
-	OUTPUT:	
-	    RETVAL
-
-int
-db_DESTROY(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT;
-	INIT:
-	  CurrentDB = db ;
-	  Trace(("DESTROY %p\n", db));
-	CLEANUP:
-	  Trace(("DESTROY %p done\n", db));
-	  if (db->hash)
-	    SvREFCNT_dec(db->hash) ;
-	  if (db->compare)
-	    SvREFCNT_dec(db->compare) ;
-	  if (db->prefix)
-	    SvREFCNT_dec(db->prefix) ;
-	  if (db->filter_fetch_key)
-	    SvREFCNT_dec(db->filter_fetch_key) ;
-	  if (db->filter_store_key)
-	    SvREFCNT_dec(db->filter_store_key) ;
-	  if (db->filter_fetch_value)
-	    SvREFCNT_dec(db->filter_fetch_value) ;
-	  if (db->filter_store_value)
-	    SvREFCNT_dec(db->filter_store_value) ;
-	  safefree(db) ;
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-#endif
-
-
-int
-db_DELETE(db, key, flags=0)
-	DB_File		db
-	DBTKEY		key
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	INIT:
-	  CurrentDB = db ;
-
-
-int
-db_EXISTS(db, key)
-	DB_File		db
-	DBTKEY		key
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	{
-          DBT		value ;
-	
-	  DBT_clear(value) ; 
-	  CurrentDB = db ;
-	  RETVAL = (((db->dbp)->get)(db->dbp, TXN &key, &value, 0) == 0) ;
-	}
-	OUTPUT:
-	  RETVAL
-
-void
-db_FETCH(db, key, flags=0)
-	DB_File		db
-	DBTKEY		key
-	u_int		flags
-	PREINIT:
-	  dMY_CXT ;
-	  int RETVAL ;
-	CODE:
-	{
-            DBT		value ;
-
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-	    RETVAL = db_get(db, key, value, flags) ;
-	    ST(0) = sv_newmortal();
-	    OutputValue(ST(0), value)
-	}
-
-int
-db_STORE(db, key, value, flags=0)
-	DB_File		db
-	DBTKEY		key
-	DBT		value
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	INIT:
-	  CurrentDB = db ;
-
-
-void
-db_FIRSTKEY(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT ;
-	  int RETVAL ;
-	CODE:
-	{
-	    DBTKEY	key ;
-	    DBT		value ;
-
-	    DBT_clear(key) ; 
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-	    RETVAL = do_SEQ(db, key, value, R_FIRST) ;
-	    ST(0) = sv_newmortal();
-	    OutputKey(ST(0), key) ;
-	}
-
-void
-db_NEXTKEY(db, key)
-	DB_File		db
-	DBTKEY		key = NO_INIT
-	PREINIT:
-	  dMY_CXT ;
-	  int RETVAL ;
-	CODE:
-	{
-	    DBT		value ;
-
-	    DBT_clear(key) ; 
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-	    RETVAL = do_SEQ(db, key, value, R_NEXT) ;
-	    ST(0) = sv_newmortal();
-	    OutputKey(ST(0), key) ;
-	}
-
-#
-# These would be nice for RECNO
-#
-
-int
-unshift(db, ...)
-	DB_File		db
-	ALIAS:		UNSHIFT = 1
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	{
-	    DBTKEY	key ;
-	    DBT		value ;
-	    int		i ;
-	    int		One ;
-	    STRLEN	n_a;
-
-	    DBT_clear(key) ; 
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-#ifdef DB_VERSION_MAJOR
-	    /* get the first value */
-	    RETVAL = do_SEQ(db, key, value, DB_FIRST) ;	 
-	    RETVAL = 0 ;
-#else
-	    RETVAL = -1 ;
-#endif
-	    for (i = items-1 ; i > 0 ; --i)
-	    {
-		DBM_ckFilter(ST(i), filter_store_value, "filter_store_value");
-	        value.data = SvPVbyte(ST(i), n_a) ;
-	        value.size = n_a ;
-	        One = 1 ;
-	        key.data = &One ;
-	        key.size = sizeof(int) ;
-#ifdef DB_VERSION_MAJOR
-           	RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
-#else
-	        RETVAL = (db->dbp->put)(db->dbp, &key, &value, R_IBEFORE) ;
-#endif
-	        if (RETVAL != 0)
-	            break;
-	    }
-	}
-	OUTPUT:
-	    RETVAL
-
-void
-pop(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT;
-	ALIAS:		POP = 1
-	PREINIT:
-	  I32 RETVAL;
-	CODE:
-	{
-	    DBTKEY	key ;
-	    DBT		value ;
-
-	    DBT_clear(key) ; 
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-
-	    /* First get the final value */
-	    RETVAL = do_SEQ(db, key, value, R_LAST) ;	 
-	    ST(0) = sv_newmortal();
-	    /* Now delete it */
-	    if (RETVAL == 0)
-	    {
-		/* the call to del will trash value, so take a copy now */
-		OutputValue(ST(0), value) ;
-	        RETVAL = db_del(db, key, R_CURSOR) ;
-	        if (RETVAL != 0) 
-	            sv_setsv(ST(0), &PL_sv_undef); 
-	    }
-	}
-
-void
-shift(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT;
-	ALIAS:		SHIFT = 1
-	PREINIT:
-	  I32 RETVAL;
-	CODE:
-	{
-	    DBT		value ;
-	    DBTKEY	key ;
-
-	    DBT_clear(key) ; 
-	    DBT_clear(value) ; 
-	    CurrentDB = db ;
-	    /* get the first value */
-	    RETVAL = do_SEQ(db, key, value, R_FIRST) ;	 
-	    ST(0) = sv_newmortal();
-	    /* Now delete it */
-	    if (RETVAL == 0)
-	    {
-		/* the call to del will trash value, so take a copy now */
-		OutputValue(ST(0), value) ;
-	        RETVAL = db_del(db, key, R_CURSOR) ;
-	        if (RETVAL != 0)
-	            sv_setsv (ST(0), &PL_sv_undef) ;
-	    }
-	}
-
-
-I32
-push(db, ...)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT;
-	ALIAS:		PUSH = 1
-	CODE:
-	{
-	    DBTKEY	key ;
-	    DBT		value ;
-	    DB *	Db = db->dbp ;
-	    int		i ;
-	    STRLEN	n_a;
-	    int		keyval ;
-
-	    DBT_flags(key) ; 
-	    DBT_flags(value) ; 
-	    CurrentDB = db ;
-	    /* Set the Cursor to the Last element */
-	    RETVAL = do_SEQ(db, key, value, R_LAST) ;
-#ifndef DB_VERSION_MAJOR		    		    
-	    if (RETVAL >= 0)
-#endif	    
-	    {
-	    	if (RETVAL == 0)
-		    keyval = *(int*)key.data ;
-		else
-		    keyval = 0 ;
-	        for (i = 1 ; i < items ; ++i)
-	        {
-		    DBM_ckFilter(ST(i), filter_store_value, "filter_store_value");
-	            value.data = SvPVbyte(ST(i), n_a) ;
-	            value.size = n_a ;
-		    ++ keyval ;
-	            key.data = &keyval ;
-	            key.size = sizeof(int) ;
-		    RETVAL = (Db->put)(Db, TXN &key, &value, 0) ;
-	            if (RETVAL != 0)
-	                break;
-	        }
-	    }
-	}
-	OUTPUT:
-	    RETVAL
-
-I32
-length(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT;
-	ALIAS:		FETCHSIZE = 1
-	CODE:
-	    CurrentDB = db ;
-	    RETVAL = GetArrayLength(aTHX_ db) ;
-	OUTPUT:
-	    RETVAL
-
-
-#
-# Now provide an interface to the rest of the DB functionality
-#
-
-int
-db_del(db, key, flags=0)
-	DB_File		db
-	DBTKEY		key
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	  CurrentDB = db ;
-	  RETVAL = db_del(db, key, flags) ;
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-	  else if (RETVAL == DB_NOTFOUND)
-	    RETVAL = 1 ;
-#endif
-	OUTPUT:
-	  RETVAL
-
-
-int
-db_get(db, key, value, flags=0)
-	DB_File		db
-	DBTKEY		key
-	DBT		value = NO_INIT
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	  CurrentDB = db ;
-	  DBT_clear(value) ; 
-	  RETVAL = db_get(db, key, value, flags) ;
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-	  else if (RETVAL == DB_NOTFOUND)
-	    RETVAL = 1 ;
-#endif
-	OUTPUT:
-	  RETVAL
-	  value
-
-int
-db_put(db, key, value, flags=0)
-	DB_File		db
-	DBTKEY		key
-	DBT		value
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	  CurrentDB = db ;
-	  RETVAL = db_put(db, key, value, flags) ;
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-	  else if (RETVAL == DB_KEYEXIST)
-	    RETVAL = 1 ;
-#endif
-	OUTPUT:
-	  RETVAL
-	  key		if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) OutputKey(ST(1), key);
-
-int
-db_fd(db)
-	DB_File		db
-	PREINIT:
-	  dMY_CXT ;
-	CODE:
-	  CurrentDB = db ;
-#ifdef DB_VERSION_MAJOR
-	  RETVAL = -1 ;
-	  {
-	    int	status = 0 ;
-	    status = (db->in_memory
-		      ? -1 
-		      : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
-	    if (status != 0)
-	      RETVAL = -1 ;
-	  }
-#else
-	  RETVAL = (db->in_memory
-		? -1 
-		: ((db->dbp)->fd)(db->dbp) ) ;
-#endif
-	OUTPUT:
-	  RETVAL
-
-int
-db_sync(db, flags=0)
-	DB_File		db
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	  CurrentDB = db ;
-	  RETVAL = db_sync(db, flags) ;
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-#endif
-	OUTPUT:
-	  RETVAL
-
-
-int
-db_seq(db, key, value, flags)
-	DB_File		db
-	DBTKEY		key 
-	DBT		value = NO_INIT
-	u_int		flags
-	PREINIT:
-	  dMY_CXT;
-	CODE:
-	  CurrentDB = db ;
-	  DBT_clear(value) ; 
-	  RETVAL = db_seq(db, key, value, flags);
-#ifdef DB_VERSION_MAJOR
-	  if (RETVAL > 0)
-	    RETVAL = -1 ;
-	  else if (RETVAL == DB_NOTFOUND)
-	    RETVAL = 1 ;
-#endif
-	OUTPUT:
-	  RETVAL
-	  key
-	  value
-
-SV *
-filter_fetch_key(db, code)
-	DB_File		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_fetch_key, code) ;
-
-SV *
-filter_store_key(db, code)
-	DB_File		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_store_key, code) ;
-
-SV *
-filter_fetch_value(db, code)
-	DB_File		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_fetch_value, code) ;
-
-SV *
-filter_store_value(db, code)
-	DB_File		db
-	SV *		code
-	SV *		RETVAL = &PL_sv_undef ;
-	CODE:
-	    DBM_setFilter(db->filter_store_value, code) ;
-
diff --git a/storage/bdb/perl/DB_File/DB_File_BS b/storage/bdb/perl/DB_File/DB_File_BS
deleted file mode 100644
index 9282c498811..00000000000
--- a/storage/bdb/perl/DB_File/DB_File_BS
+++ /dev/null
@@ -1,6 +0,0 @@
-# NeXT needs /usr/lib/libposix.a to load along with DB_File.so
-if ( $dlsrc eq "dl_next.xs" ) {
-    @DynaLoader::dl_resolve_using = ( '/usr/lib/libposix.a' );
-}
-
-1;
diff --git a/storage/bdb/perl/DB_File/MANIFEST b/storage/bdb/perl/DB_File/MANIFEST
deleted file mode 100644
index 06b70ee6866..00000000000
--- a/storage/bdb/perl/DB_File/MANIFEST
+++ /dev/null
@@ -1,31 +0,0 @@
-Changes
-DB_File.pm          
-DB_File.xs          
-DB_File_BS
-MANIFEST
-Makefile.PL         
-README
-config.in
-dbinfo
-fallback.h
-fallback.xs
-hints/dynixptx.pl
-hints/sco.pl
-patches/5.004
-patches/5.004_01
-patches/5.004_02
-patches/5.004_03
-patches/5.004_04
-patches/5.004_05
-patches/5.005
-patches/5.005_01
-patches/5.005_02
-patches/5.005_03
-patches/5.6.0
-ppport.h
-t/db-btree.t
-t/db-hash.t
-t/db-recno.t
-typemap
-version.c
-META.yml                                 Module meta-data (added by MakeMaker)
diff --git a/storage/bdb/perl/DB_File/META.yml b/storage/bdb/perl/DB_File/META.yml
deleted file mode 100644
index 2cb481b8cdb..00000000000
--- a/storage/bdb/perl/DB_File/META.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# http://module-build.sourceforge.net/META-spec.html
-#XXXXXXX This is a prototype!!!  It will change in the future!!! XXXXX#
-name:         DB_File
-version:      1.810
-version_from: DB_File.pm
-installdirs:  site
-requires:
-
-distribution_type: module
-generated_by: ExtUtils::MakeMaker version 6.21_02
diff --git a/storage/bdb/perl/DB_File/Makefile.PL b/storage/bdb/perl/DB_File/Makefile.PL
deleted file mode 100644
index a8c671002ba..00000000000
--- a/storage/bdb/perl/DB_File/Makefile.PL
+++ /dev/null
@@ -1,333 +0,0 @@
-#! perl -w
-
-use strict ;
-use ExtUtils::MakeMaker 5.16 ;
-use Config ;
-
-die "DB_File needs Perl 5.004_05 or better. This is $]\n" 
-    if $] <= 5.00404; 
-
-my $VER_INFO ;
-my $LIB_DIR ;
-my $INC_DIR ;
-my $DB_NAME ;
-my $LIBS ;
-my $COMPAT185 = "" ;
-
-ParseCONFIG() ;
-
-my @files = ('DB_File.pm', glob "t/*.t") ;
-UpDowngrade(@files);
-
-if (defined $DB_NAME)
-  { $LIBS = $DB_NAME }
-else {
-    if ($^O eq 'MSWin32')
-      { $LIBS = '-llibdb' }
-    else
-      { $LIBS = '-ldb' }
-}
-
-# Solaris is special.
-#$LIBS .= " -lthread" if $^O eq 'solaris' ;
-
-# AIX is special.
-$LIBS .= " -lpthread" if $^O eq 'aix' ;
-
-# OS2 is a special case, so check for it now.
-my $OS2 = "" ;
-$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
-
-my $WALL = '' ;
-#$WALL = ' -Wall ';
-
-WriteMakefile(
-	NAME 		=> 'DB_File',
-	LIBS 		=> ["-L${LIB_DIR} $LIBS"],
-        #MAN3PODS        => {},         # Pods will be built by installman.
-	INC		=> "-I$INC_DIR",
-	VERSION_FROM	=> 'DB_File.pm',
-	XSPROTOARG	=> '-noprototypes',
-	DEFINE		=> "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185 $WALL",
-	OBJECT		=> 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
-	#OPTIMIZE	=> '-g',
-	'depend'	=> { 'Makefile'          => 'config.in',
-                             'version$(OBJ_EXT)' => 'version.c'},
-	'clean'		=> { FILES => 'constants.h constants.xs' },
-	'macro'		=> { INSTALLDIRS => 'perl', my_files => "@files" },
-        'dist'          => { COMPRESS => 'gzip', SUFFIX => 'gz',
-			     DIST_DEFAULT => 'MyDoubleCheck tardist'},    
-	);
-
-
-my @names = qw(
-	BTREEMAGIC
-	BTREEVERSION
-	DB_LOCK
-	DB_SHMEM
-	DB_TXN
-	HASHMAGIC
-	HASHVERSION
-	MAX_PAGE_NUMBER
-	MAX_PAGE_OFFSET
-	MAX_REC_NUMBER
-	RET_ERROR
-	RET_SPECIAL
-	RET_SUCCESS
-	R_CURSOR
-	R_DUP
-	R_FIRST
-	R_FIXEDLEN
-	R_IAFTER
-	R_IBEFORE
-	R_LAST
-	R_NEXT
-	R_NOKEY
-	R_NOOVERWRITE
-	R_PREV
-	R_RECNOSYNC
-	R_SETCURSOR
-	R_SNAPSHOT
-	__R_UNUSED
-	);
-
-if (eval {require ExtUtils::Constant; 1}) {
-    # Check the constants above all appear in @EXPORT in DB_File.pm
-    my %names = map { $_, 1} @names;
-    open F, ")
-    {
-        last if /^\s*\@EXPORT\s+=\s+qw\(/ ;
-    }
-
-    while ()
-    {
-        last if /^\s*\)/ ;
-        /(\S+)/ ;
-        delete $names{$1} if defined $1 ;
-    }
-    close F ;
-
-    if ( keys %names )
-    {
-        my $missing = join ("\n\t", sort keys %names) ;
-        die "The following names are missing from \@EXPORT in DB_File.pm\n" .
-            "\t$missing\n" ;
-    }
-    
-
-    ExtUtils::Constant::WriteConstants(
-                                     NAME => 'DB_File',
-                                     NAMES => \@names,
-                                     C_FILE  => 'constants.h',
-                                     XS_FILE  => 'constants.xs',
-                                                                       
-                                    );
-} 
-else {
-    use File::Copy;
-    copy ('fallback.h', 'constants.h')
-      or die "Can't copy fallback.h to constants.h: $!";
-    copy ('fallback.xs', 'constants.xs')
-      or die "Can't copy fallback.xs to constants.xs: $!";
-}
-
-exit;
-
-
-sub MY::postamble { <<'EOM' } ;
-
-MyDoubleCheck:
-	@echo Checking config.in is setup for a release
-	@(grep "^LIB.*/usr/local/BerkeleyDB" config.in && 	\
-	grep "^INCLUDE.*/usr/local/BerkeleyDB" config.in &&	\
-	grep "^#DBNAME.*" config.in) >/dev/null ||		\
-	    (echo config.in needs fixing ; exit 1)
-	@echo config.in is ok
-	@echo 
-	@echo Checking DB_File.xs is ok for a release.
-	@(perl -ne ' exit 1 if /^\s*#\s*define\s+TRACE/ ; ' DB_File.xs || \
-	    (echo DB_File.xs needs fixing ; exit 1))
-	@echo DB_File.xs is ok
-	@echo 
-	@echo Checking for $$^W in files: $(my_files)
-	@perl -ne '						\
-	    exit 1 if /^\s*local\s*\(\s*\$$\^W\s*\)/;' $(my_files) ||	\
-	  (echo found unexpected $$^W ; exit 1)
-	@echo No $$^W found.
-	@echo 
-	@echo Checking for 'use vars' in files: $(my_files)
-	@perl -ne '						\
-	    exit 0 if /^__(DATA|END)__/;               	\
-	    exit 1 if /^\s*use\s+vars/;' $(my_files) ||	\
-	  (echo found unexpected "use vars"; exit 1)
-	@echo No 'use vars' found.
-	@echo 
-	@echo All files are OK for a release.
-	@echo 
-
-EOM
-
-
-
-sub ParseCONFIG
-{
-    my ($k, $v) ;
-    my @badkey = () ;
-    my %Info = () ;
-    my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
-    my %ValidOption = map {$_, 1} @Options ;
-    my %Parsed = %ValidOption ;
-    my $CONFIG = 'config.in' ;
-
-    print "Parsing $CONFIG...\n" ;
-
-    # DBNAME & COMPAT185 are optional, so pretend they  have 
-    # been parsed.
-    delete $Parsed{'DBNAME'} ;
-    delete $Parsed{'COMPAT185'} ;
-    $Info{COMPAT185} = "No" ;
-
-
-    open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
-    while () {
-	s/^\s*|\s*$//g ;
-	next if /^\s*$/ or /^\s*#/ ;
-	s/\s*#\s*$// ;
-
-	($k, $v) = split(/\s+=\s+/, $_, 2) ;
-	$k = uc $k ;
-	if ($ValidOption{$k}) {
-	    delete $Parsed{$k} ;
-	    $Info{$k} = $v ;
-	}
-	else {
-	    push(@badkey, $k) ;
-	}
-    }
-    close F ;
-
-    print "Unknown keys in $CONFIG ignored [@badkey]\n"
-	if @badkey ;
-
-    # check parsed values
-    my @missing = () ;
-    die "The following keys are missing from $CONFIG file: [@missing]\n" 
-        if @missing = keys %Parsed ;
-
-    $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
-    $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
-    $DB_NAME = $ENV{'DB_FILE_NAME'} || $Info{'DBNAME'} ;
-    $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API" 
-        if (defined $ENV{'DB_FILE_COMPAT185'} && 
-		$ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
-		$Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ; 
-    my $PREFIX  = $Info{'PREFIX'} ;
-    my $HASH    = $Info{'HASH'} ;
-
-    $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
-
-    print <)
-    {
-	print, last if /^__(END|DATA)__/ ;
-
-	&{ $our_sub }();
-	&{ $warn_sub }();
-	print ;
-    }
-
-    return if eof ;
-
-    while (<>)
-      { print }
-}
-
-# end of file Makefile.PL
diff --git a/storage/bdb/perl/DB_File/README b/storage/bdb/perl/DB_File/README
deleted file mode 100644
index 5a435fd0cee..00000000000
--- a/storage/bdb/perl/DB_File/README
+++ /dev/null
@@ -1,583 +0,0 @@
-                                     DB_File
-
-                                  Version 1.810
-
-                                 7th August 2004
-
- 	Copyright (c) 1995-2004 Paul Marquess. All rights reserved. This
-	program is free software; you can redistribute it and/or modify
-	it under the same terms as Perl itself.
-
-
-IMPORTANT NOTICE
-================
-
-If are using the locking technique described in older versions of
-DB_File, please read the section called "Locking: The Trouble with fd"
-in DB_File.pm immediately. The locking method has been found to be
-unsafe. You risk corrupting your data if you continue to use it.
-
-DESCRIPTION
------------
-
-DB_File is a module which allows Perl programs to make use of the
-facilities provided by Berkeley DB version 1. (DB_File can be built
-version 2, 3 or 4 of Berkeley DB, but it will only support the 1.x
-features),
-
-If you want to make use of the new features available in Berkeley DB
-2.x, 3.x or 4.x, use the Perl module BerkeleyDB instead.
-
-Berkeley DB is a C library which provides a consistent interface to a
-number of database formats. DB_File provides an interface to all three
-of the database types (hash, btree and recno) currently supported by
-Berkeley DB.
-
-For further details see the documentation included at the end of the
-file DB_File.pm.
-
-PREREQUISITES
--------------
-
-Before you can build DB_File you must have the following installed on
-your system:
-
-    * Perl 5.004_05 or greater.
-
-    * Berkeley DB.
-
-      The official web site for Berkeley DB is http://www.sleepycat.com.
-      The latest version of Berkeley DB is always available there. It
-      is recommended that you use the most recent version available at
-      the Sleepycat site.
-
-      The one exception to this advice is where you want to use DB_File
-      to access database files created by a third-party application, like
-      Sendmail or Netscape. In these cases you must build DB_File with a
-      compatible version of Berkeley DB.
-
-      If you want to use Berkeley DB 2.x, you must have version 2.3.4
-      or greater.  If you want to use Berkeley DB 3.x or 4.x, any version
-      will do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
-
-
-BUILDING THE MODULE
--------------------
-
-Assuming you have met all the prerequisites, building the module should
-be relatively straightforward.
-
-Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
-         to use Berkeley DB version 2, 3 or 4, read either the Solaris Notes
-         or HP-UX Notes sections below.  If you are running Linux please
-         read the Linux Notes section before proceeding.
-
-Step 2 : Edit the file config.in to suit you local installation.
-         Instructions are given in the file.
-
-Step 3 : Build and test the module using this sequence of commands:
-
-             perl Makefile.PL
-             make
-             make test
-
-
-  NOTE:
-      If you have a very old version of Berkeley DB (i.e. pre 1.85),
-      three of the tests in the recno test harness may fail (tests 51,
-      53 and 55). You can safely ignore the errors if you're never
-      going to use the broken functionality (recno databases with a
-      modified bval).  Otherwise you'll have to upgrade your DB
-      library.
-
-
-INSTALLATION
-------------
-
-    make install
-
-UPDATES
-=======
-
-The most recent version of DB_File is always available at 
-
-    http://www.cpan.org/modules/by-module/DB_File/
-
-TROUBLESHOOTING
-===============
-
-Here are some of the common problems people encounter when building
-DB_File.
-
-Missing db.h or libdb.a
------------------------
-
-If you get an error like this:
-
-  cc -c -I/usr/local/include -Dbool=char -DHAS_BOOL
-  -O2    -DVERSION=\"1.64\" -DXS_VERSION=\"1.64\" -fpic
-  -I/usr/local/lib/perl5/i586-linux/5.00404/CORE -DmDB_Prefix_t=size_t
-  -DmDB_Hash_t=u_int32_t DB_File.c
-  DB_File.xs:101: db.h: No such file or directory
-
-or this:
-
-  LD_RUN_PATH="/lib" cc -o blib/arch/auto/DB_File/DB_File.so  -shared
-  -L/usr/local/lib DB_File.o    -L/usr/local/lib -ldb
-  ld: cannot open -ldb: No such file or directory
-
-This symptom can imply:
-
- 1. You don't have Berkeley DB installed on your system at all.
-    Solution: get & install Berkeley DB.
-
- 2. You do have Berkeley DB installed, but it isn't in a standard place.
-    Solution: Edit config.in and set the LIB and INCLUDE variables to point
-              to the directories where libdb.a and db.h are installed.
-
-
-Undefined symbol db_version
----------------------------
-
-DB_File seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test
-  PERL_DL_NONLAZY=1 /usr/bin/perl5.00404 -I./blib/arch -I./blib/lib
-  -I/usr/local/lib/perl5/i586-linux/5.00404 -I/usr/local/lib/perl5 -e 'use
-  Test::Harness qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
-  t/db-btree..........Can't load './blib/arch/auto/DB_File/DB_File.so' for
-  module DB_File: ./blib/arch/auto/DB_File/DB_File.so: undefined symbol:
-  db_version at /usr/local/lib/perl5/i586-linux/5.00404/DynaLoader.pm
-  line 166.
-
-  at t/db-btree.t line 21
-  BEGIN failed--compilation aborted at t/db-btree.t line 21.
-  dubious Test returned status 2 (wstat 512, 0x200)
-
-This error usually happens when you have both version 1 and version
-2 of Berkeley DB installed on your system and DB_File attempts to
-build using the db.h for Berkeley DB version 2 and the version 1
-library. Unfortunately the two versions aren't compatible with each
-other. The undefined symbol error is actually caused because Berkeley
-DB version 1 doesn't have the symbol db_version.
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want DB_File to use.
-
-
-Undefined symbol dbopen
------------------------
-
-DB_File seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  ...
-  t/db-btree..........Can't load 'blib/arch/auto/DB_File/DB_File.so' for
-  module DB_File: blib/arch/auto/DB_File/DB_File.so: undefined symbol:
-  dbopen at /usr/local/lib/perl5/5.6.1/i586-linux/DynaLoader.pm line 206.
-   at t/db-btree.t line 23
-  Compilation failed in require at t/db-btree.t line 23.
-  ...
-
-This error usually happens when you have both version 1 and a more recent
-version of Berkeley DB installed on your system and DB_File attempts
-to build using the db.h for Berkeley DB version 1 and the newer version
-library. Unfortunately the two versions aren't compatible with each
-other. The undefined symbol error is actually caused because versions
-of Berkeley DB newer than version 1 doesn't have the symbol dbopen.
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want DB_File to use.
-
-
-Incompatible versions of db.h and libdb
----------------------------------------
-
-BerkeleyDB seems to have built correctly, but you get an error like this
-when you run the test harness:
-
-  $ make test
-  PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00560 -Iblib/arch
-  -Iblib/lib -I/home/paul/perl/install/5.005_60/lib/5.00560/i586-linux
-  -I/home/paul/perl/install/5.005_60/lib/5.00560 -e 'use Test::Harness
-  qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
-  t/db-btree..........
-  DB_File needs compatible versions of libdb & db.h
-          you have db.h version 2.3.7 and libdb version 2.7.5
-  BEGIN failed--compilation aborted at t/db-btree.t line 21.
-  ...
-
-Another variation on the theme of having two versions of Berkeley DB on
-your system.
-
-Solution: Setting the LIB & INCLUDE variables in config.in to point to the
-          correct directories can sometimes be enough to fix this
-          problem. If that doesn't work the easiest way to fix the
-          problem is to either delete or temporarily rename the copies
-          of db.h and libdb.a that you don't want BerkeleyDB to use.
-          If you are running Linux, please read the Linux Notes section
-          below.
-
-
-Solaris build fails with "language optional software package not installed"
----------------------------------------------------------------------------
-
-If you are trying to build this module under Solaris and you get an
-error message like this
-
-    /usr/ucb/cc: language optional software package not installed
-
-it means that Perl cannot find the C compiler on your system. The cryptic
-message is just Sun's way of telling you that you haven't bought their
-C compiler.
-
-When you build a Perl module that needs a C compiler, the Perl build
-system tries to use the same C compiler that was used to build perl
-itself. In this case your Perl binary was built with a C compiler that
-lived in /usr/ucb.
-
-To continue with building this module, you need to get a C compiler,
-or tell Perl where your C compiler is, if you already have one.
-
-Assuming you have now got a C compiler, what you do next will be dependant
-on what C compiler you have installed. If you have just installed Sun's
-C compiler, you shouldn't have to do anything. Just try rebuilding
-this module.
-
-If you have installed another C compiler, say gcc, you have to tell perl
-how to use it instead of /usr/ucb/cc.
-
-This set of options seems to work if you want to use gcc. Your mileage
-may vary.
-
-    perl Makefile.PL CC=gcc CCCDLFLAGS=-fPIC OPTIMIZE=" "
-    make test
-
-If that doesn't work for you, it's time to make changes to the Makefile
-by hand. Good luck!
-
-
-
-Solaris build fails with "gcc: unrecognized option `-KPIC'"
------------------------------------------------------------
-
-You are running Solaris and you get an error like this when you try to
-build this Perl module
-
-    gcc: unrecognized option `-KPIC'
-
-This symptom usually means that you are using a Perl binary that has been
-built with the Sun C compiler, but you are using gcc to build this module.
-
-When Perl builds modules that need a C compiler, it will attempt to use
-the same C compiler and command line options that was used to build perl
-itself. In this case "-KPIC" is a valid option for the Sun C compiler,
-but not for gcc. The equivalent option for gcc is "-fPIC".
-
-The solution is either:
-
-    1. Build both Perl and this module with the same C compiler, either
-       by using the Sun C compiler for both or gcc for both.
-
-    2. Try generating the Makefile for this module like this perl
-
-           perl Makefile.PL CC=gcc CCCDLFLAGS=-fPIC OPTIMIZE=" " LD=gcc
-           make test
-
-       This second option seems to work when mixing a Perl binary built
-       with the Sun C compiler and this module built with gcc. Your
-       mileage may vary.
-
-
-
-
-Linux Notes
------------
-
-Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
-that has version 2.x of Berkeley DB linked into it. This makes it
-difficult to build this module with anything other than the version of
-Berkeley DB that shipped with your Linux release. If you do try to use
-a different version of Berkeley DB you will most likely get the error
-described in the "Incompatible versions of db.h and libdb" section of
-this file.
-
-To make matters worse, prior to Perl 5.6.1, the perl binary itself
-*always* included the Berkeley DB library.
-
-If you want to use a newer version of Berkeley DB with this module, the
-easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
-(or better).
-
-There are two approaches you can use to get older versions of Perl to
-work with specific versions of Berkeley DB. Both have their advantages
-and disadvantages.
-
-The first approach will only work when you want to build a version of
-Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
-Berkeley DB 2.x, you must use the next approach. This approach involves
-rebuilding your existing version of Perl after applying an unofficial
-patch. The "patches" directory in the this module's source distribution
-contains a number of patch files. There is one patch file for every
-stable version of Perl since 5.004. Apply the appropriate patch to your
-Perl source tree before re-building and installing Perl from scratch.
-For example, assuming you are in the top-level source directory for
-Perl 5.6.0, the command below will apply the necessary patch. Remember
-to replace the path shown below with one that points to this module's
-patches directory.
-
-    patch -p1 -N 
diff --git a/storage/bdb/perl/DB_File/config.in b/storage/bdb/perl/DB_File/config.in
deleted file mode 100644
index 292b09a5fb3..00000000000
--- a/storage/bdb/perl/DB_File/config.in
+++ /dev/null
@@ -1,97 +0,0 @@
-# Filename:	config.in
-#
-# written by Paul Marquess 
-# last modified 9th Sept 1997
-# version 1.55
-
-# 1. Where is the file db.h?
-#
-#    Change the path below to point to the directory where db.h is
-#    installed on your system.
-
-INCLUDE	= /usr/local/BerkeleyDB/include
-#INCLUDE	= /usr/local/include
-#INCLUDE	= /usr/include
-
-# 2. Where is libdb?
-#
-#    Change the path below to point to the directory where libdb is
-#    installed on your system.
-
-LIB	= /usr/local/BerkeleyDB/lib
-#LIB	= /usr/local/lib
-#LIB	= /usr/lib
-
-# 3. What version of Berkely DB have you got?
-#
-#    If you have version 2.0 or greater, you can skip this question.
-#
-#    If you have Berkeley DB 1.78 or greater you shouldn't have to
-#    change the definitions for PREFIX and HASH below.
-#
-#    For older versions of Berkeley DB change both PREFIX and HASH to int.
-#    Version 1.71, 1.72 and 1.73 are known to need this change.
-#
-#    If you don't know what version you have have a look in the file db.h. 
-#
-#    Search for the string "DB_VERSION_MAJOR". If it is present, you
-#    have Berkeley DB version 2 (or greater).
-#
-#    If that didn't work, find the definition of the BTREEINFO typedef.
-#    Check the return type from the prefix element. It should look like
-#    this in an older copy of db.h:
-#
-#        int      (*prefix)      __P((const DBT *, const DBT *));  
-#
-#    and like this in a more recent copy:
-#
-#        size_t  (*prefix)       /* prefix function */
-#            __P((const DBT *, const DBT *));
-#
-#    Change the definition of PREFIX, below, to reflect the return type
-#    of the prefix function in your db.h.
-#
-#    Now find the definition of the HASHINFO typedef. Check the return
-#    type of the hash element. Older versions look like this:
-#
-#        int      (*hash) __P((const void *, size_t));      
-#
-#    newer like this:
-#
-#        u_int32_t               /* hash function */
-#                (*hash) __P((const void *, size_t));
-#
-#    Change the definition of HASH, below, to reflect the return type of
-#    the hash function in your db.h.
-#
-
-PREFIX	=	size_t
-HASH	=	u_int32_t
-
-# 4. Is the library called libdb?
-#
-#    If you have copies of both 1.x and 2.x Berkeley DB installed on
-#    your system it can sometimes be tricky to make sure you are using
-#    the correct one. Renaming one (or creating a symbolic link) to
-#    include the version number of the library can help.
-#
-#    For example, if you have both Berkeley DB 2.3.12 and 1.85 on your
-#    system and you want to use the Berkeley DB version 2 library you
-#    could rename the version 2 library from libdb.a to libdb-2.3.12.a and
-#    change the DBNAME line below to look like this:
-#
-#        DBNAME = -ldb-2.3.12
-#
-#    That will ensure you are linking the correct version of the DB
-#    library.
-#
-#    Note: If you are building this module with Win32, -llibdb will be
-#    used by default.
-#
-#    If you have changed the name of the library, uncomment the line
-#    below (by removing the leading #) and edit the line to use the name
-#    you have picked.
- 
-#DBNAME = -ldb-2.4.10
-
-# end of file config.in
diff --git a/storage/bdb/perl/DB_File/dbinfo b/storage/bdb/perl/DB_File/dbinfo
deleted file mode 100644
index 421d36c6260..00000000000
--- a/storage/bdb/perl/DB_File/dbinfo
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/local/bin/perl
-
-# Name:		dbinfo -- identify berkeley DB version used to create 
-#			  a database file
-#
-# Author:	Paul Marquess  
-# Version: 	1.05 
-# Date		1sh November 2003
-#
-#     Copyright (c) 1998-2003 Paul Marquess. All rights reserved.
-#     This program is free software; you can redistribute it and/or
-#     modify it under the same terms as Perl itself.
-
-# Todo: Print more stats on a db file, e.g. no of records
-#       add log/txn/lock files
-
-use strict ;
-
-my %Data =
-	(
-	0x053162 =>	{
-			  Type 	   => "Btree",
-			  Versions => 
-				{
-				  1	=> [0, "Unknown (older than 1.71)"],
-				  2	=> [0, "Unknown (older than 1.71)"],
-				  3	=> [0, "1.71 -> 1.85, 1.86"],
-				  4	=> [0, "Unknown"],
-				  5	=> [0, "2.0.0 -> 2.3.0"],
-				  6	=> [0, "2.3.1 -> 2.7.7"],
-				  7	=> [0, "3.0.x"],
-				  8	=> [0, "3.1.x -> 4.0.x"],
-				  9	=> [1, "4.1.x or greater"],
-				}
-			},
-	0x061561 => 	{
-			  Type     => "Hash",
-			  Versions =>
-				{
-				  1	=> [0, "Unknown (older than 1.71)"],
-        			  2     => [0, "1.71 -> 1.85"],
-        			  3     => [0, "1.86"],
-        			  4     => [0, "2.0.0 -> 2.1.0"],
-        			  5     => [0, "2.2.6 -> 2.7.7"],
-        			  6     => [0, "3.0.x"],
-				  7	=> [0, "3.1.x -> 4.0.x"],
-				  8	=> [1, "4.1.x or greater"],
-				}
-			},
-	0x042253 => 	{
-			  Type     => "Queue",
-			  Versions =>
-				{
-				  1	=> [0, "3.0.x"],
-				  2	=> [0, "3.1.x"],
-				  3	=> [0, "3.2.x -> 4.0.x"],
-				  4	=> [1, "4.1.x or greater"],
-				}
-			},
-	) ;
-
-die "Usage: dbinfo file\n" unless @ARGV == 1 ;
-
-print "testing file $ARGV[0]...\n\n" ;
-open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
-
-my $buff ;
-read F, $buff, 30 ;
-
-
-my (@info) = unpack("NNNNNNC", $buff) ;
-my (@info1) = unpack("VVVVVVC", $buff) ;
-my ($magic, $version, $endian, $encrypt) ;
-
-if ($Data{$info[0]}) # first try DB 1.x format, big endian
-{
-    $magic = $info[0] ;
-    $version = $info[1] ;
-    $endian  = "Big Endian" ;
-    $encrypt = "Not Supported";
-}
-elsif ($Data{$info1[0]}) # first try DB 1.x format, little endian
-{
-    $magic = $info1[0] ;
-    $version = $info1[1] ;
-    $endian  = "Little Endian" ;
-    $encrypt = "Not Supported";
-}
-elsif ($Data{$info[3]}) # next DB 2.x big endian
-{
-    $magic = $info[3] ;
-    $version = $info[4] ;
-    $endian  = "Big Endian" ;
-}
-elsif ($Data{$info1[3]}) # next DB 2.x little endian
-{
-    $magic = $info1[3] ;
-    $version = $info1[4] ;
-    $endian  = "Little Endian" ;
-}
-else
-  { die "not a Berkeley DB database file.\n" }
-
-my $type = $Data{$magic} ;
-$magic = sprintf "%06X", $magic ;
-
-my $ver_string = "Unknown" ;
-
-if ( defined $type->{Versions}{$version} )
-{
-     $ver_string = $type->{Versions}{$version}[1];
-     if ($type->{Versions}{$version}[0] )
-       { $encrypt = $info[6] ? "Enabled" : "Disabled" }
-     else
-       { $encrypt = "Not Supported" }
-}
-
-print <{Type} file.
-File Version ID:	$version
-Built with Berkeley DB:	$ver_string
-Byte Order:		$endian
-Magic:			$magic
-Encryption:             $encrypt
-EOM
-
-close F ;
-
-exit ;
diff --git a/storage/bdb/perl/DB_File/fallback.h b/storage/bdb/perl/DB_File/fallback.h
deleted file mode 100644
index 0213308a0ee..00000000000
--- a/storage/bdb/perl/DB_File/fallback.h
+++ /dev/null
@@ -1,455 +0,0 @@
-#define PERL_constant_NOTFOUND	1
-#define PERL_constant_NOTDEF	2
-#define PERL_constant_ISIV	3
-#define PERL_constant_ISNO	4
-#define PERL_constant_ISNV	5
-#define PERL_constant_ISPV	6
-#define PERL_constant_ISPVN	7
-#define PERL_constant_ISSV	8
-#define PERL_constant_ISUNDEF	9
-#define PERL_constant_ISUV	10
-#define PERL_constant_ISYES	11
-
-#ifndef NVTYPE
-typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it.  */
-#endif
-#ifndef aTHX_
-#define aTHX_ /* 5.6 or later define this for threading support.  */
-#endif
-#ifndef pTHX_
-#define pTHX_ /* 5.6 or later define this for threading support.  */
-#endif
-
-static int
-constant_6 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_TXN R_LAST R_NEXT R_PREV */
-  /* Offset 2 gives the best switch position.  */
-  switch (name[2]) {
-  case 'L':
-    if (memEQ(name, "R_LAST", 6)) {
-    /*                 ^         */
-#ifdef R_LAST
-      *iv_return = R_LAST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "R_NEXT", 6)) {
-    /*                 ^         */
-#ifdef R_NEXT
-      *iv_return = R_NEXT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "R_PREV", 6)) {
-    /*                 ^         */
-#ifdef R_PREV
-      *iv_return = R_PREV;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case '_':
-    if (memEQ(name, "DB_TXN", 6)) {
-    /*                 ^         */
-#ifdef DB_TXN
-      *iv_return = DB_TXN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_7 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_LOCK R_FIRST R_NOKEY */
-  /* Offset 3 gives the best switch position.  */
-  switch (name[3]) {
-  case 'I':
-    if (memEQ(name, "R_FIRST", 7)) {
-    /*                  ^         */
-#ifdef R_FIRST
-      *iv_return = R_FIRST;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "DB_LOCK", 7)) {
-    /*                  ^         */
-#ifdef DB_LOCK
-      *iv_return = DB_LOCK;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "R_NOKEY", 7)) {
-    /*                  ^         */
-#ifdef R_NOKEY
-      *iv_return = R_NOKEY;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_8 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     DB_SHMEM R_CURSOR R_IAFTER */
-  /* Offset 5 gives the best switch position.  */
-  switch (name[5]) {
-  case 'M':
-    if (memEQ(name, "DB_SHMEM", 8)) {
-    /*                    ^        */
-#ifdef DB_SHMEM
-      *iv_return = DB_SHMEM;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "R_CURSOR", 8)) {
-    /*                    ^        */
-#ifdef R_CURSOR
-      *iv_return = R_CURSOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'T':
-    if (memEQ(name, "R_IAFTER", 8)) {
-    /*                    ^        */
-#ifdef R_IAFTER
-      *iv_return = R_IAFTER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_9 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     HASHMAGIC RET_ERROR R_IBEFORE */
-  /* Offset 7 gives the best switch position.  */
-  switch (name[7]) {
-  case 'I':
-    if (memEQ(name, "HASHMAGIC", 9)) {
-    /*                      ^       */
-#ifdef HASHMAGIC
-      *iv_return = HASHMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'O':
-    if (memEQ(name, "RET_ERROR", 9)) {
-    /*                      ^       */
-#ifdef RET_ERROR
-      *iv_return = RET_ERROR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "R_IBEFORE", 9)) {
-    /*                      ^       */
-#ifdef R_IBEFORE
-      *iv_return = R_IBEFORE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_10 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     BTREEMAGIC R_FIXEDLEN R_SNAPSHOT __R_UNUSED */
-  /* Offset 5 gives the best switch position.  */
-  switch (name[5]) {
-  case 'E':
-    if (memEQ(name, "R_FIXEDLEN", 10)) {
-    /*                    ^           */
-#ifdef R_FIXEDLEN
-      *iv_return = R_FIXEDLEN;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'M':
-    if (memEQ(name, "BTREEMAGIC", 10)) {
-    /*                    ^           */
-#ifdef BTREEMAGIC
-      *iv_return = BTREEMAGIC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "__R_UNUSED", 10)) {
-    /*                    ^           */
-#ifdef __R_UNUSED
-      *iv_return = __R_UNUSED;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'P':
-    if (memEQ(name, "R_SNAPSHOT", 10)) {
-    /*                    ^           */
-#ifdef R_SNAPSHOT
-      *iv_return = R_SNAPSHOT;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant_11 (pTHX_ const char *name, IV *iv_return) {
-  /* When generated this function returned values for the list of names given
-     here.  However, subsequent manual editing may have added or removed some.
-     HASHVERSION RET_SPECIAL RET_SUCCESS R_RECNOSYNC R_SETCURSOR */
-  /* Offset 10 gives the best switch position.  */
-  switch (name[10]) {
-  case 'C':
-    if (memEQ(name, "R_RECNOSYNC", 11)) {
-    /*                         ^       */
-#ifdef R_RECNOSYNC
-      *iv_return = R_RECNOSYNC;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'L':
-    if (memEQ(name, "RET_SPECIAL", 11)) {
-    /*                         ^       */
-#ifdef RET_SPECIAL
-      *iv_return = RET_SPECIAL;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'N':
-    if (memEQ(name, "HASHVERSION", 11)) {
-    /*                         ^       */
-#ifdef HASHVERSION
-      *iv_return = HASHVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'R':
-    if (memEQ(name, "R_SETCURSOR", 11)) {
-    /*                         ^       */
-#ifdef R_SETCURSOR
-      *iv_return = R_SETCURSOR;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 'S':
-    if (memEQ(name, "RET_SUCCESS", 11)) {
-    /*                         ^       */
-#ifdef RET_SUCCESS
-      *iv_return = RET_SUCCESS;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
-static int
-constant (pTHX_ const char *name, STRLEN len, IV *iv_return) {
-  /* Initially switch on the length of the name.  */
-  /* When generated this function returned values for the list of names given
-     in this section of perl code.  Rather than manually editing these functions
-     to add or remove constants, which would result in this comment and section
-     of code becoming inaccurate, we recommend that you edit this section of
-     code, and use it to regenerate a new set of constant functions which you
-     then use to replace the originals.
-
-     Regenerate these constant functions by feeding this entire source file to
-     perl -x
-
-#!bleedperl -w
-use ExtUtils::Constant qw (constant_types C_constant XS_constant);
-
-my $types = {map {($_, 1)} qw(IV)};
-my @names = (qw(BTREEMAGIC BTREEVERSION DB_LOCK DB_SHMEM DB_TXN HASHMAGIC
-	       HASHVERSION MAX_PAGE_NUMBER MAX_PAGE_OFFSET MAX_REC_NUMBER
-	       RET_ERROR RET_SPECIAL RET_SUCCESS R_CURSOR R_DUP R_FIRST
-	       R_FIXEDLEN R_IAFTER R_IBEFORE R_LAST R_NEXT R_NOKEY
-	       R_NOOVERWRITE R_PREV R_RECNOSYNC R_SETCURSOR R_SNAPSHOT
-	       __R_UNUSED));
-
-print constant_types(); # macro defs
-foreach (C_constant ("DB_File", 'constant', 'IV', $types, undef, 3, @names) ) {
-    print $_, "\n"; # C constant subs
-}
-print "#### XS Section:\n";
-print XS_constant ("DB_File", $types);
-__END__
-   */
-
-  switch (len) {
-  case 5:
-    if (memEQ(name, "R_DUP", 5)) {
-#ifdef R_DUP
-      *iv_return = R_DUP;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 6:
-    return constant_6 (aTHX_ name, iv_return);
-    break;
-  case 7:
-    return constant_7 (aTHX_ name, iv_return);
-    break;
-  case 8:
-    return constant_8 (aTHX_ name, iv_return);
-    break;
-  case 9:
-    return constant_9 (aTHX_ name, iv_return);
-    break;
-  case 10:
-    return constant_10 (aTHX_ name, iv_return);
-    break;
-  case 11:
-    return constant_11 (aTHX_ name, iv_return);
-    break;
-  case 12:
-    if (memEQ(name, "BTREEVERSION", 12)) {
-#ifdef BTREEVERSION
-      *iv_return = BTREEVERSION;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 13:
-    if (memEQ(name, "R_NOOVERWRITE", 13)) {
-#ifdef R_NOOVERWRITE
-      *iv_return = R_NOOVERWRITE;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 14:
-    if (memEQ(name, "MAX_REC_NUMBER", 14)) {
-#ifdef MAX_REC_NUMBER
-      *iv_return = MAX_REC_NUMBER;
-      return PERL_constant_ISIV;
-#else
-      return PERL_constant_NOTDEF;
-#endif
-    }
-    break;
-  case 15:
-    /* Names all of length 15.  */
-    /* MAX_PAGE_NUMBER MAX_PAGE_OFFSET */
-    /* Offset 9 gives the best switch position.  */
-    switch (name[9]) {
-    case 'N':
-      if (memEQ(name, "MAX_PAGE_NUMBER", 15)) {
-      /*                        ^            */
-#ifdef MAX_PAGE_NUMBER
-        *iv_return = MAX_PAGE_NUMBER;
-        return PERL_constant_ISIV;
-#else
-        return PERL_constant_NOTDEF;
-#endif
-      }
-      break;
-    case 'O':
-      if (memEQ(name, "MAX_PAGE_OFFSET", 15)) {
-      /*                        ^            */
-#ifdef MAX_PAGE_OFFSET
-        *iv_return = MAX_PAGE_OFFSET;
-        return PERL_constant_ISIV;
-#else
-        return PERL_constant_NOTDEF;
-#endif
-      }
-      break;
-    }
-    break;
-  }
-  return PERL_constant_NOTFOUND;
-}
-
diff --git a/storage/bdb/perl/DB_File/fallback.xs b/storage/bdb/perl/DB_File/fallback.xs
deleted file mode 100644
index 8650cdf7646..00000000000
--- a/storage/bdb/perl/DB_File/fallback.xs
+++ /dev/null
@@ -1,88 +0,0 @@
-void
-constant(sv)
-    PREINIT:
-#ifdef dXSTARG
-	dXSTARG; /* Faster if we have it.  */
-#else
-	dTARGET;
-#endif
-	STRLEN		len;
-        int		type;
-	IV		iv;
-	/* NV		nv;	Uncomment this if you need to return NVs */
-	/* const char	*pv;	Uncomment this if you need to return PVs */
-    INPUT:
-	SV *		sv;
-        const char *	s = SvPV(sv, len);
-    PPCODE:
-        /* Change this to constant(aTHX_ s, len, &iv, &nv);
-           if you need to return both NVs and IVs */
-	type = constant(aTHX_ s, len, &iv);
-      /* Return 1 or 2 items. First is error message, or undef if no error.
-           Second, if present, is found value */
-        switch (type) {
-        case PERL_constant_NOTFOUND:
-          sv = sv_2mortal(newSVpvf("%s is not a valid DB_File macro", s));
-          PUSHs(sv);
-          break;
-        case PERL_constant_NOTDEF:
-          sv = sv_2mortal(newSVpvf(
-	    "Your vendor has not defined DB_File macro %s, used", s));
-          PUSHs(sv);
-          break;
-        case PERL_constant_ISIV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHi(iv);
-          break;
-	/* Uncomment this if you need to return NOs
-        case PERL_constant_ISNO:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(&PL_sv_no);
-          break; */
-	/* Uncomment this if you need to return NVs
-        case PERL_constant_ISNV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHn(nv);
-          break; */
-	/* Uncomment this if you need to return PVs
-        case PERL_constant_ISPV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHp(pv, strlen(pv));
-          break; */
-	/* Uncomment this if you need to return PVNs
-        case PERL_constant_ISPVN:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHp(pv, iv);
-          break; */
-	/* Uncomment this if you need to return SVs
-        case PERL_constant_ISSV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(sv);
-          break; */
-	/* Uncomment this if you need to return UNDEFs
-        case PERL_constant_ISUNDEF:
-          break; */
-	/* Uncomment this if you need to return UVs
-        case PERL_constant_ISUV:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHu((UV)iv);
-          break; */
-	/* Uncomment this if you need to return YESs
-        case PERL_constant_ISYES:
-          EXTEND(SP, 1);
-          PUSHs(&PL_sv_undef);
-          PUSHs(&PL_sv_yes);
-          break; */
-        default:
-          sv = sv_2mortal(newSVpvf(
-	    "Unexpected return type %d while processing DB_File macro %s, used",
-               type, s));
-          PUSHs(sv);
-        }
diff --git a/storage/bdb/perl/DB_File/hints/dynixptx.pl b/storage/bdb/perl/DB_File/hints/dynixptx.pl
deleted file mode 100644
index bb5ffa56e6b..00000000000
--- a/storage/bdb/perl/DB_File/hints/dynixptx.pl
+++ /dev/null
@@ -1,3 +0,0 @@
-# Need to add an extra '-lc' to the end to work around a DYNIX/ptx bug
-
-$self->{LIBS} = ['-lm -lc'];
diff --git a/storage/bdb/perl/DB_File/hints/sco.pl b/storage/bdb/perl/DB_File/hints/sco.pl
deleted file mode 100644
index ff604409496..00000000000
--- a/storage/bdb/perl/DB_File/hints/sco.pl
+++ /dev/null
@@ -1,2 +0,0 @@
-# osr5 needs to explicitly link against libc to pull in some static symbols
-$self->{LIBS} = ['-ldb -lc'] if $Config{'osvers'} =~ '3\.2v5\.0\..' ;
diff --git a/storage/bdb/perl/DB_File/patches/5.004 b/storage/bdb/perl/DB_File/patches/5.004
deleted file mode 100644
index 0665d1f6c40..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004
+++ /dev/null
@@ -1,93 +0,0 @@
-diff -rc perl5.004.orig/Configure perl5.004/Configure
-*** perl5.004.orig/Configure	1997-05-13 18:20:34.000000000 +0100
---- perl5.004/Configure	2003-04-26 16:36:53.000000000 +0100
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9902,9907 ****
---- 9903,9916 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10370,10375 ****
---- 10379,10385 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
-*** perl5.004.orig/Makefile.SH	1997-05-01 15:22:39.000000000 +0100
---- perl5.004/Makefile.SH	2003-04-26 16:37:23.000000000 +0100
-***************
-*** 119,125 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 119,125 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004.orig/myconfig perl5.004/myconfig
-*** perl5.004.orig/myconfig	1996-12-21 01:13:20.000000000 +0000
---- perl5.004/myconfig	2003-04-26 16:37:51.000000000 +0100
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
-*** perl5.004.orig/patchlevel.h	1997-05-15 23:15:17.000000000 +0100
---- perl5.004/patchlevel.h	2003-04-26 16:38:11.000000000 +0100
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.004_01 b/storage/bdb/perl/DB_File/patches/5.004_01
deleted file mode 100644
index 1b05eb4e02b..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004_01
+++ /dev/null
@@ -1,217 +0,0 @@
-diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
-*** perl5.004_01.orig/Configure	Wed Jun 11 00:28:03 1997
---- perl5.004_01/Configure	Sun Nov 12 22:12:35 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9907,9912 ****
---- 9908,9921 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10375,10380 ****
---- 10384,10390 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
-*** perl5.004_01.orig/Makefile.SH	Thu Jun 12 23:27:56 1997
---- perl5.004_01/Makefile.SH	Sun Nov 12 22:12:35 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
-*** perl5.004_01.orig/lib/ExtUtils/Embed.pm	Wed Apr  2 22:12:04 1997
---- perl5.004_01/lib/ExtUtils/Embed.pm	Sun Nov 12 22:12:35 2000
-***************
-*** 170,176 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 170,176 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
-*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm	Sat Jun  7 01:19:44 1997
---- perl5.004_01/lib/ExtUtils/Liblist.pm	Sun Nov 12 22:13:27 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $Verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $Verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $Verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $Verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm	Thu Jun 12 22:06:18 1997
---- perl5.004_01/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 22:12:35 2000
-***************
-*** 2137,2143 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2137,2143 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
-*** perl5.004_01.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_01/myconfig	Sun Nov 12 22:12:35 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
-*** perl5.004_01.orig/patchlevel.h	Wed Jun 11 03:06:10 1997
---- perl5.004_01/patchlevel.h	Sun Nov 12 22:12:35 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.004_02 b/storage/bdb/perl/DB_File/patches/5.004_02
deleted file mode 100644
index 238f8737941..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004_02
+++ /dev/null
@@ -1,217 +0,0 @@
-diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
-*** perl5.004_02.orig/Configure	Thu Aug  7 15:08:44 1997
---- perl5.004_02/Configure	Sun Nov 12 22:06:24 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9911,9916 ****
---- 9912,9925 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10379,10384 ****
---- 10388,10394 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
-*** perl5.004_02.orig/Makefile.SH	Thu Aug  7 13:10:53 1997
---- perl5.004_02/Makefile.SH	Sun Nov 12 22:06:24 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
-*** perl5.004_02.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_02/lib/ExtUtils/Embed.pm	Sun Nov 12 22:06:24 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
-*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm	Fri Aug  1 19:36:58 1997
---- perl5.004_02/lib/ExtUtils/Liblist.pm	Sun Nov 12 22:06:24 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm	Tue Aug  5 14:28:08 1997
---- perl5.004_02/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 22:06:25 2000
-***************
-*** 2224,2230 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2224,2230 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
-*** perl5.004_02.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_02/myconfig	Sun Nov 12 22:06:25 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
-*** perl5.004_02.orig/patchlevel.h	Fri Aug  1 15:07:34 1997
---- perl5.004_02/patchlevel.h	Sun Nov 12 22:06:25 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.004_03 b/storage/bdb/perl/DB_File/patches/5.004_03
deleted file mode 100644
index 06331eac922..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004_03
+++ /dev/null
@@ -1,223 +0,0 @@
-diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
-*** perl5.004_03.orig/Configure	Wed Aug 13 16:09:46 1997
---- perl5.004_03/Configure	Sun Nov 12 21:56:18 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9911,9916 ****
---- 9912,9925 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10379,10384 ****
---- 10388,10394 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-Only in perl5.004_03: Configure.orig
-diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
-*** perl5.004_03.orig/Makefile.SH	Mon Aug 18 19:24:29 1997
---- perl5.004_03/Makefile.SH	Sun Nov 12 21:56:18 2000
-***************
-*** 126,132 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 126,132 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-Only in perl5.004_03: Makefile.SH.orig
-diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
-*** perl5.004_03.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_03/lib/ExtUtils/Embed.pm	Sun Nov 12 21:56:18 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
-*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm	Fri Aug  1 19:36:58 1997
---- perl5.004_03/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:57:17 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 186,196 ****
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{libs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 186,196 ----
-      my($self, $potential_libs, $verbose) = @_;
-  
-      # If user did not supply a list, we punt.
-!     # (caller should probably use the list in $Config{perllibs})
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 540,546 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 540,546 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
-Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
-diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm	Mon Aug 18 19:16:12 1997
---- perl5.004_03/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:56:19 2000
-***************
-*** 2224,2230 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2224,2230 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
-diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
-*** perl5.004_03.orig/myconfig	Sat Dec 21 01:13:20 1996
---- perl5.004_03/myconfig	Sun Nov 12 21:56:19 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
-*** perl5.004_03.orig/patchlevel.h	Wed Aug 13 11:42:01 1997
---- perl5.004_03/patchlevel.h	Sun Nov 12 21:56:19 2000
-***************
-*** 38,43 ****
---- 38,44 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
-Only in perl5.004_03: patchlevel.h.orig
diff --git a/storage/bdb/perl/DB_File/patches/5.004_04 b/storage/bdb/perl/DB_File/patches/5.004_04
deleted file mode 100644
index a227dc700d9..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004_04
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
-*** perl5.004_04.orig/Configure	Fri Oct  3 18:57:39 1997
---- perl5.004_04/Configure	Sun Nov 12 21:50:51 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 9910,9915 ****
---- 9911,9924 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10378,10383 ****
---- 10387,10393 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
-*** perl5.004_04.orig/Makefile.SH	Wed Oct 15 10:33:16 1997
---- perl5.004_04/Makefile.SH	Sun Nov 12 21:50:51 2000
-***************
-*** 129,135 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 129,135 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
-*** perl5.004_04.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_04/lib/ExtUtils/Embed.pm	Sun Nov 12 21:50:51 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
-*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm	Tue Sep  9 17:41:32 1997
---- perl5.004_04/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:51:33 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 189,195 ****
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
---- 189,195 ----
-      return ("", "", "", "") unless $potential_libs;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my($libpth) = $Config{'libpth'};
-      my($libext) = $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 539,545 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 539,545 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm	Wed Oct  8 14:13:51 1997
---- perl5.004_04/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:50:51 2000
-***************
-*** 2229,2235 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2229,2235 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
-*** perl5.004_04.orig/myconfig	Mon Oct  6 18:26:49 1997
---- perl5.004_04/myconfig	Sun Nov 12 21:50:51 2000
-***************
-*** 35,41 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 35,41 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
-*** perl5.004_04.orig/patchlevel.h	Wed Oct 15 10:55:19 1997
---- perl5.004_04/patchlevel.h	Sun Nov 12 21:50:51 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-  /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */ 
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.004_05 b/storage/bdb/perl/DB_File/patches/5.004_05
deleted file mode 100644
index 51c8bf35009..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.004_05
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
-*** perl5.004_05.orig/Configure	Thu Jan  6 22:05:49 2000
---- perl5.004_05/Configure	Sun Nov 12 21:36:25 2000
-***************
-*** 188,193 ****
---- 188,194 ----
-  mv=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 10164,10169 ****
---- 10165,10178 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 10648,10653 ****
---- 10657,10663 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
-*** perl5.004_05.orig/Makefile.SH	Thu Jan  6 22:05:49 2000
---- perl5.004_05/Makefile.SH	Sun Nov 12 21:36:25 2000
-***************
-*** 151,157 ****
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 151,157 ----
-  ext = \$(dynamic_ext) \$(static_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
-*** perl5.004_05.orig/lib/ExtUtils/Embed.pm	Fri Aug  1 15:08:44 1997
---- perl5.004_05/lib/ExtUtils/Embed.pm	Sun Nov 12 21:36:25 2000
-***************
-*** 178,184 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 178,184 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
-*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm	Thu Jan  6 22:05:54 2000
---- perl5.004_05/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:45:31 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 590,596 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 590,596 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
-*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm	Thu Jan  6 22:05:54 2000
---- perl5.004_05/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:36:25 2000
-***************
-*** 2246,2252 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2246,2252 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
-*** perl5.004_05.orig/myconfig	Thu Jan  6 22:05:55 2000
---- perl5.004_05/myconfig	Sun Nov 12 21:43:54 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so
-      useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
-*** perl5.004_05.orig/patchlevel.h	Thu Jan  6 22:05:48 2000
---- perl5.004_05/patchlevel.h	Sun Nov 12 21:36:25 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-  /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */ 
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.005 b/storage/bdb/perl/DB_File/patches/5.005
deleted file mode 100644
index effee3e8275..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.005
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.005.orig/Configure perl5.005/Configure
-*** perl5.005.orig/Configure	Wed Jul 15 08:05:44 1998
---- perl5.005/Configure	Sun Nov 12 21:30:40 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11279,11284 ****
---- 11280,11293 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11804,11809 ****
---- 11813,11819 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
-*** perl5.005.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005/Makefile.SH	Sun Nov 12 21:30:40 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
-*** perl5.005.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005/lib/ExtUtils/Embed.pm	Sun Nov 12 21:30:40 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
-*** perl5.005.orig/lib/ExtUtils/Liblist.pm	Wed Jul 22 07:09:42 1998
---- perl5.005/lib/ExtUtils/Liblist.pm	Sun Nov 12 21:30:40 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 290,296 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 290,296 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 598,604 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 598,604 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
-*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 21:30:41 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.005.orig/myconfig perl5.005/myconfig
-*** perl5.005.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005/myconfig	Sun Nov 12 21:30:41 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
-*** perl5.005.orig/patchlevel.h	Wed Jul 22 19:22:01 1998
---- perl5.005/patchlevel.h	Sun Nov 12 21:30:41 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.005_01 b/storage/bdb/perl/DB_File/patches/5.005_01
deleted file mode 100644
index 2a05dd545f6..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.005_01
+++ /dev/null
@@ -1,209 +0,0 @@
-diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
-*** perl5.005_01.orig/Configure	Wed Jul 15 08:05:44 1998
---- perl5.005_01/Configure	Sun Nov 12 20:55:58 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11279,11284 ****
---- 11280,11293 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11804,11809 ****
---- 11813,11819 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
-*** perl5.005_01.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005_01/Makefile.SH	Sun Nov 12 20:55:58 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
-*** perl5.005_01.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005_01/lib/ExtUtils/Embed.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
-*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm	Wed Jul 22 07:09:42 1998
---- perl5.005_01/lib/ExtUtils/Liblist.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 290,296 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 290,296 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 598,604 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 598,604 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005_01/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 20:55:58 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
-*** perl5.005_01.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005_01/myconfig	Sun Nov 12 20:55:58 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
-*** perl5.005_01.orig/patchlevel.h	Mon Jan  3 11:07:45 2000
---- perl5.005_01/patchlevel.h	Sun Nov 12 20:55:58 2000
-***************
-*** 39,44 ****
---- 39,45 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.005_02 b/storage/bdb/perl/DB_File/patches/5.005_02
deleted file mode 100644
index 5dd57ddc03f..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.005_02
+++ /dev/null
@@ -1,264 +0,0 @@
-diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
-*** perl5.005_02.orig/Configure	Mon Jan  3 11:12:20 2000
---- perl5.005_02/Configure	Sun Nov 12 20:50:51 2000
-***************
-*** 234,239 ****
---- 234,240 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11334,11339 ****
---- 11335,11348 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 11859,11864 ****
---- 11868,11874 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-Only in perl5.005_02: Configure.orig
-diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
-*** perl5.005_02.orig/Makefile.SH	Sun Jul 19 08:06:35 1998
---- perl5.005_02/Makefile.SH	Sun Nov 12 20:50:51 2000
-***************
-*** 150,156 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 150,156 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-Only in perl5.005_02: Makefile.SH.orig
-diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
-*** perl5.005_02.orig/lib/ExtUtils/Embed.pm	Wed Jul 22 07:45:02 1998
---- perl5.005_02/lib/ExtUtils/Embed.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
-*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm	Mon Jan  3 11:12:21 2000
---- perl5.005_02/lib/ExtUtils/Liblist.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 333,339 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 333,339 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 623,629 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
---- 623,629 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>
-  as well as in C<$Config{libpth}>. For each library that is found,  a
-***************
-*** 666,672 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 666,672 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 676,682 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 676,682 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
-diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm	Tue Jul 14 04:39:12 1998
---- perl5.005_02/lib/ExtUtils/MM_Unix.pm	Sun Nov 12 20:50:51 2000
-***************
-*** 2281,2287 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2281,2287 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
-diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
-*** perl5.005_02.orig/myconfig	Fri Apr  3 01:20:35 1998
---- perl5.005_02/myconfig	Sun Nov 12 20:50:51 2000
-***************
-*** 34,40 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 34,40 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
-*** perl5.005_02.orig/patchlevel.h	Mon Jan  3 11:12:19 2000
---- perl5.005_02/patchlevel.h	Sun Nov 12 20:50:51 2000
-***************
-*** 40,45 ****
---- 40,46 ----
-   */
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/patches/5.005_03 b/storage/bdb/perl/DB_File/patches/5.005_03
deleted file mode 100644
index 115f9f5b909..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.005_03
+++ /dev/null
@@ -1,250 +0,0 @@
-diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
-*** perl5.005_03.orig/Configure	Sun Mar 28 17:12:57 1999
---- perl5.005_03/Configure	Sun Sep 17 22:19:16 2000
-***************
-*** 208,213 ****
---- 208,214 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 11642,11647 ****
---- 11643,11656 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 12183,12188 ****
---- 12192,12198 ----
-  patchlevel='$patchlevel'
-  path_sep='$path_sep'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
-*** perl5.005_03.orig/Makefile.SH	Thu Mar  4 02:35:25 1999
---- perl5.005_03/Makefile.SH	Sun Sep 17 22:21:01 2000
-***************
-*** 58,67 ****
-  		shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
-  		case "$osvers" in
-  		3*)
-! 			shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
-  			;;
-  		*)
-! 			shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
-  			;;
-  		esac
-  		aixinstdir=`pwd | sed 's/\/UU$//'`
---- 58,67 ----
-  		shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
-  		case "$osvers" in
-  		3*)
-! 			shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
-  			;;
-  		*)
-! 			shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
-  			;;
-  		esac
-  		aixinstdir=`pwd | sed 's/\/UU$//'`
-***************
-*** 155,161 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 155,161 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
-*** perl5.005_03.orig/lib/ExtUtils/Embed.pm	Wed Jan  6 02:17:50 1999
---- perl5.005_03/lib/ExtUtils/Embed.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 194,200 ****
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{libs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
---- 194,200 ----
-      @path = $path ? split(/:/, $path) : @INC;
-  
-      push(@potential_libs, @link_args)    if scalar @link_args;
-!     push(@potential_libs, $Config{perllibs}) if defined $std;
-  
-      push(@mods, static_ext()) if $std;
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
-*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm	Wed Jan  6 02:17:47 1999
---- perl5.005_03/lib/ExtUtils/Liblist.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 16,33 ****
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{libs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{libs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'libs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
---- 16,33 ----
-  
-  sub _unix_os2_ext {
-      my($self,$potential_libs, $verbose) = @_;
-!     if ($^O =~ 'os2' and $Config{perllibs}) { 
-  	# Dynamic libraries are not transitive, so we may need including
-  	# the libraries linked against perl.dll again.
-  
-  	$potential_libs .= " " if $potential_libs;
-! 	$potential_libs .= $Config{perllibs};
-      }
-      return ("", "", "", "") unless $potential_libs;
-      warn "Potential libraries are '$potential_libs':\n" if $verbose;
-  
-      my($so)   = $Config{'so'};
-!     my($libs) = $Config{'perllibs'};
-      my $Config_libext = $Config{lib_ext} || ".a";
-  
-  
-***************
-*** 196,202 ****
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'libs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
---- 196,202 ----
-      my $BC		= 1 if $cc =~ /^bcc/i;
-      my $GC		= 1 if $cc =~ /^gcc/i;
-      my $so		= $Config{'so'};
-!     my $libs		= $Config{'perllibs'};
-      my $libpth		= $Config{'libpth'};
-      my $libext		= $Config{'lib_ext'} || ".lib";
-  
-***************
-*** 336,342 ****
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 336,342 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 626,632 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
---- 626,632 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
-***************
-*** 670,676 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 670,676 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 680,686 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 680,686 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
-*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm	Fri Mar  5 00:34:20 1999
---- perl5.005_03/lib/ExtUtils/MM_Unix.pm	Sun Sep 17 22:19:16 2000
-***************
-*** 2284,2290 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2284,2290 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
diff --git a/storage/bdb/perl/DB_File/patches/5.6.0 b/storage/bdb/perl/DB_File/patches/5.6.0
deleted file mode 100644
index 1f9b3b620de..00000000000
--- a/storage/bdb/perl/DB_File/patches/5.6.0
+++ /dev/null
@@ -1,294 +0,0 @@
-diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
-*** perl-5.6.0.orig/Configure	Wed Mar 22 20:36:37 2000
---- perl-5.6.0/Configure	Sun Sep 17 23:40:15 2000
-***************
-*** 217,222 ****
---- 217,223 ----
-  nm=''
-  nroff=''
-  perl=''
-+ perllibs=''
-  pg=''
-  pmake=''
-  pr=''
-***************
-*** 14971,14976 ****
---- 14972,14985 ----
-  shift
-  extensions="$*"
-  
-+ : Remove libraries needed only for extensions
-+ : The appropriate ext/Foo/Makefile.PL will add them back in, if
-+ : necessary.
-+ set X `echo " $libs " | 
-+   sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'` 
-+ shift
-+ perllibs="$*"
-+ 
-  : Remove build directory name from cppstdin so it can be used from
-  : either the present location or the final installed location.
-  echo " "
-***************
-*** 15640,15645 ****
---- 15649,15655 ----
-  path_sep='$path_sep'
-  perl5='$perl5'
-  perl='$perl'
-+ perllibs='$perllibs'
-  perladmin='$perladmin'
-  perlpath='$perlpath'
-  pg='$pg'
-diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
-*** perl-5.6.0.orig/Makefile.SH	Sat Mar 11 16:05:24 2000
---- perl-5.6.0/Makefile.SH	Sun Sep 17 23:40:15 2000
-***************
-*** 70,76 ****
-  		*)	shrpldflags="$shrpldflags -b noentry"
-  			;;
-  		esac
-! 	        shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
-  		linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
-  		;;
-  	hpux*)
---- 70,76 ----
-  		*)	shrpldflags="$shrpldflags -b noentry"
-  			;;
-  		esac
-! 	        shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
-  		linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
-  		;;
-  	hpux*)
-***************
-*** 176,182 ****
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $libs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
---- 176,182 ----
-  ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
-  DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
-  
-! libs = $perllibs $cryptlib
-  
-  public = perl $suidperl utilities translators
-  
-***************
-*** 333,339 ****
-  case "$osname" in
-  aix)
-  	$spitshell >>Makefile <>Makefile <{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
---- 338,344 ----
-                   $self->{CCFLAS}   || $Config{'ccflags'};
-    @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
-                . 'PerlShr/Share' );
-!   push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
-    push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
-    # In general, we pass through the basic libraries from %Config unchanged.
-    # The one exception is that if we're building in the Perl source tree, and
-***************
-*** 624,630 ****
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
---- 624,630 ----
-  =item *
-  
-  If C<$potential_libs> is empty, the return value will be empty.
-! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
-  will be appended to the list of C<$potential_libs>.  The libraries
-  will be searched for in the directories specified in C<$potential_libs>,
-  C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
-***************
-*** 668,674 ****
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{libs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
---- 668,674 ----
-  alphanumeric characters are treated as flags.  Unknown flags will be ignored.
-  
-  An entry that matches C disables the appending of default
-! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
-  
-  An entry that matches C disables all searching for
-  the libraries specified after it.  Translation of C<-Lfoo> and
-***************
-*** 678,684 ****
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{libs}>.
-  
-  =item *
-  
---- 678,684 ----
-  
-  An entry that matches C reenables searching for
-  the libraries specified after it.  You can put it at the end to
-! enable searching for default libraries specified by C<$Config{perllibs}>.
-  
-  =item *
-  
-diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
-*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm	Thu Mar  2 17:52:52 2000
---- perl-5.6.0/lib/ExtUtils/MM_Unix.pm	Sun Sep 17 23:40:15 2000
-***************
-*** 2450,2456 ****
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{libs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
---- 2450,2456 ----
-  MAP_STATIC    = ",
-  join(" \\\n\t", reverse sort keys %static), "
-  
-! MAP_PRELIBS   = $Config::Config{perllibs} $Config::Config{cryptlib}
-  ";
-  
-      if (defined $libperl) {
-diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
-*** perl-5.6.0.orig/myconfig.SH	Sat Feb 26 06:34:49 2000
---- perl-5.6.0/myconfig.SH	Sun Sep 17 23:41:17 2000
-***************
-*** 48,54 ****
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$libs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
---- 48,54 ----
-    Linker and Libraries:
-      ld='$ld', ldflags ='$ldflags'
-      libpth=$libpth
-!     libs=$perllibs
-      libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
-    Dynamic Linking:
-      dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
-diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
-*** perl-5.6.0.orig/patchlevel.h	Wed Mar 22 20:23:11 2000
---- perl-5.6.0/patchlevel.h	Sun Sep 17 23:40:15 2000
-***************
-*** 70,75 ****
---- 70,76 ----
-  #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
-  static	char	*local_patches[] = {
-  	NULL
-+ 	,"NODB-1.0 - remove -ldb from core perl binary."
-  	,NULL
-  };
-  
diff --git a/storage/bdb/perl/DB_File/ppport.h b/storage/bdb/perl/DB_File/ppport.h
deleted file mode 100644
index effa5072936..00000000000
--- a/storage/bdb/perl/DB_File/ppport.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/* This file is Based on output from 
- * Perl/Pollution/Portability Version 2.0000 */
-
-#ifndef _P_P_PORTABILITY_H_
-#define _P_P_PORTABILITY_H_
-
-#ifndef PERL_REVISION
-#   ifndef __PATCHLEVEL_H_INCLUDED__
-#       include "patchlevel.h"
-#   endif
-#   ifndef PERL_REVISION
-#	define PERL_REVISION	(5)
-        /* Replace: 1 */
-#       define PERL_VERSION	PATCHLEVEL
-#       define PERL_SUBVERSION	SUBVERSION
-        /* Replace PERL_PATCHLEVEL with PERL_VERSION */
-        /* Replace: 0 */
-#   endif
-#endif
-
-#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
-
-#ifndef ERRSV
-#	define ERRSV perl_get_sv("@",FALSE)
-#endif
-
-#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
-/* Replace: 1 */
-#	define PL_Sv		Sv
-#	define PL_compiling	compiling
-#	define PL_copline	copline
-#	define PL_curcop	curcop
-#	define PL_curstash	curstash
-#	define PL_defgv		defgv
-#	define PL_dirty		dirty
-#	define PL_hints		hints
-#	define PL_na		na
-#	define PL_perldb	perldb
-#	define PL_rsfp_filters	rsfp_filters
-#	define PL_rsfp		rsfp
-#	define PL_stdingv	stdingv
-#	define PL_sv_no		sv_no
-#	define PL_sv_undef	sv_undef
-#	define PL_sv_yes	sv_yes
-/* Replace: 0 */
-#endif
-
-#ifndef pTHX
-#    define pTHX
-#    define pTHX_
-#    define aTHX
-#    define aTHX_
-#endif         
-
-#ifndef PTR2IV
-#    define PTR2IV(d)   (IV)(d)
-#endif
- 
-#ifndef INT2PTR
-#    define INT2PTR(any,d)      (any)(d)
-#endif
-
-#ifndef dTHR
-#  ifdef WIN32
-#	define dTHR extern int Perl___notused
-#  else
-#	define dTHR extern int errno
-#  endif
-#endif
-
-#ifndef boolSV
-#	define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
-#endif
-
-#ifndef gv_stashpvn
-#	define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
-#endif
-
-#ifndef newSVpvn
-#	define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
-#endif
-
-#ifndef newRV_inc
-/* Replace: 1 */
-#	define newRV_inc(sv) newRV(sv)
-/* Replace: 0 */
-#endif
-
-/* DEFSV appears first in 5.004_56 */
-#ifndef DEFSV
-#  define DEFSV	GvSV(PL_defgv)
-#endif
-
-#ifndef SAVE_DEFSV
-#    define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
-#endif
-
-#ifndef newRV_noinc
-#  ifdef __GNUC__
-#    define newRV_noinc(sv)               \
-      ({                                  \
-          SV *nsv = (SV*)newRV(sv);       \
-          SvREFCNT_dec(sv);               \
-          nsv;                            \
-      })
-#  else
-#    if defined(CRIPPLED_CC) || defined(USE_THREADS)
-static SV * newRV_noinc (SV * sv)
-{
-          SV *nsv = (SV*)newRV(sv);       
-          SvREFCNT_dec(sv);               
-          return nsv;                     
-}
-#    else
-#      define newRV_noinc(sv)    \
-        ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
-#    endif
-#  endif
-#endif
-
-/* Provide: newCONSTSUB */
-
-/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
-#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
-
-#if defined(NEED_newCONSTSUB)
-static
-#else
-extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
-#endif
-
-#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
-void
-newCONSTSUB(stash,name,sv)
-HV *stash;
-char *name;
-SV *sv;
-{
-	U32 oldhints = PL_hints;
-	HV *old_cop_stash = PL_curcop->cop_stash;
-	HV *old_curstash = PL_curstash;
-	line_t oldline = PL_curcop->cop_line;
-	PL_curcop->cop_line = PL_copline;
-
-	PL_hints &= ~HINT_BLOCK_SCOPE;
-	if (stash)
-		PL_curstash = PL_curcop->cop_stash = stash;
-
-	newSUB(
-
-#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
-     /* before 5.003_22 */
-		start_subparse(),
-#else
-#  if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
-     /* 5.003_22 */
-     		start_subparse(0),
-#  else
-     /* 5.003_23  onwards */
-     		start_subparse(FALSE, 0),
-#  endif
-#endif
-
-		newSVOP(OP_CONST, 0, newSVpv(name,0)),
-		newSVOP(OP_CONST, 0, &PL_sv_no),   /* SvPV(&PL_sv_no) == "" -- GMB */
-		newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
-	);
-
-	PL_hints = oldhints;
-	PL_curcop->cop_stash = old_cop_stash;
-	PL_curstash = old_curstash;
-	PL_curcop->cop_line = oldline;
-}
-#endif
-
-#endif /* newCONSTSUB */
-
-
-#ifndef START_MY_CXT
-
-/*
- * Boilerplate macros for initializing and accessing interpreter-local
- * data from C.  All statics in extensions should be reworked to use
- * this, if you want to make the extension thread-safe.  See ext/re/re.xs
- * for an example of the use of these macros.
- *
- * Code that uses these macros is responsible for the following:
- * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
- * 2. Declare a typedef named my_cxt_t that is a structure that contains
- *    all the data that needs to be interpreter-local.
- * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
- * 4. Use the MY_CXT_INIT macro such that it is called exactly once
- *    (typically put in the BOOT: section).
- * 5. Use the members of the my_cxt_t structure everywhere as
- *    MY_CXT.member.
- * 6. Use the dMY_CXT macro (a declaration) in all the functions that
- *    access MY_CXT.
- */
-
-#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
-    defined(PERL_CAPI)    || defined(PERL_IMPLICIT_CONTEXT)
-
-/* This must appear in all extensions that define a my_cxt_t structure,
- * right after the definition (i.e. at file scope).  The non-threads
- * case below uses it to declare the data as static. */
-#define START_MY_CXT
-
-#if PERL_REVISION == 5 && \
-    (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
-/* Fetches the SV that keeps the per-interpreter data. */
-#define dMY_CXT_SV \
-	SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
-#else /* >= perl5.004_68 */
-#define dMY_CXT_SV \
-	SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY,		\
-				  sizeof(MY_CXT_KEY)-1, TRUE)
-#endif /* < perl5.004_68 */
-
-/* This declaration should be used within all functions that use the
- * interpreter-local data. */
-#define dMY_CXT	\
-	dMY_CXT_SV;							\
-	my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
-
-/* Creates and zeroes the per-interpreter data.
- * (We allocate my_cxtp in a Perl SV so that it will be released when
- * the interpreter goes away.) */
-#define MY_CXT_INIT \
-	dMY_CXT_SV;							\
-	/* newSV() allocates one more than needed */			\
-	my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
-	Zero(my_cxtp, 1, my_cxt_t);					\
-	sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
-
-/* This macro must be used to access members of the my_cxt_t structure.
- * e.g. MYCXT.some_data */
-#define MY_CXT		(*my_cxtp)
-
-/* Judicious use of these macros can reduce the number of times dMY_CXT
- * is used.  Use is similar to pTHX, aTHX etc. */
-#define pMY_CXT		my_cxt_t *my_cxtp
-#define pMY_CXT_	pMY_CXT,
-#define _pMY_CXT	,pMY_CXT
-#define aMY_CXT		my_cxtp
-#define aMY_CXT_	aMY_CXT,
-#define _aMY_CXT	,aMY_CXT
-
-#else /* single interpreter */
-
-#ifndef NOOP
-#  define NOOP (void)0
-#endif
-
-#ifdef HASATTRIBUTE
-#  define PERL_UNUSED_DECL __attribute__((unused))
-#else
-#  define PERL_UNUSED_DECL
-#endif    
-
-#ifndef dNOOP
-#  define dNOOP extern int Perl___notused PERL_UNUSED_DECL
-#endif
-
-#define START_MY_CXT	static my_cxt_t my_cxt;
-#define dMY_CXT_SV	dNOOP
-#define dMY_CXT		dNOOP
-#define MY_CXT_INIT	NOOP
-#define MY_CXT		my_cxt
-
-#define pMY_CXT		void
-#define pMY_CXT_
-#define _pMY_CXT
-#define aMY_CXT
-#define aMY_CXT_
-#define _aMY_CXT
-
-#endif 
-
-#endif /* START_MY_CXT */
-
-#ifdef SvPVbyte
-#   if PERL_REVISION == 5 && PERL_VERSION < 7
-       /* SvPVbyte does not work in perl-5.6.1, borrowed version for 5.7.3 */
-#       undef SvPVbyte
-#       define SvPVbyte(sv, lp) \
-          ((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \
-           ? ((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp))
-       static char *
-       my_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
-       {
-           sv_utf8_downgrade(sv,0);
-           return SvPV(sv,*lp);
-       }
-#   endif
-#else
-#   define SvPVbyte SvPV
-#endif
-	
-#ifndef SvUTF8_off
-#    define SvUTF8_off(s)
-#endif
-
-#if 1
-#ifdef DBM_setFilter
-#undef DBM_setFilter
-#undef DBM_ckFilter
-#endif
-#endif
-	
-#ifndef DBM_setFilter
-
-/* 
-   The DBM_setFilter & DBM_ckFilter macros are only used by 
-   the *DB*_File modules 
-*/
-
-#define DBM_setFilter(db_type,code)				\
-	{							\
-	    if (db_type)					\
-	        RETVAL = sv_mortalcopy(db_type) ;		\
-	    ST(0) = RETVAL ;					\
-	    if (db_type && (code == &PL_sv_undef)) {		\
-                SvREFCNT_dec(db_type) ;				\
-	        db_type = NULL ;				\
-	    }							\
-	    else if (code) {					\
-	        if (db_type)					\
-	            sv_setsv(db_type, code) ;			\
-	        else						\
-	            db_type = newSVsv(code) ;			\
-	    }	    						\
-	}
-
-#define DBM_ckFilter(arg,type,name)				\
-	if (db->type) {						\
-	    /*printf("ckFilter %s\n", name);*/			\
-	    if (db->filtering) {				\
-	        croak("recursion detected in %s", name) ;	\
-	    }                     				\
-	    ENTER ;						\
-	    SAVETMPS ;						\
-	    SAVEINT(db->filtering) ;				\
-	    db->filtering = TRUE ;				\
-	    SAVESPTR(DEFSV) ;					\
-            if (name[7] == 's')                                 \
-                arg = newSVsv(arg);                             \
-	    DEFSV = arg ;					\
-	    SvTEMP_off(arg) ;					\
-	    PUSHMARK(SP) ;					\
-	    PUTBACK ;						\
-	    (void) perl_call_sv(db->type, G_DISCARD); 		\
-	    SPAGAIN ;						\
-	    PUTBACK ;						\
-	    FREETMPS ;						\
-	    LEAVE ;						\
-            if (name[7] == 's'){                                \
-                arg = sv_2mortal(arg);                          \
-            }                                                   \
-            SvOKp(arg);                                         \
-	}
-
-#endif /* DBM_setFilter */
-
-#endif /* _P_P_PORTABILITY_H_ */
diff --git a/storage/bdb/perl/DB_File/t/db-btree.t b/storage/bdb/perl/DB_File/t/db-btree.t
deleted file mode 100644
index deab41010eb..00000000000
--- a/storage/bdb/perl/DB_File/t/db-btree.t
+++ /dev/null
@@ -1,1658 +0,0 @@
-#!./perl -w
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
- 
-use warnings;
-use strict;
-use Config;
- 
-BEGIN {
-    if(-d "lib" && -f "TEST") {
-        if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
-            print "1..0 # Skip: DB_File was not built\n";
-            exit 0;
-        }
-    }
-    if ($^O eq 'darwin'
-	&& $Config{db_version_major} == 1
-	&& $Config{db_version_minor} == 0
-	&& $Config{db_version_patch} == 0) {
-	warn < @b ? @b : @a) ;
-    my $i = 0 ;
-
-    foreach $i ( 0 .. $len -1) {
-        return $a[$i] - $b[$i] if $a[$i] != $b[$i] ;
-    }
-
-    return @a - @b ;
-}
-
-{
-    package Redirect ;
-    use Symbol ;
-
-    sub new
-    {
-        my $class = shift ;
-        my $filename = shift ;
-	my $fh = gensym ;
-	open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
-	my $real_stdout = select($fh) ;
-	return bless [$fh, $real_stdout ] ;
-
-    }
-    sub DESTROY
-    {
-        my $self = shift ;
-	close $self->[0] ;
-	select($self->[1]) ;
-    }
-}
-
-sub docat
-{ 
-    my $file = shift;
-    local $/ = undef ;
-    open(CAT,$file) || die "Cannot open $file: $!";
-    my $result = ;
-    close(CAT);
-    $result = normalise($result) ;
-    return $result ;
-}   
-
-sub docat_del
-{ 
-    my $file = shift;
-    my $result = docat($file);
-    unlink $file ;
-    return $result ;
-}   
-
-sub normalise
-{
-    my $data = shift ;
-    $data =~ s#\r\n#\n#g 
-        if $^O eq 'cygwin' ;
-
-    return $data ;
-}
-
-sub safeUntie
-{
-    my $hashref = shift ;
-    my $no_inner = 1;
-    local $SIG{__WARN__} = sub {-- $no_inner } ;
-    untie %$hashref;
-    return $no_inner;
-}
-
-
-
-my $db185mode =  ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
-my $null_keys_allowed = ($DB_File::db_ver < 2.004010 
-				|| $DB_File::db_ver >= 3.1 );
-
-my $Dfile = "dbbtree.tmp";
-unlink $Dfile;
-
-umask(0);
-
-# Check the interface to BTREEINFO
-
-my $dbh = new DB_File::BTREEINFO ;
-ok(1, ! defined $dbh->{flags}) ;
-ok(2, ! defined $dbh->{cachesize}) ;
-ok(3, ! defined $dbh->{psize}) ;
-ok(4, ! defined $dbh->{lorder}) ;
-ok(5, ! defined $dbh->{minkeypage}) ;
-ok(6, ! defined $dbh->{maxkeypage}) ;
-ok(7, ! defined $dbh->{compare}) ;
-ok(8, ! defined $dbh->{prefix}) ;
-
-$dbh->{flags} = 3000 ;
-ok(9, $dbh->{flags} == 3000) ;
-
-$dbh->{cachesize} = 9000 ;
-ok(10, $dbh->{cachesize} == 9000);
-
-$dbh->{psize} = 400 ;
-ok(11, $dbh->{psize} == 400) ;
-
-$dbh->{lorder} = 65 ;
-ok(12, $dbh->{lorder} == 65) ;
-
-$dbh->{minkeypage} = 123 ;
-ok(13, $dbh->{minkeypage} == 123) ;
-
-$dbh->{maxkeypage} = 1234 ;
-ok(14, $dbh->{maxkeypage} == 1234 );
-
-# Check that an invalid entry is caught both for store & fetch
-eval '$dbh->{fred} = 1234' ;
-ok(15, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
-eval 'my $q = $dbh->{fred}' ;
-ok(16, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
-
-# Now check the interface to BTREE
-
-my ($X, %h) ;
-ok(17, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
-die "Could not tie: $!" unless $X;
-
-my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
-   $blksize,$blocks) = stat($Dfile);
-
-my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
-
-ok(18, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
-   || $noMode{$^O} );
-
-my ($key, $value, $i);
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(19, !$i ) ;
-
-$h{'goner1'} = 'snork';
-
-$h{'abc'} = 'ABC';
-ok(20, $h{'abc'} eq 'ABC' );
-ok(21, ! defined $h{'jimmy'} ) ;
-ok(22, ! exists $h{'jimmy'} ) ;
-ok(23,  defined $h{'abc'} ) ;
-
-$h{'def'} = 'DEF';
-$h{'jkl','mno'} = "JKL\034MNO";
-$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
-$h{'a'} = 'A';
-
-#$h{'b'} = 'B';
-$X->STORE('b', 'B') ;
-
-$h{'c'} = 'C';
-
-#$h{'d'} = 'D';
-$X->put('d', 'D') ;
-
-$h{'e'} = 'E';
-$h{'f'} = 'F';
-$h{'g'} = 'X';
-$h{'h'} = 'H';
-$h{'i'} = 'I';
-
-$h{'goner2'} = 'snork';
-delete $h{'goner2'};
-
-
-# IMPORTANT - $X must be undefined before the untie otherwise the
-#             underlying DB close routine will not get called.
-undef $X ;
-untie(%h);
-
-# tie to the same file again
-ok(24, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
-
-# Modify an entry from the previous tie
-$h{'g'} = 'G';
-
-$h{'j'} = 'J';
-$h{'k'} = 'K';
-$h{'l'} = 'L';
-$h{'m'} = 'M';
-$h{'n'} = 'N';
-$h{'o'} = 'O';
-$h{'p'} = 'P';
-$h{'q'} = 'Q';
-$h{'r'} = 'R';
-$h{'s'} = 'S';
-$h{'t'} = 'T';
-$h{'u'} = 'U';
-$h{'v'} = 'V';
-$h{'w'} = 'W';
-$h{'x'} = 'X';
-$h{'y'} = 'Y';
-$h{'z'} = 'Z';
-
-$h{'goner3'} = 'snork';
-
-delete $h{'goner1'};
-$X->DELETE('goner3');
-
-my @keys = keys(%h);
-my @values = values(%h);
-
-ok(25, $#keys == 29 && $#values == 29) ;
-
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
-	$key =~ y/a-z/A-Z/;
-	$i++ if $key eq $value;
-    }
-}
-
-ok(26, $i == 30) ;
-
-@keys = ('blurfl', keys(%h), 'dyick');
-ok(27, $#keys == 31) ;
-
-#Check that the keys can be retrieved in order
-my @b = keys %h ;
-my @c = sort lexical @b ;
-ok(28, ArrayCompare(\@b, \@c)) ;
-
-$h{'foo'} = '';
-ok(29, $h{'foo'} eq '' ) ;
-
-# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
-# This feature was reenabled in version 3.1 of Berkeley DB.
-my $result = 0 ;
-if ($null_keys_allowed) {
-    $h{''} = 'bar';
-    $result = ( $h{''} eq 'bar' );
-}
-else
-  { $result = 1 }
-ok(30, $result) ;
-
-# check cache overflow and numeric keys and contents
-my $ok = 1;
-for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
-for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
-ok(31, $ok);
-
-($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
-   $blksize,$blocks) = stat($Dfile);
-ok(32, $size > 0 );
-
-@h{0..200} = 200..400;
-my @foo = @h{0..200};
-ok(33, join(':',200..400) eq join(':',@foo) );
-
-# Now check all the non-tie specific stuff
-
-
-# Check R_NOOVERWRITE flag will make put fail when attempting to overwrite
-# an existing record.
- 
-my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
-ok(34, $status == 1 );
- 
-# check that the value of the key 'x' has not been changed by the 
-# previous test
-ok(35, $h{'x'} eq 'X' );
-
-# standard put
-$status = $X->put('key', 'value') ;
-ok(36, $status == 0 );
-
-#check that previous put can be retrieved
-$value = 0 ;
-$status = $X->get('key', $value) ;
-ok(37, $status == 0 );
-ok(38, $value eq 'value' );
-
-# Attempting to delete an existing key should work
-
-$status = $X->del('q') ;
-ok(39, $status == 0 );
-if ($null_keys_allowed) {
-    $status = $X->del('') ;
-} else {
-    $status = 0 ;
-}
-ok(40, $status == 0 );
-
-# Make sure that the key deleted, cannot be retrieved
-ok(41, ! defined $h{'q'}) ;
-ok(42, ! defined $h{''}) ;
-
-undef $X ;
-untie %h ;
-
-ok(43, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
-
-# Attempting to delete a non-existant key should fail
-
-$status = $X->del('joe') ;
-ok(44, $status == 1 );
-
-# Check the get interface
-
-# First a non-existing key
-$status = $X->get('aaaa', $value) ;
-ok(45, $status == 1 );
-
-# Next an existing key
-$status = $X->get('a', $value) ;
-ok(46, $status == 0 );
-ok(47, $value eq 'A' );
-
-# seq
-# ###
-
-# use seq to find an approximate match
-$key = 'ke' ;
-$value = '' ;
-$status = $X->seq($key, $value, R_CURSOR) ;
-ok(48, $status == 0 );
-ok(49, $key eq 'key' );
-ok(50, $value eq 'value' );
-
-# seq when the key does not match
-$key = 'zzz' ;
-$value = '' ;
-$status = $X->seq($key, $value, R_CURSOR) ;
-ok(51, $status == 1 );
-
-
-# use seq to set the cursor, then delete the record @ the cursor.
-
-$key = 'x' ;
-$value = '' ;
-$status = $X->seq($key, $value, R_CURSOR) ;
-ok(52, $status == 0 );
-ok(53, $key eq 'x' );
-ok(54, $value eq 'X' );
-$status = $X->del(0, R_CURSOR) ;
-ok(55, $status == 0 );
-$status = $X->get('x', $value) ;
-ok(56, $status == 1 );
-
-# ditto, but use put to replace the key/value pair.
-$key = 'y' ;
-$value = '' ;
-$status = $X->seq($key, $value, R_CURSOR) ;
-ok(57, $status == 0 );
-ok(58, $key eq 'y' );
-ok(59, $value eq 'Y' );
-
-$key = "replace key" ;
-$value = "replace value" ;
-$status = $X->put($key, $value, R_CURSOR) ;
-ok(60, $status == 0 );
-ok(61, $key eq 'replace key' );
-ok(62, $value eq 'replace value' );
-$status = $X->get('y', $value) ;
-ok(63, 1) ; # hard-wire to always pass. the previous test ($status == 1)
-	    # only worked because of a bug in 1.85/6
-
-# use seq to walk forwards through a file 
-
-$status = $X->seq($key, $value, R_FIRST) ;
-ok(64, $status == 0 );
-my $previous = $key ;
-
-$ok = 1 ;
-while (($status = $X->seq($key, $value, R_NEXT)) == 0)
-{
-    ($ok = 0), last if ($previous cmp $key) == 1 ;
-}
-
-ok(65, $status == 1 );
-ok(66, $ok == 1 );
-
-# use seq to walk backwards through a file 
-$status = $X->seq($key, $value, R_LAST) ;
-ok(67, $status == 0 );
-$previous = $key ;
-
-$ok = 1 ;
-while (($status = $X->seq($key, $value, R_PREV)) == 0)
-{
-    ($ok = 0), last if ($previous cmp $key) == -1 ;
-    #print "key = [$key] value = [$value]\n" ;
-}
-
-ok(68, $status == 1 );
-ok(69, $ok == 1 );
-
-
-# check seq FIRST/LAST
-
-# sync
-# ####
-
-$status = $X->sync ;
-ok(70, $status == 0 );
-
-
-# fd
-# ##
-
-$status = $X->fd ;
-ok(71, $status != 0 );
-
-
-undef $X ;
-untie %h ;
-
-unlink $Dfile;
-
-# Now try an in memory file
-my $Y;
-ok(72, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
-
-# fd with an in memory file should return failure
-$status = $Y->fd ;
-ok(73, $status == -1 );
-
-
-undef $Y ;
-untie %h ;
-
-# Duplicate keys
-my $bt = new DB_File::BTREEINFO ;
-$bt->{flags} = R_DUP ;
-my ($YY, %hh);
-ok(74, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
-
-$hh{'Wall'} = 'Larry' ;
-$hh{'Wall'} = 'Stone' ; # Note the duplicate key
-$hh{'Wall'} = 'Brick' ; # Note the duplicate key
-$hh{'Wall'} = 'Brick' ; # Note the duplicate key and value
-$hh{'Smith'} = 'John' ;
-$hh{'mouse'} = 'mickey' ;
-
-# first work in scalar context
-ok(75, scalar $YY->get_dup('Unknown') == 0 );
-ok(76, scalar $YY->get_dup('Smith') == 1 );
-ok(77, scalar $YY->get_dup('Wall') == 4 );
-
-# now in list context
-my @unknown = $YY->get_dup('Unknown') ;
-ok(78, "@unknown" eq "" );
-
-my @smith = $YY->get_dup('Smith') ;
-ok(79, "@smith" eq "John" );
-
-{
-my @wall = $YY->get_dup('Wall') ;
-my %wall ;
-@wall{@wall} = @wall ;
-ok(80, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
-}
-
-# hash
-my %unknown = $YY->get_dup('Unknown', 1) ;
-ok(81, keys %unknown == 0 );
-
-my %smith = $YY->get_dup('Smith', 1) ;
-ok(82, keys %smith == 1 && $smith{'John'}) ;
-
-my %wall = $YY->get_dup('Wall', 1) ;
-ok(83, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1 
-		&& $wall{'Brick'} == 2);
-
-undef $YY ;
-untie %hh ;
-unlink $Dfile;
-
-
-# test multiple callbacks
-my $Dfile1 = "btree1" ;
-my $Dfile2 = "btree2" ;
-my $Dfile3 = "btree3" ;
- 
-my $dbh1 = new DB_File::BTREEINFO ;
-$dbh1->{compare} = sub { 
-	no warnings 'numeric' ;
-	$_[0] <=> $_[1] } ; 
- 
-my $dbh2 = new DB_File::BTREEINFO ;
-$dbh2->{compare} = sub { $_[0] cmp $_[1] } ;
- 
-my $dbh3 = new DB_File::BTREEINFO ;
-$dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
- 
- 
-my (%g, %k);
-tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) or die $!;
-tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) or die $!;
-tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) or die $!;
- 
-my @Keys = qw( 0123 12 -1234 9 987654321 def  ) ;
-my (@srt_1, @srt_2, @srt_3);
-{ 
-  no warnings 'numeric' ;
-  @srt_1 = sort { $a <=> $b } @Keys ; 
-}
-@srt_2 = sort { $a cmp $b } @Keys ;
-@srt_3 = sort { length $a <=> length $b } @Keys ;
- 
-foreach (@Keys) {
-    $h{$_} = 1 ;
-    $g{$_} = 1 ;
-    $k{$_} = 1 ;
-}
- 
-sub ArrayCompare
-{
-    my($a, $b) = @_ ;
- 
-    return 0 if @$a != @$b ;
- 
-    foreach (1 .. length @$a)
-    {
-        return 0 unless $$a[$_] eq $$b[$_] ;
-    }
- 
-    1 ;
-}
- 
-ok(84, ArrayCompare (\@srt_1, [keys %h]) );
-ok(85, ArrayCompare (\@srt_2, [keys %g]) );
-ok(86, ArrayCompare (\@srt_3, [keys %k]) );
-
-untie %h ;
-untie %g ;
-untie %k ;
-unlink $Dfile1, $Dfile2, $Dfile3 ;
-
-# clear
-# #####
-
-ok(87, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-foreach (1 .. 10)
-  { $h{$_} = $_ * 100 }
-
-# check that there are 10 elements in the hash
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(88, $i == 10);
-
-# now clear the hash
-%h = () ;
-
-# check it is empty
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(89, $i == 0);
-
-untie %h ;
-unlink $Dfile1 ;
-
-{
-    # check that attempting to tie an array to a DB_BTREE will fail
-
-    my $filename = "xyz" ;
-    my @x ;
-    eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
-    ok(90, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
-    unlink $filename ;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use warnings ;
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use warnings ;
-   use strict ;
-   our (@ISA, @EXPORT);
-
-   require Exporter ;
-   use DB_File;
-   @ISA=qw(DB_File);
-   @EXPORT = @DB_File::EXPORT ;
-
-   sub STORE { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::STORE($key, $value * 2) ;
-   }
-
-   sub FETCH { 
-	my $self = shift ;
-        my $key = shift ;
-        $self->SUPER::FETCH($key) - 1 ;
-   }
-
-   sub put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::put($key, $value * 3) ;
-   }
-
-   sub get { 
-	my $self = shift ;
-        $self->SUPER::get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }    
-    eval 'use SubDB ; ';
-    main::ok(91, $@ eq "") ;
-    my %h ;
-    my $X ;
-    eval '
-	$X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
-	' ;
-
-    main::ok(92, $@ eq "") ;
-
-    my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
-    main::ok(93, $@ eq "") ;
-    main::ok(94, $ret == 5) ;
-
-    my $value = 0;
-    $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
-    main::ok(95, $@ eq "") ;
-    main::ok(96, $ret == 10) ;
-
-    $ret = eval ' R_NEXT eq main::R_NEXT ' ;
-    main::ok(97, $@ eq "" ) ;
-    main::ok(98, $ret == 1) ;
-
-    $ret = eval '$X->A_new_method("joe") ' ;
-    main::ok(99, $@ eq "") ;
-    main::ok(100, $ret eq "[[11]]") ;
-
-    undef $X;
-    untie(%h);
-    unlink "SubDB.pm", "dbbtree.tmp" ;
-
-}
-
-{
-   # DBM Filter tests
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   unlink $Dfile;
-
-   sub checkOutput
-   {
-       my($fk, $sk, $fv, $sv) = @_ ;
-       return
-           $fetch_key eq $fk && $store_key eq $sk && 
-	   $fetch_value eq $fv && $store_value eq $sv &&
-	   $_ eq 'original' ;
-   }
-   
-   ok(101, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-
-   $db->filter_fetch_key   (sub { $fetch_key = $_ }) ;
-   $db->filter_store_key   (sub { $store_key = $_ }) ;
-   $db->filter_fetch_value (sub { $fetch_value = $_}) ;
-   $db->filter_store_value (sub { $store_value = $_ }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   #                   fk   sk     fv   sv
-   ok(102, checkOutput( "", "fred", "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(103, $h{"fred"} eq "joe");
-   #                   fk    sk     fv    sv
-   ok(104, checkOutput( "", "fred", "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(105, $db->FIRSTKEY() eq "fred") ;
-   #                    fk     sk  fv  sv
-   ok(106, checkOutput( "fred", "", "", "")) ;
-
-   # replace the filters, but remember the previous set
-   my ($old_fk) = $db->filter_fetch_key   
-   			(sub { $_ = uc $_ ; $fetch_key = $_ }) ;
-   my ($old_sk) = $db->filter_store_key   
-   			(sub { $_ = lc $_ ; $store_key = $_ }) ;
-   my ($old_fv) = $db->filter_fetch_value 
-   			(sub { $_ = "[$_]"; $fetch_value = $_ }) ;
-   my ($old_sv) = $db->filter_store_value 
-   			(sub { s/o/x/g; $store_value = $_ }) ;
-   
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"Fred"} = "Joe" ;
-   #                   fk   sk     fv    sv
-   ok(107, checkOutput( "", "fred", "", "Jxe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(108, $h{"Fred"} eq "[Jxe]");
-   #                   fk   sk     fv    sv
-   ok(109, checkOutput( "", "fred", "[Jxe]", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(110, $db->FIRSTKEY() eq "FRED") ;
-   #                   fk   sk     fv    sv
-   ok(111, checkOutput( "FRED", "", "", "")) ;
-
-   # put the original filters back
-   $db->filter_fetch_key   ($old_fk);
-   $db->filter_store_key   ($old_sk);
-   $db->filter_fetch_value ($old_fv);
-   $db->filter_store_value ($old_sv);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok(112, checkOutput( "", "fred", "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(113, $h{"fred"} eq "joe");
-   ok(114, checkOutput( "", "fred", "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(115, $db->FIRSTKEY() eq "fred") ;
-   ok(116, checkOutput( "fred", "", "", "")) ;
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok(117, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(118, $h{"fred"} eq "joe");
-   ok(119, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(120, $db->FIRSTKEY() eq "fred") ;
-   ok(121, checkOutput( "", "", "", "")) ;
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{    
-    # DBM Filter with a closure
-
-    use warnings ;
-    use strict ;
-    my (%h, $db) ;
-
-    unlink $Dfile;
-    ok(122, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-
-    my %result = () ;
-
-    sub Closure
-    {
-        my ($name) = @_ ;
-	my $count = 0 ;
-	my @kept = () ;
-
-	return sub { ++$count ; 
-		     push @kept, $_ ; 
-		     $result{$name} = "$name - $count: [@kept]" ;
-		   }
-    }
-
-    $db->filter_store_key(Closure("store key")) ;
-    $db->filter_store_value(Closure("store value")) ;
-    $db->filter_fetch_key(Closure("fetch key")) ;
-    $db->filter_fetch_value(Closure("fetch value")) ;
-
-    $_ = "original" ;
-
-    $h{"fred"} = "joe" ;
-    ok(123, $result{"store key"} eq "store key - 1: [fred]");
-    ok(124, $result{"store value"} eq "store value - 1: [joe]");
-    ok(125, ! defined $result{"fetch key"} );
-    ok(126, ! defined $result{"fetch value"} );
-    ok(127, $_ eq "original") ;
-
-    ok(128, $db->FIRSTKEY() eq "fred") ;
-    ok(129, $result{"store key"} eq "store key - 1: [fred]");
-    ok(130, $result{"store value"} eq "store value - 1: [joe]");
-    ok(131, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(132, ! defined $result{"fetch value"} );
-    ok(133, $_ eq "original") ;
-
-    $h{"jim"}  = "john" ;
-    ok(134, $result{"store key"} eq "store key - 2: [fred jim]");
-    ok(135, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(136, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(137, ! defined $result{"fetch value"} );
-    ok(138, $_ eq "original") ;
-
-    ok(139, $h{"fred"} eq "joe");
-    ok(140, $result{"store key"} eq "store key - 3: [fred jim fred]");
-    ok(141, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(142, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(143, $result{"fetch value"} eq "fetch value - 1: [joe]");
-    ok(144, $_ eq "original") ;
-
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}		
-
-{
-   # DBM Filter recursion detection
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   unlink $Dfile;
-
-   ok(145, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-
-   $db->filter_store_key (sub { $_ = $h{$_} }) ;
-
-   eval '$h{1} = 1234' ;
-   ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
-   
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-
-{
-   # Examples from the POD
-
-
-  my $file = "xyzt" ;
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 1
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
-
-    my %h ;
-
-    sub Compare
-    {
-        my ($key1, $key2) = @_ ;
-        "\L$key1" cmp "\L$key2" ;
-    }
-
-    # specify the Perl sub that will do the comparison
-    $DB_BTREE->{'compare'} = \&Compare ;
-
-    unlink "tree" ;
-    tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE 
-        or die "Cannot open file 'tree': $!\n" ;
-
-    # Add a key/value pair to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-    $h{'duck'}  = 'donald' ;
-
-    # Delete
-    delete $h{"duck"} ;
-
-    # Cycle through the keys printing them in order.
-    # Note it is not necessary to sort the keys as
-    # the btree will have kept them in order automatically.
-    foreach (keys %h)
-      { print "$_\n" }
-
-    untie %h ;
-
-    unlink "tree" ;
-  }  
-
-  delete $DB_BTREE->{'compare'} ;
-
-  ok(147, docat_del($file) eq <<'EOM') ;
-mouse
-Smith
-Wall
-EOM
-   
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 2
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
-
-    my ($filename, %h);
-
-    $filename = "tree" ;
-    unlink $filename ;
- 
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
- 
-    tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
- 
-    # Add some key/value pairs to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
-
-    # iterate through the associative array
-    # and print each key/value pair.
-    foreach (keys %h)
-      { print "$_	-> $h{$_}\n" }
-
-    untie %h ;
-
-    unlink $filename ;
-  }  
-
-  ok(148, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
-Smith	-> John
-Wall	-> Brick
-Wall	-> Brick
-Wall	-> Brick
-mouse	-> mickey
-EOM
-Smith	-> John
-Wall	-> Larry
-Wall	-> Larry
-Wall	-> Larry
-mouse	-> mickey
-EOM
-
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 3
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
- 
-    my ($filename, $x, %h, $status, $key, $value);
-
-    $filename = "tree" ;
-    unlink $filename ;
- 
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
- 
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
- 
-    # Add some key/value pairs to the file
-    $h{'Wall'} = 'Larry' ;
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key
-    $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
-    $h{'Smith'} = 'John' ;
-    $h{'mouse'} = 'mickey' ;
- 
-    # iterate through the btree using seq
-    # and print each key/value pair.
-    $key = $value = 0 ;
-    for ($status = $x->seq($key, $value, R_FIRST) ;
-         $status == 0 ;
-         $status = $x->seq($key, $value, R_NEXT) )
-      {  print "$key	-> $value\n" }
- 
- 
-    undef $x ;
-    untie %h ;
-  }
-
-  ok(149, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
-Smith	-> John
-Wall	-> Brick
-Wall	-> Brick
-Wall	-> Larry
-mouse	-> mickey
-EOM
-Smith	-> John
-Wall	-> Larry
-Wall	-> Brick
-Wall	-> Brick
-mouse	-> mickey
-EOM
-
-
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 4
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
- 
-    my ($filename, $x, %h);
-
-    $filename = "tree" ;
- 
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
- 
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
- 
-    my $cnt  = $x->get_dup("Wall") ;
-    print "Wall occurred $cnt times\n" ;
-
-    my %hash = $x->get_dup("Wall", 1) ;
-    print "Larry is there\n" if $hash{'Larry'} ;
-    print "There are $hash{'Brick'} Brick Walls\n" ;
-
-    my @list = sort $x->get_dup("Wall") ;
-    print "Wall =>	[@list]\n" ;
-
-    @list = $x->get_dup("Smith") ;
-    print "Smith =>	[@list]\n" ;
- 
-    @list = $x->get_dup("Dog") ;
-    print "Dog =>	[@list]\n" ; 
- 
-    undef $x ;
-    untie %h ;
-  }
-
-  ok(150, docat_del($file) eq <<'EOM') ;
-Wall occurred 3 times
-Larry is there
-There are 2 Brick Walls
-Wall =>	[Brick Brick Larry]
-Smith =>	[John]
-Dog =>	[]
-EOM
-
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 5
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
- 
-    my ($filename, $x, %h, $found);
-
-    $filename = "tree" ;
- 
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
- 
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ; 
-    print "Larry Wall is $found there\n" ;
-    
-    $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ; 
-    print "Harry Wall is $found there\n" ;
-    
-    undef $x ;
-    untie %h ;
-  }
-
-  ok(151, docat_del($file) eq <<'EOM') ;
-Larry Wall is  there
-Harry Wall is not there
-EOM
-
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 6
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
- 
-    my ($filename, $x, %h, $found);
-
-    $filename = "tree" ;
- 
-    # Enable duplicate records
-    $DB_BTREE->{'flags'} = R_DUP ;
- 
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE 
-	or die "Cannot open $filename: $!\n";
-
-    $x->del_dup("Wall", "Larry") ;
-
-    $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ; 
-    print "Larry Wall is $found there\n" ;
-    
-    undef $x ;
-    untie %h ;
-
-    unlink $filename ;
-  }
-
-  ok(152, docat_del($file) eq <<'EOM') ;
-Larry Wall is not there
-EOM
-
-  {
-    my $redirect = new Redirect $file ;
-
-    # BTREE example 7
-    ###
-
-    use warnings FATAL => qw(all) ;
-    use strict ;
-    use DB_File ;
-    use Fcntl ;
-
-    my ($filename, $x, %h, $st, $key, $value);
-
-    sub match
-    {
-        my $key = shift ;
-        my $value = 0;
-        my $orig_key = $key ;
-        $x->seq($key, $value, R_CURSOR) ;
-        print "$orig_key\t-> $key\t-> $value\n" ;
-    }
-
-    $filename = "tree" ;
-    unlink $filename ;
-
-    $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
-        or die "Cannot open $filename: $!\n";
- 
-    # Add some key/value pairs to the file
-    $h{'mouse'} = 'mickey' ;
-    $h{'Wall'} = 'Larry' ;
-    $h{'Walls'} = 'Brick' ; 
-    $h{'Smith'} = 'John' ;
- 
-
-    $key = $value = 0 ;
-    print "IN ORDER\n" ;
-    for ($st = $x->seq($key, $value, R_FIRST) ;
-	 $st == 0 ;
-         $st = $x->seq($key, $value, R_NEXT) )
-	
-      {  print "$key	-> $value\n" }
- 
-    print "\nPARTIAL MATCH\n" ;
-
-    match "Wa" ;
-    match "A" ;
-    match "a" ;
-
-    undef $x ;
-    untie %h ;
-
-    unlink $filename ;
-
-  }
-
-  ok(153, docat_del($file) eq <<'EOM') ;
-IN ORDER
-Smith	-> John
-Wall	-> Larry
-Walls	-> Brick
-mouse	-> mickey
-
-PARTIAL MATCH
-Wa	-> Wall	-> Larry
-A	-> Smith	-> John
-a	-> mouse	-> mickey
-EOM
-
-}
-
-#{
-#   # R_SETCURSOR
-#   use strict ;
-#   my (%h, $db) ;
-#   unlink $Dfile;
-#
-#   ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-#
-#   $h{abc} = 33 ;
-#   my $k = "newest" ;
-#   my $v = 44 ;
-#   my $status = $db->put($k, $v, R_SETCURSOR) ;
-#   print "status = [$status]\n" ;
-#   ok(157, $status == 0) ;
-#   $status = $db->del($k, R_CURSOR) ;
-#   print "status = [$status]\n" ;
-#   ok(158, $status == 0) ;
-#   $k = "newest" ;
-#   ok(159, $db->get($k, $v, R_CURSOR)) ;
-#
-#   ok(160, keys %h == 1) ;
-#   
-#   undef $db ;
-#   untie %h;
-#   unlink $Dfile;
-#}
-
-{
-    # Bug ID 20001013.009
-    #
-    # test that $hash{KEY} = undef doesn't produce the warning
-    #     Use of uninitialized value in null operation 
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my %h ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-    
-    tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
-	or die "Can't open file: $!\n" ;
-    $h{ABC} = undef;
-    ok(154, $a eq "") ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # test that %hash = () doesn't produce the warning
-    #     Argument "" isn't numeric in entersub
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my %h ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-    
-    tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
-	or die "Can't open file: $!\n" ;
-    %h = (); ;
-    ok(155, $a eq "") ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # When iterating over a tied hash using "each", the key passed to FETCH
-    # will be recycled and passed to NEXTKEY. If a Source Filter modifies the
-    # key in FETCH via a filter_fetch_key method we need to check that the
-    # modified key doesn't get passed to NEXTKEY.
-    # Also Test "keys" & "values" while we are at it.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my $bad_key = 0 ;
-    my %h = () ;
-    my $db ;
-    ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-    $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
-    $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
-
-    $h{'Alpha_ABC'} = 2 ;
-    $h{'Alpha_DEF'} = 5 ;
-
-    ok(157, $h{'Alpha_ABC'} == 2);
-    ok(158, $h{'Alpha_DEF'} == 5);
-
-    my ($k, $v) = ("","");
-    while (($k, $v) = each %h) {}
-    ok(159, $bad_key == 0);
-
-    $bad_key = 0 ;
-    foreach $k (keys %h) {}
-    ok(160, $bad_key == 0);
-
-    $bad_key = 0 ;
-    foreach $v (values %h) {}
-    ok(161, $bad_key == 0);
-
-    undef $db ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # now an error to pass 'compare' a non-code reference
-    my $dbh = new DB_File::BTREEINFO ;
-
-    eval { $dbh->{compare} = 2 };
-    ok(162, $@ =~ /^Key 'compare' not associated with a code reference at/);
-
-    eval { $dbh->{prefix} = 2 };
-    ok(163, $@ =~ /^Key 'prefix' not associated with a code reference at/);
-
-}
-
-
-#{
-#    # recursion detection in btree
-#    my %hash ;
-#    unlink $Dfile;
-#    my $dbh = new DB_File::BTREEINFO ;
-#    $dbh->{compare} = sub { $hash{3} = 4 ; length $_[0] } ;
-# 
-# 
-#    my (%h);
-#    ok(164, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
-#
-#    eval {	$hash{1} = 2;
-#    		$hash{4} = 5;
-#	 };
-#
-#    ok(165, $@ =~ /^DB_File btree_compare: recursion detected/);
-#    {
-#        no warnings;
-#        untie %hash;
-#    }
-#    unlink $Dfile;
-#}
-ok(164,1);
-ok(165,1);
-
-{
-    # Check that two callbacks don't interact
-    my %hash1 ;
-    my %hash2 ;
-    my $h1_count = 0;
-    my $h2_count = 0;
-    unlink $Dfile, $Dfile2;
-    my $dbh1 = new DB_File::BTREEINFO ;
-    $dbh1->{compare} = sub { ++ $h1_count ; $_[0] cmp $_[1] } ; 
-
-    my $dbh2 = new DB_File::BTREEINFO ;
-    $dbh2->{compare} = sub { ;++ $h2_count ; $_[0] cmp $_[1] } ; 
- 
- 
- 
-    my (%h);
-    ok(166, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
-    ok(167, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
-
-    $hash1{DEFG} = 5;
-    $hash1{XYZ} = 2;
-    $hash1{ABCDE} = 5;
-
-    $hash2{defg} = 5;
-    $hash2{xyz} = 2;
-    $hash2{abcde} = 5;
-
-    ok(168, $h1_count > 0);
-    ok(169, $h1_count == $h2_count);
-
-    ok(170, safeUntie \%hash1);
-    ok(171, safeUntie \%hash2);
-    unlink $Dfile, $Dfile2;
-}
-
-{
-   # Check that DBM Filter can cope with read-only $_
-
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   unlink $Dfile;
-
-   ok(172, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-
-   $db->filter_fetch_key   (sub { }) ;
-   $db->filter_store_key   (sub { }) ;
-   $db->filter_fetch_value (sub { }) ;
-   $db->filter_store_value (sub { }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   ok(173, $h{"fred"} eq "joe");
-
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (174, ! $@);
-
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   $h{"fred"} = "joe" ;
-
-   ok(175, $h{"fred"} eq "joe");
-
-   ok(176, $db->FIRSTKEY() eq "fred") ;
-   
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (177, ! $@);
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{
-   # Check low-level API works with filter
-
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my $Dfile = "xxy.db";
-   unlink $Dfile;
-
-   ok(178, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
-
-
-   $db->filter_fetch_key   (sub { $_ = unpack("i", $_) } );
-   $db->filter_store_key   (sub { $_ = pack("i", $_) } );
-   $db->filter_fetch_value (sub { $_ = unpack("i", $_) } );
-   $db->filter_store_value (sub { $_ = pack("i", $_) } );
-
-   $_ = 'fred';
-
-   my $key = 22 ;
-   my $value = 34 ;
-
-   $db->put($key, $value) ;
-   ok 179, $key == 22;
-   ok 180, $value == 34 ;
-   ok 181, $_ eq 'fred';
-   #print "k [$key][$value]\n" ;
-
-   my $val ;
-   $db->get($key, $val) ;
-   ok 182, $key == 22;
-   ok 183, $val == 34 ;
-   ok 184, $_ eq 'fred';
-
-   $key = 51 ;
-   $value = 454;
-   $h{$key} = $value ;
-   ok 185, $key == 51;
-   ok 186, $value == 454 ;
-   ok 187, $_ eq 'fred';
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-
-
-{
-    # Regression Test for bug 30237
-    # Check that substr can be used in the key to db_put
-    # and that db_put does not trigger the warning
-    # 
-    #     Use of uninitialized value in subroutine entry
-
-
-    use warnings ;
-    use strict ;
-    my (%h, $db) ;
-    my $Dfile = "xxy.db";
-    unlink $Dfile;
-
-    ok(188, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
-
-    my $warned = '';
-    local $SIG{__WARN__} = sub {$warned = $_[0]} ;
-
-    # db-put with substr of key
-    my %remember = () ;
-    for my $ix ( 10 .. 12 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $db->put(substr($key,0), $value) ;
-    }
-
-    ok 189, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # db-put with substr of value
-    $warned = '';
-    for my $ix ( 20 .. 22 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $db->put($key, substr($value,0)) ;
-    }
-
-    ok 190, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied hash is not a problem, but check anyway
-    # substr of key
-    $warned = '';
-    for my $ix ( 30 .. 32 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $h{substr($key,0)} = $value ;
-    }
-
-    ok 191, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied hash is not a problem, but check anyway
-    # substr of value
-    $warned = '';
-    for my $ix ( 40 .. 42 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $h{$key} = substr($value,0) ;
-    }
-
-    ok 192, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    my %bad = () ;
-    $key = '';
-    for ($status = $db->seq($key, $value, R_FIRST ) ;
-         $status == 0 ;
-         $status = $db->seq($key, $value, R_NEXT ) ) {
-
-        #print "# key [$key] value [$value]\n" ;
-        if (defined $remember{$key} && defined $value && 
-             $remember{$key} eq $value) {
-            delete $remember{$key} ;
-        }
-        else {
-            $bad{$key} = $value ;
-        }
-    }
-    
-    ok 193, keys %bad == 0 ;
-    ok 194, keys %remember == 0 ;
-
-    print "# missing -- $key $value\n" while ($key, $value) = each %remember;
-    print "# bad     -- $key $value\n" while ($key, $value) = each %bad;
-
-    # Make sure this fix does not break code to handle an undef key
-    # Berkeley DB undef key is bron between versions 2.3.16 and 
-    my $value = 'fred';
-    $warned = '';
-    $db->put(undef, $value) ;
-    ok 195, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    my $no_NULL = ($DB_File::db_ver >= 2.003016 && $DB_File::db_ver < 3.001) ;
-    print "# db_ver $DB_File::db_ver\n";
-    $value = '' ;
-    $db->get(undef, $value) ;
-    ok 196, $no_NULL || $value eq 'fred' or print "# got [$value]\n" ;
-    ok 197, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}
-exit ;
diff --git a/storage/bdb/perl/DB_File/t/db-hash.t b/storage/bdb/perl/DB_File/t/db-hash.t
deleted file mode 100644
index 018952f9d49..00000000000
--- a/storage/bdb/perl/DB_File/t/db-hash.t
+++ /dev/null
@@ -1,1231 +0,0 @@
-#!./perl 
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
- 
-use warnings;
-use strict;
-use Config;
- 
-BEGIN {
-    if(-d "lib" && -f "TEST") {
-        if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
-            print "1..0 # Skip: DB_File was not built\n";
-            exit 0;
-        }
-    }
-}
-
-use DB_File; 
-use Fcntl;
-
-print "1..166\n";
-
-unlink glob "__db.*";
-
-sub ok
-{
-    my $no = shift ;
-    my $result = shift ;
- 
-    print "not " unless $result ;
-    print "ok $no\n" ;
-
-    return $result ;
-}
-
-{
-    package Redirect ;
-    use Symbol ;
-
-    sub new
-    {
-        my $class = shift ;
-        my $filename = shift ;
-	my $fh = gensym ;
-	open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
-	my $real_stdout = select($fh) ;
-	return bless [$fh, $real_stdout ] ;
-
-    }
-    sub DESTROY
-    {
-        my $self = shift ;
-	close $self->[0] ;
-	select($self->[1]) ;
-    }
-}
-
-sub docat_del
-{ 
-    my $file = shift;
-    local $/ = undef;
-    open(CAT,$file) || die "Cannot open $file: $!";
-    my $result = ;
-    close(CAT);
-    $result = normalise($result) ;
-    unlink $file ;
-    return $result;
-}   
-
-sub normalise
-{
-    my $data = shift ;
-    $data =~ s#\r\n#\n#g 
-        if $^O eq 'cygwin' ;
-    return $data ;
-}
-
-sub safeUntie
-{
-    my $hashref = shift ;
-    my $no_inner = 1;
-    local $SIG{__WARN__} = sub {-- $no_inner } ;
-    untie %$hashref;
-    return $no_inner;
-}
-
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $null_keys_allowed = ($DB_File::db_ver < 2.004010 
-				|| $DB_File::db_ver >= 3.1 );
-
-unlink $Dfile;
-
-umask(0);
-
-# Check the interface to HASHINFO
-
-my $dbh = new DB_File::HASHINFO ;
-
-ok(1, ! defined $dbh->{bsize}) ;
-ok(2, ! defined $dbh->{ffactor}) ;
-ok(3, ! defined $dbh->{nelem}) ;
-ok(4, ! defined $dbh->{cachesize}) ;
-ok(5, ! defined $dbh->{hash}) ;
-ok(6, ! defined $dbh->{lorder}) ;
-
-$dbh->{bsize} = 3000 ;
-ok(7, $dbh->{bsize} == 3000 );
-
-$dbh->{ffactor} = 9000 ;
-ok(8, $dbh->{ffactor} == 9000 );
-
-$dbh->{nelem} = 400 ;
-ok(9, $dbh->{nelem} == 400 );
-
-$dbh->{cachesize} = 65 ;
-ok(10, $dbh->{cachesize} == 65 );
-
-my $some_sub = sub {} ;
-$dbh->{hash} = $some_sub;
-ok(11, $dbh->{hash} eq $some_sub );
-
-$dbh->{lorder} = 1234 ;
-ok(12, $dbh->{lorder} == 1234 );
-
-# Check that an invalid entry is caught both for store & fetch
-eval '$dbh->{fred} = 1234' ;
-ok(13, $@ =~ /^DB_File::HASHINFO::STORE - Unknown element 'fred' at/ );
-eval 'my $q = $dbh->{fred}' ;
-ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
-
-
-# Now check the interface to HASH
-my ($X, %h);
-ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-die "Could not tie: $!" unless $X;
-
-my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
-   $blksize,$blocks) = stat($Dfile);
-
-my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
-
-ok(16, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640) ||
-   $noMode{$^O} );
-
-my ($key, $value, $i);
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(17, !$i );
-
-$h{'goner1'} = 'snork';
-
-$h{'abc'} = 'ABC';
-ok(18, $h{'abc'} eq 'ABC' );
-ok(19, !defined $h{'jimmy'} );
-ok(20, !exists $h{'jimmy'} );
-ok(21, exists $h{'abc'} );
-
-$h{'def'} = 'DEF';
-$h{'jkl','mno'} = "JKL\034MNO";
-$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
-$h{'a'} = 'A';
-
-#$h{'b'} = 'B';
-$X->STORE('b', 'B') ;
-
-$h{'c'} = 'C';
-
-#$h{'d'} = 'D';
-$X->put('d', 'D') ;
-
-$h{'e'} = 'E';
-$h{'f'} = 'F';
-$h{'g'} = 'X';
-$h{'h'} = 'H';
-$h{'i'} = 'I';
-
-$h{'goner2'} = 'snork';
-delete $h{'goner2'};
-
-
-# IMPORTANT - $X must be undefined before the untie otherwise the
-#             underlying DB close routine will not get called.
-undef $X ;
-untie(%h);
-
-
-# tie to the same file again, do not supply a type - should default to HASH
-ok(22, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640) );
-
-# Modify an entry from the previous tie
-$h{'g'} = 'G';
-
-$h{'j'} = 'J';
-$h{'k'} = 'K';
-$h{'l'} = 'L';
-$h{'m'} = 'M';
-$h{'n'} = 'N';
-$h{'o'} = 'O';
-$h{'p'} = 'P';
-$h{'q'} = 'Q';
-$h{'r'} = 'R';
-$h{'s'} = 'S';
-$h{'t'} = 'T';
-$h{'u'} = 'U';
-$h{'v'} = 'V';
-$h{'w'} = 'W';
-$h{'x'} = 'X';
-$h{'y'} = 'Y';
-$h{'z'} = 'Z';
-
-$h{'goner3'} = 'snork';
-
-delete $h{'goner1'};
-$X->DELETE('goner3');
-
-my @keys = keys(%h);
-my @values = values(%h);
-
-ok(23, $#keys == 29 && $#values == 29) ;
-
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
-	$key =~ y/a-z/A-Z/;
-	$i++ if $key eq $value;
-    }
-}
-
-ok(24, $i == 30) ;
-
-@keys = ('blurfl', keys(%h), 'dyick');
-ok(25, $#keys == 31) ;
-
-$h{'foo'} = '';
-ok(26, $h{'foo'} eq '' );
-
-# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
-# This feature was reenabled in version 3.1 of Berkeley DB.
-my $result = 0 ;
-if ($null_keys_allowed) {
-    $h{''} = 'bar';
-    $result = ( $h{''} eq 'bar' );
-}
-else
-  { $result = 1 }
-ok(27, $result) ;
-
-# check cache overflow and numeric keys and contents
-my $ok = 1;
-for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
-for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
-ok(28, $ok );
-
-($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
-   $blksize,$blocks) = stat($Dfile);
-ok(29, $size > 0 );
-
-@h{0..200} = 200..400;
-my @foo = @h{0..200};
-ok(30, join(':',200..400) eq join(':',@foo) );
-
-
-# Now check all the non-tie specific stuff
-
-# Check NOOVERWRITE will make put fail when attempting to overwrite
-# an existing record.
- 
-my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
-ok(31, $status == 1 );
- 
-# check that the value of the key 'x' has not been changed by the 
-# previous test
-ok(32, $h{'x'} eq 'X' );
-
-# standard put
-$status = $X->put('key', 'value') ;
-ok(33, $status == 0 );
-
-#check that previous put can be retrieved
-$value = 0 ;
-$status = $X->get('key', $value) ;
-ok(34, $status == 0 );
-ok(35, $value eq 'value' );
-
-# Attempting to delete an existing key should work
-
-$status = $X->del('q') ;
-ok(36, $status == 0 );
-
-# Make sure that the key deleted, cannot be retrieved
-{
-    no warnings 'uninitialized' ;
-    ok(37, $h{'q'} eq undef );
-}
-
-# Attempting to delete a non-existant key should fail
-
-$status = $X->del('joe') ;
-ok(38, $status == 1 );
-
-# Check the get interface
-
-# First a non-existing key
-$status = $X->get('aaaa', $value) ;
-ok(39, $status == 1 );
-
-# Next an existing key
-$status = $X->get('a', $value) ;
-ok(40, $status == 0 );
-ok(41, $value eq 'A' );
-
-# seq
-# ###
-
-# ditto, but use put to replace the key/value pair.
-
-# use seq to walk backwards through a file - check that this reversed is
-
-# check seq FIRST/LAST
-
-# sync
-# ####
-
-$status = $X->sync ;
-ok(42, $status == 0 );
-
-
-# fd
-# ##
-
-$status = $X->fd ;
-ok(43, $status != 0 );
-
-undef $X ;
-untie %h ;
-
-unlink $Dfile;
-
-# clear
-# #####
-
-ok(44, tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-foreach (1 .. 10)
-  { $h{$_} = $_ * 100 }
-
-# check that there are 10 elements in the hash
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(45, $i == 10);
-
-# now clear the hash
-%h = () ;
-
-# check it is empty
-$i = 0 ;
-while (($key,$value) = each(%h)) {
-    $i++;
-}
-ok(46, $i == 0);
-
-untie %h ;
-unlink $Dfile ;
-
-
-# Now try an in memory file
-ok(47, $X = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-# fd with an in memory file should return fail
-$status = $X->fd ;
-ok(48, $status == -1 );
-
-undef $X ;
-untie %h ;
-
-{
-    # check ability to override the default hashing
-    my %x ;
-    my $filename = "xyz" ;
-    my $hi = new DB_File::HASHINFO ;
-    $::count = 0 ;
-    $hi->{hash} = sub { ++$::count ; length $_[0] } ;
-    ok(49, tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $hi ) ;
-    $h{"abc"} = 123 ;
-    ok(50, $h{"abc"} == 123) ;
-    untie %x ;
-    unlink $filename ;
-    ok(51, $::count >0) ;
-}
-
-{
-    # check that attempting to tie an array to a DB_HASH will fail
-
-    my $filename = "xyz" ;
-    my @x ;
-    eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_HASH ; } ;
-    ok(52, $@ =~ /^DB_File can only tie an associative array to a DB_HASH database/) ;
-    unlink $filename ;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use warnings ;
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use warnings ;
-   use strict ;
-   our (@ISA, @EXPORT);
-
-   require Exporter ;
-   use DB_File;
-   @ISA=qw(DB_File);
-   @EXPORT = @DB_File::EXPORT ;
-
-   sub STORE { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::STORE($key, $value * 2) ;
-   }
-
-   sub FETCH { 
-	my $self = shift ;
-        my $key = shift ;
-        $self->SUPER::FETCH($key) - 1 ;
-   }
-
-   sub put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::put($key, $value * 3) ;
-   }
-
-   sub get { 
-	my $self = shift ;
-        $self->SUPER::get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE ;
-
-    BEGIN { push @INC, '.'; }             
-    eval 'use SubDB ; ';
-    main::ok(53, $@ eq "") ;
-    my %h ;
-    my $X ;
-    eval '
-	$X = tie(%h, "SubDB","dbhash.tmp", O_RDWR|O_CREAT, 0640, $DB_HASH );
-	' ;
-
-    main::ok(54, $@ eq "") ;
-
-    my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
-    main::ok(55, $@ eq "") ;
-    main::ok(56, $ret == 5) ;
-
-    my $value = 0;
-    $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
-    main::ok(57, $@ eq "") ;
-    main::ok(58, $ret == 10) ;
-
-    $ret = eval ' R_NEXT eq main::R_NEXT ' ;
-    main::ok(59, $@ eq "" ) ;
-    main::ok(60, $ret == 1) ;
-
-    $ret = eval '$X->A_new_method("joe") ' ;
-    main::ok(61, $@ eq "") ;
-    main::ok(62, $ret eq "[[11]]") ;
-
-    undef $X;
-    untie(%h);
-    unlink "SubDB.pm", "dbhash.tmp" ;
-
-}
-
-{
-   # DBM Filter tests
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   unlink $Dfile;
-
-   sub checkOutput
-   {
-       no warnings 'uninitialized';
-       my($fk, $sk, $fv, $sv) = @_ ;
-
-       print "# Fetch Key   : expected '$fk' got '$fetch_key'\n" 
-           if $fetch_key ne $fk ;
-       print "# Fetch Value : expected '$fv' got '$fetch_value'\n" 
-           if $fetch_value ne $fv ;
-       print "# Store Key   : expected '$sk' got '$store_key'\n" 
-           if $store_key ne $sk ;
-       print "# Store Value : expected '$sv' got '$store_value'\n" 
-           if $store_value ne $sv ;
-       print "# \$_          : expected 'original' got '$_'\n" 
-           if $_ ne 'original' ;
-
-       return
-           $fetch_key   eq $fk && $store_key   eq $sk && 
-	   $fetch_value eq $fv && $store_value eq $sv &&
-	   $_ eq 'original' ;
-   }
-   
-   ok(63, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-   $db->filter_fetch_key   (sub { $fetch_key = $_ }) ;
-   $db->filter_store_key   (sub { $store_key = $_ }) ;
-   $db->filter_fetch_value (sub { $fetch_value = $_}) ;
-   $db->filter_store_value (sub { $store_value = $_ }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   #                   fk   sk     fv   sv
-   ok(64, checkOutput( "", "fred", "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(65, $h{"fred"} eq "joe");
-   #                   fk    sk     fv    sv
-   ok(66, checkOutput( "", "fred", "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   my ($k, $v) ;
-   $k = 'fred';
-   ok(67, ! $db->seq($k, $v, R_FIRST) ) ;
-   ok(68, $k eq "fred") ;
-   ok(69, $v eq "joe") ;
-   #                    fk     sk  fv  sv
-   ok(70, checkOutput( "fred", "fred", "joe", "")) ;
-
-   # replace the filters, but remember the previous set
-   my ($old_fk) = $db->filter_fetch_key   
-   			(sub { $_ = uc $_ ; $fetch_key = $_ }) ;
-   my ($old_sk) = $db->filter_store_key   
-   			(sub { $_ = lc $_ ; $store_key = $_ }) ;
-   my ($old_fv) = $db->filter_fetch_value 
-   			(sub { $_ = "[$_]"; $fetch_value = $_ }) ;
-   my ($old_sv) = $db->filter_store_value 
-   			(sub { s/o/x/g; $store_value = $_ }) ;
-   
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"Fred"} = "Joe" ;
-   #                   fk   sk     fv    sv
-   ok(71, checkOutput( "", "fred", "", "Jxe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(72, $h{"Fred"} eq "[Jxe]");
-   #                   fk   sk     fv    sv
-   ok(73, checkOutput( "", "fred", "[Jxe]", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $k = 'Fred'; $v ='';
-   ok(74, ! $db->seq($k, $v, R_FIRST) ) ;
-   ok(75, $k eq "Fred") ;
-    #print "k [$k]\n" ;
-   ok(76, $v eq "[Jxe]") ;
-   #                   fk   sk     fv    sv
-   ok(77, checkOutput( "FRED", "fred", "[Jxe]", "")) ;
-
-   # put the original filters back
-   $db->filter_fetch_key   ($old_fk);
-   $db->filter_store_key   ($old_sk);
-   $db->filter_fetch_value ($old_fv);
-   $db->filter_store_value ($old_sv);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok(78, checkOutput( "", "fred", "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(79, $h{"fred"} eq "joe");
-   ok(80, checkOutput( "", "fred", "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   #ok(77, $db->FIRSTKEY() eq "fred") ;
-   $k = 'fred';
-   ok(81, ! $db->seq($k, $v, R_FIRST) ) ;
-   ok(82, $k eq "fred") ;
-   ok(83, $v eq "joe") ;
-   #                   fk   sk     fv    sv
-   ok(84, checkOutput( "fred", "fred", "joe", "")) ;
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h{"fred"} = "joe" ;
-   ok(85, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(86, $h{"fred"} eq "joe");
-   ok(87, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $k = 'fred';
-   ok(88, ! $db->seq($k, $v, R_FIRST) ) ;
-   ok(89, $k eq "fred") ;
-   ok(90, $v eq "joe") ;
-   ok(91, checkOutput( "", "", "", "")) ;
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{    
-    # DBM Filter with a closure
-
-    use warnings ;
-    use strict ;
-    my (%h, $db) ;
-
-    unlink $Dfile;
-    ok(92, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-    my %result = () ;
-
-    sub Closure
-    {
-        my ($name) = @_ ;
-	my $count = 0 ;
-	my @kept = () ;
-
-	return sub { ++$count ; 
-		     push @kept, $_ ; 
-		     $result{$name} = "$name - $count: [@kept]" ;
-		   }
-    }
-
-    $db->filter_store_key(Closure("store key")) ;
-    $db->filter_store_value(Closure("store value")) ;
-    $db->filter_fetch_key(Closure("fetch key")) ;
-    $db->filter_fetch_value(Closure("fetch value")) ;
-
-    $_ = "original" ;
-
-    $h{"fred"} = "joe" ;
-    ok(93, $result{"store key"} eq "store key - 1: [fred]");
-    ok(94, $result{"store value"} eq "store value - 1: [joe]");
-    ok(95, ! defined $result{"fetch key"} );
-    ok(96, ! defined $result{"fetch value"} );
-    ok(97, $_ eq "original") ;
-
-    ok(98, $db->FIRSTKEY() eq "fred") ;
-    ok(99, $result{"store key"} eq "store key - 1: [fred]");
-    ok(100, $result{"store value"} eq "store value - 1: [joe]");
-    ok(101, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(102, ! defined $result{"fetch value"} );
-    ok(103, $_ eq "original") ;
-
-    $h{"jim"}  = "john" ;
-    ok(104, $result{"store key"} eq "store key - 2: [fred jim]");
-    ok(105, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(106, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(107, ! defined $result{"fetch value"} );
-    ok(108, $_ eq "original") ;
-
-    ok(109, $h{"fred"} eq "joe");
-    ok(110, $result{"store key"} eq "store key - 3: [fred jim fred]");
-    ok(111, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(112, $result{"fetch key"} eq "fetch key - 1: [fred]");
-    ok(113, $result{"fetch value"} eq "fetch value - 1: [joe]");
-    ok(114, $_ eq "original") ;
-
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}		
-
-{
-   # DBM Filter recursion detection
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   unlink $Dfile;
-
-   ok(115, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-   $db->filter_store_key (sub { $_ = $h{$_} }) ;
-
-   eval '$h{1} = 1234' ;
-   ok(116, $@ =~ /^recursion detected in filter_store_key at/ );
-   
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-
-{
-   # Examples from the POD
-
-  my $file = "xyzt" ;
-  {
-    my $redirect = new Redirect $file ;
-
-    use warnings FATAL => qw(all);
-    use strict ;
-    use DB_File ;
-    our (%h, $k, $v);
-
-    unlink "fruit" ;
-    tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH 
-        or die "Cannot open file 'fruit': $!\n";
-
-    # Add a few key/value pairs to the file
-    $h{"apple"} = "red" ;
-    $h{"orange"} = "orange" ;
-    $h{"banana"} = "yellow" ;
-    $h{"tomato"} = "red" ;
-
-    # Check for existence of a key
-    print "Banana Exists\n\n" if $h{"banana"} ;
-
-    # Delete a key/value pair.
-    delete $h{"apple"} ;
-
-    # print the contents of the file
-    while (($k, $v) = each %h)
-      { print "$k -> $v\n" }
-
-    untie %h ;
-
-    unlink "fruit" ;
-  }  
-
-  ok(117, docat_del($file) eq <<'EOM') ;
-Banana Exists
-
-orange -> orange
-tomato -> red
-banana -> yellow
-EOM
-   
-}
-
-{
-    # Bug ID 20001013.009
-    #
-    # test that $hash{KEY} = undef doesn't produce the warning
-    #     Use of uninitialized value in null operation 
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my %h ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-    
-    tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
-    $h{ABC} = undef;
-    ok(118, $a eq "") ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # test that %hash = () doesn't produce the warning
-    #     Argument "" isn't numeric in entersub
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my %h ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-    
-    tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
-    %h = (); ;
-    ok(119, $a eq "") ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # When iterating over a tied hash using "each", the key passed to FETCH
-    # will be recycled and passed to NEXTKEY. If a Source Filter modifies the
-    # key in FETCH via a filter_fetch_key method we need to check that the
-    # modified key doesn't get passed to NEXTKEY.
-    # Also Test "keys" & "values" while we are at it.
-
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my $bad_key = 0 ;
-    my %h = () ;
-    my $db ;
-    ok(120, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-    $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
-    $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
-
-    $h{'Alpha_ABC'} = 2 ;
-    $h{'Alpha_DEF'} = 5 ;
-
-    ok(121, $h{'Alpha_ABC'} == 2);
-    ok(122, $h{'Alpha_DEF'} == 5);
-
-    my ($k, $v) = ("","");
-    while (($k, $v) = each %h) {}
-    ok(123, $bad_key == 0);
-
-    $bad_key = 0 ;
-    foreach $k (keys %h) {}
-    ok(124, $bad_key == 0);
-
-    $bad_key = 0 ;
-    foreach $v (values %h) {}
-    ok(125, $bad_key == 0);
-
-    undef $db ;
-    untie %h ;
-    unlink $Dfile;
-}
-
-{
-    # now an error to pass 'hash' a non-code reference
-    my $dbh = new DB_File::HASHINFO ;
-
-    eval { $dbh->{hash} = 2 };
-    ok(126, $@ =~ /^Key 'hash' not associated with a code reference at/);
-
-}
-
-
-#{
-#    # recursion detection in hash
-#    my %hash ;
-#    my $Dfile = "xxx.db";
-#    unlink $Dfile;
-#    my $dbh = new DB_File::HASHINFO ;
-#    $dbh->{hash} = sub { $hash{3} = 4 ; length $_[0] } ;
-# 
-# 
-#    ok(127, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
-#
-#    eval {	$hash{1} = 2;
-#    		$hash{4} = 5;
-#	 };
-#
-#    ok(128, $@ =~ /^DB_File hash callback: recursion detected/);
-#    {
-#        no warnings;
-#        untie %hash;
-#    }
-#    unlink $Dfile;
-#}
-
-#ok(127, 1);
-#ok(128, 1);
-
-{
-    # Check that two hash's don't interact
-    my %hash1 ;
-    my %hash2 ;
-    my $h1_count = 0;
-    my $h2_count = 0;
-    unlink $Dfile, $Dfile2;
-    my $dbh1 = new DB_File::HASHINFO ;
-    $dbh1->{hash} = sub { ++ $h1_count ; length $_[0] } ;
-
-    my $dbh2 = new DB_File::HASHINFO ;
-    $dbh2->{hash} = sub { ++ $h2_count ; length $_[0] } ;
- 
- 
- 
-    my (%h);
-    ok(127, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
-    ok(128, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
-
-    $hash1{DEFG} = 5;
-    $hash1{XYZ} = 2;
-    $hash1{ABCDE} = 5;
-
-    $hash2{defg} = 5;
-    $hash2{xyz} = 2;
-    $hash2{abcde} = 5;
-
-    ok(129, $h1_count > 0);
-    ok(130, $h1_count == $h2_count);
-
-    ok(131, safeUntie \%hash1);
-    ok(132, safeUntie \%hash2);
-    unlink $Dfile, $Dfile2;
-}
-
-{
-    # Passing undef for flags and/or mode when calling tie could cause 
-    #     Use of uninitialized value in subroutine entry
-    
-
-    my $warn_count = 0 ;
-    #local $SIG{__WARN__} = sub { ++ $warn_count };
-    my %hash1;
-    unlink $Dfile;
-
-    tie %hash1, 'DB_File',$Dfile, undef;
-    ok(133, $warn_count == 0);
-    $warn_count = 0;
-    untie %hash1;
-    unlink $Dfile;
-    tie %hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, undef;
-    ok(134, $warn_count == 0);
-    untie %hash1;
-    unlink $Dfile;
-    tie %hash1, 'DB_File',$Dfile, undef, undef;
-    ok(135, $warn_count == 0);
-    $warn_count = 0;
-
-    untie %hash1;
-    unlink $Dfile;
-}
-
-{
-   # Check that DBM Filter can cope with read-only $_
-
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my $Dfile = "xxy.db";
-   unlink $Dfile;
-
-   ok(136, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-   $db->filter_fetch_key   (sub { }) ;
-   $db->filter_store_key   (sub { }) ;
-   $db->filter_fetch_value (sub { }) ;
-   $db->filter_store_value (sub { }) ;
-
-   $_ = "original" ;
-
-   $h{"fred"} = "joe" ;
-   ok(137, $h{"fred"} eq "joe");
-
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (138, ! $@);
-
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   $h{"fred"} = "joe" ;
-
-   ok(139, $h{"fred"} eq "joe");
-
-   ok(140, $db->FIRSTKEY() eq "fred") ;
-   
-   eval { grep { $h{$_} } (1, 2, 3) };
-   ok (141, ! $@);
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-{
-   # Check low-level API works with filter
-
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my $Dfile = "xxy.db";
-   unlink $Dfile;
-
-   ok(142, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-
-   $db->filter_fetch_key   (sub { $_ = unpack("i", $_) } );
-   $db->filter_store_key   (sub { $_ = pack("i", $_) } );
-   $db->filter_fetch_value (sub { $_ = unpack("i", $_) } );
-   $db->filter_store_value (sub { $_ = pack("i", $_) } );
-
-   $_ = 'fred';
-
-   my $key = 22 ;
-   my $value = 34 ;
-
-   $db->put($key, $value) ;
-   ok 143, $key == 22;
-   ok 144, $value == 34 ;
-   ok 145, $_ eq 'fred';
-   #print "k [$key][$value]\n" ;
-
-   my $val ;
-   $db->get($key, $val) ;
-   ok 146, $key == 22;
-   ok 147, $val == 34 ;
-   ok 148, $_ eq 'fred';
-
-   $key = 51 ;
-   $value = 454;
-   $h{$key} = $value ;
-   ok 149, $key == 51;
-   ok 150, $value == 454 ;
-   ok 151, $_ eq 'fred';
-
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-
-{
-    # Regression Test for bug 30237
-    # Check that substr can be used in the key to db_put
-    # and that db_put does not trigger the warning
-    # 
-    #     Use of uninitialized value in subroutine entry
-
-
-    use warnings ;
-    use strict ;
-    my (%h, $db) ;
-    my $Dfile = "xxy.db";
-    unlink $Dfile;
-
-    ok(152, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-    my $warned = '';
-    local $SIG{__WARN__} = sub {$warned = $_[0]} ;
-
-    # db-put with substr of key
-    my %remember = () ;
-    for my $ix ( 1 .. 2 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $db->put(substr($key,0), $value) ;
-    }
-
-    ok 153, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # db-put with substr of value
-    $warned = '';
-    for my $ix ( 10 .. 12 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $db->put($key, substr($value,0)) ;
-    }
-
-    ok 154, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied hash is not a problem, but check anyway
-    # substr of key
-    $warned = '';
-    for my $ix ( 30 .. 32 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $h{substr($key,0)} = $value ;
-    }
-
-    ok 155, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied hash is not a problem, but check anyway
-    # substr of value
-    $warned = '';
-    for my $ix ( 40 .. 42 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $h{$key} = substr($value,0) ;
-    }
-
-    ok 156, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    my %bad = () ;
-    $key = '';
-    for ($status = $db->seq(substr($key,0), substr($value,0), R_FIRST ) ;
-         $status == 0 ;
-         $status = $db->seq(substr($key,0), substr($value,0), R_NEXT ) ) {
-
-        #print "# key [$key] value [$value]\n" ;
-        if (defined $remember{$key} && defined $value && 
-             $remember{$key} eq $value) {
-            delete $remember{$key} ;
-        }
-        else {
-            $bad{$key} = $value ;
-        }
-    }
-    
-    ok 157, keys %bad == 0 ;
-    ok 158, keys %remember == 0 ;
-
-    print "# missing -- $key=>$value\n" while ($key, $value) = each %remember;
-    print "# bad     -- $key=>$value\n" while ($key, $value) = each %bad;
-
-    # Make sure this fix does not break code to handle an undef key
-    # Berkeley DB undef key is broken between versions 2.3.16 and 3.1
-    my $value = 'fred';
-    $warned = '';
-    $db->put(undef, $value) ;
-    ok 159, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    my $no_NULL = ($DB_File::db_ver >= 2.003016 && $DB_File::db_ver < 3.001) ;
-    print "# db_ver $DB_File::db_ver\n";
-    $value = '' ;
-    $db->get(undef, $value) ;
-    ok 160, $no_NULL || $value eq 'fred' or print "# got [$value]\n" ;
-    ok 161, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    undef $db ;
-    untie %h;
-    unlink $Dfile;
-}
-
-{
-   # Check filter + substr
-
-   use warnings ;
-   use strict ;
-   my (%h, $db) ;
-   my $Dfile = "xxy.db";
-   unlink $Dfile;
-
-   ok(162, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
-
-
-   {
-       $db->filter_fetch_key   (sub { lc $_ } );
-       $db->filter_store_key   (sub { uc $_ } );
-       $db->filter_fetch_value (sub { lc $_ } );
-       $db->filter_store_value (sub { uc $_ } );
-   }
-
-   $_ = 'fred';
-
-    # db-put with substr of key
-    my %remember = () ;
-    my $status = 0 ;
-    for my $ix ( 1 .. 2 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$key} = $value ;
-        $status += $db->put(substr($key,0), substr($value,0)) ;
-    }
-
-    ok 163, $status == 0 or print "# Status $status\n" ;
-
-    if (1)
-    {
-       $db->filter_fetch_key   (undef);
-       $db->filter_store_key   (undef);
-       $db->filter_fetch_value (undef);
-       $db->filter_store_value (undef);
-    }
-
-    my %bad = () ;
-    my $key = '';
-    my $value = '';
-    for ($status = $db->seq($key, $value, R_FIRST ) ;
-         $status == 0 ;
-         $status = $db->seq($key, $value, R_NEXT ) ) {
-
-        #print "# key [$key] value [$value]\n" ;
-        if (defined $remember{$key} && defined $value && 
-             $remember{$key} eq $value) {
-            delete $remember{$key} ;
-        }
-        else {
-            $bad{$key} = $value ;
-        }
-    }
-    
-    ok 164, $_ eq 'fred';
-    ok 165, keys %bad == 0 ;
-    ok 166, keys %remember == 0 ;
-
-    print "# missing -- $key $value\n" while ($key, $value) = each %remember;
-    print "# bad     -- $key $value\n" while ($key, $value) = each %bad;
-   undef $db ;
-   untie %h;
-   unlink $Dfile;
-}
-
-exit ;
diff --git a/storage/bdb/perl/DB_File/t/db-recno.t b/storage/bdb/perl/DB_File/t/db-recno.t
deleted file mode 100644
index 23bf0cdec5e..00000000000
--- a/storage/bdb/perl/DB_File/t/db-recno.t
+++ /dev/null
@@ -1,1601 +0,0 @@
-#!./perl -w
-
-BEGIN {
-    unless(grep /blib/, @INC) {
-        chdir 't' if -d 't';
-        @INC = '../lib' if -d '../lib';
-    }
-}
- 
-use warnings;
-use strict;
-use Config;
- 
-BEGIN {
-    if(-d "lib" && -f "TEST") {
-        if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
-            print "1..0 # Skip: DB_File was not built\n";
-            exit 0;
-        }
-    }
-}
-
-use DB_File; 
-use Fcntl;
-our ($dbh, $Dfile, $bad_ones, $FA);
-
-# full tied array support started in Perl 5.004_57
-# Double check to see if it is available.
-
-{
-    sub try::TIEARRAY { bless [], "try" }
-    sub try::FETCHSIZE { $FA = 1 }
-    $FA = 0 ;
-    my @a ; 
-    tie @a, 'try' ;
-    my $a = @a ;
-}
-
-
-sub ok
-{
-    my $no = shift ;
-    my $result = shift ;
-
-    print "not " unless $result ;
-    print "ok $no\n" ;
-
-    return $result ;
-}
-
-{
-    package Redirect ;
-    use Symbol ;
-
-    sub new
-    {
-        my $class = shift ;
-        my $filename = shift ;
-	my $fh = gensym ;
-	open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
-	my $real_stdout = select($fh) ;
-	return bless [$fh, $real_stdout ] ;
-
-    }
-    sub DESTROY
-    {
-        my $self = shift ;
-	close $self->[0] ;
-	select($self->[1]) ;
-    }
-}
-
-sub docat
-{
-    my $file = shift;
-    local $/ = undef;
-    open(CAT,$file) || die "Cannot open $file:$!";
-    my $result = ;
-    close(CAT);
-    normalise($result) ;
-    return $result;
-}
-
-sub docat_del
-{ 
-    my $file = shift;
-    my $result = docat($file);
-    unlink $file ;
-    return $result;
-}   
-
-sub safeUntie
-{
-    my $hashref = shift ;
-    my $no_inner = 1;
-    local $SIG{__WARN__} = sub {-- $no_inner } ;
-    untie @$hashref;
-    return $no_inner;
-}
-
-sub bad_one
-{
-    unless ($bad_ones++) {
-	print STDERR <{bval}) ;
-ok(2, ! defined $dbh->{cachesize}) ;
-ok(3, ! defined $dbh->{psize}) ;
-ok(4, ! defined $dbh->{flags}) ;
-ok(5, ! defined $dbh->{lorder}) ;
-ok(6, ! defined $dbh->{reclen}) ;
-ok(7, ! defined $dbh->{bfname}) ;
-
-$dbh->{bval} = 3000 ;
-ok(8, $dbh->{bval} == 3000 );
-
-$dbh->{cachesize} = 9000 ;
-ok(9, $dbh->{cachesize} == 9000 );
-
-$dbh->{psize} = 400 ;
-ok(10, $dbh->{psize} == 400 );
-
-$dbh->{flags} = 65 ;
-ok(11, $dbh->{flags} == 65 );
-
-$dbh->{lorder} = 123 ;
-ok(12, $dbh->{lorder} == 123 );
-
-$dbh->{reclen} = 1234 ;
-ok(13, $dbh->{reclen} == 1234 );
-
-$dbh->{bfname} = 1234 ;
-ok(14, $dbh->{bfname} == 1234 );
-
-
-# Check that an invalid entry is caught both for store & fetch
-eval '$dbh->{fred} = 1234' ;
-ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
-eval 'my $q = $dbh->{fred}' ;
-ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
-
-# Now check the interface to RECNOINFO
-
-my $X  ;
-my @h ;
-ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
-
-my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
-
-ok(18, ((stat($Dfile))[2] & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
-	||  $noMode{$^O} );
-
-#my $l = @h ;
-my $l = $X->length ;
-ok(19, ($FA ? @h == 0 : !$l) );
-
-my @data = qw( a b c d ever f g h  i j k longername m n o p) ;
-
-$h[0] = shift @data ;
-ok(20, $h[0] eq 'a' );
-
-my $ i;
-foreach (@data)
-  { $h[++$i] = $_ }
-
-unshift (@data, 'a') ;
-
-ok(21, defined $h[1] );
-ok(22, ! defined $h[16] );
-ok(23, $FA ? @h == @data : $X->length == @data );
-
-
-# Overwrite an entry & check fetch it
-$h[3] = 'replaced' ;
-$data[3] = 'replaced' ;
-ok(24, $h[3] eq 'replaced' );
-
-#PUSH
-my @push_data = qw(added to the end) ;
-($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
-push (@data, @push_data) ;
-ok(25, $h[++$i] eq 'added' );
-ok(26, $h[++$i] eq 'to' );
-ok(27, $h[++$i] eq 'the' );
-ok(28, $h[++$i] eq 'end' );
-
-# POP
-my $popped = pop (@data) ;
-my $value = ($FA ? pop @h : $X->pop) ;
-ok(29, $value eq $popped) ;
-
-# SHIFT
-$value = ($FA ? shift @h : $X->shift) ;
-my $shifted = shift @data ;
-ok(30, $value eq $shifted );
-
-# UNSHIFT
-
-# empty list
-($FA ? unshift @h,() : $X->unshift) ;
-ok(31, ($FA ? @h == @data : $X->length == @data ));
-
-my @new_data = qw(add this to the start of the array) ;
-$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
-unshift (@data, @new_data) ;
-ok(32, $FA ? @h == @data : $X->length == @data );
-ok(33, $h[0] eq "add") ;
-ok(34, $h[1] eq "this") ;
-ok(35, $h[2] eq "to") ;
-ok(36, $h[3] eq "the") ;
-ok(37, $h[4] eq "start") ;
-ok(38, $h[5] eq "of") ;
-ok(39, $h[6] eq "the") ;
-ok(40, $h[7] eq "array") ;
-ok(41, $h[8] eq $data[8]) ;
-
-# Brief test for SPLICE - more thorough 'soak test' is later.
-my @old;
-if ($FA) {
-    @old = splice(@h, 1, 2, qw(bananas just before));
-}
-else {
-    @old = $X->splice(1, 2, qw(bananas just before));
-}
-ok(42, $h[0] eq "add") ;
-ok(43, $h[1] eq "bananas") ;
-ok(44, $h[2] eq "just") ;
-ok(45, $h[3] eq "before") ;
-ok(46, $h[4] eq "the") ;
-ok(47, $h[5] eq "start") ;
-ok(48, $h[6] eq "of") ;
-ok(49, $h[7] eq "the") ;
-ok(50, $h[8] eq "array") ;
-ok(51, $h[9] eq $data[8]) ;
-$FA ? splice(@h, 1, 3, @old) : $X->splice(1, 3, @old);
-
-# Now both arrays should be identical
-
-my $ok = 1 ;
-my $j = 0 ;
-foreach (@data)
-{
-   $ok = 0, last if $_ ne $h[$j ++] ; 
-}
-ok(52, $ok );
-
-# Neagtive subscripts
-
-# get the last element of the array
-ok(53, $h[-1] eq $data[-1] );
-ok(54, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
-
-# get the first element using a negative subscript
-eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
-ok(55, $@ eq "" );
-ok(56, $h[0] eq "abcd" );
-
-# now try to read before the start of the array
-eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
-ok(57, $@ =~ '^Modification of non-creatable array value attempted' );
-
-# IMPORTANT - $X must be undefined before the untie otherwise the
-#             underlying DB close routine will not get called.
-undef $X ;
-ok(58, safeUntie \@h);
-
-unlink $Dfile;
-
-
-{
-    # Check bval defaults to \n
-
-    my @h = () ;
-    my $dbh = new DB_File::RECNOINFO ;
-    ok(59, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
-    $h[0] = "abc" ;
-    $h[1] = "def" ;
-    $h[3] = "ghi" ;
-    ok(60, safeUntie \@h);
-    my $x = docat($Dfile) ;
-    unlink $Dfile;
-    ok(61, $x eq "abc\ndef\n\nghi\n") ;
-}
-
-{
-    # Change bval
-
-    my @h = () ;
-    my $dbh = new DB_File::RECNOINFO ;
-    $dbh->{bval} = "-" ;
-    ok(62, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
-    $h[0] = "abc" ;
-    $h[1] = "def" ;
-    $h[3] = "ghi" ;
-    ok(63, safeUntie \@h);
-    my $x = docat($Dfile) ;
-    unlink $Dfile;
-    my $ok = ($x eq "abc-def--ghi-") ;
-    bad_one() unless $ok ;
-    ok(64, $ok) ;
-}
-
-{
-    # Check R_FIXEDLEN with default bval (space)
-
-    my @h = () ;
-    my $dbh = new DB_File::RECNOINFO ;
-    $dbh->{flags} = R_FIXEDLEN ;
-    $dbh->{reclen} = 5 ;
-    ok(65, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
-    $h[0] = "abc" ;
-    $h[1] = "def" ;
-    $h[3] = "ghi" ;
-    ok(66, safeUntie \@h);
-    my $x = docat($Dfile) ;
-    unlink $Dfile;
-    my $ok = ($x eq "abc  def       ghi  ") ;
-    bad_one() unless $ok ;
-    ok(67, $ok) ;
-}
-
-{
-    # Check R_FIXEDLEN with user-defined bval
-
-    my @h = () ;
-    my $dbh = new DB_File::RECNOINFO ;
-    $dbh->{flags} = R_FIXEDLEN ;
-    $dbh->{bval} = "-" ;
-    $dbh->{reclen} = 5 ;
-    ok(68, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
-    $h[0] = "abc" ;
-    $h[1] = "def" ;
-    $h[3] = "ghi" ;
-    ok(69, safeUntie \@h);
-    my $x = docat($Dfile) ;
-    unlink $Dfile;
-    my $ok = ($x eq "abc--def-------ghi--") ;
-    bad_one() unless $ok ;
-    ok(70, $ok) ;
-}
-
-{
-    # check that attempting to tie an associative array to a DB_RECNO will fail
-
-    my $filename = "xyz" ;
-    my %x ;
-    eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
-    ok(71, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
-    unlink $filename ;
-}
-
-{
-   # sub-class test
-
-   package Another ;
-
-   use warnings ;
-   use strict ;
-
-   open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
-   print FILE <<'EOM' ;
-
-   package SubDB ;
-
-   use warnings ;
-   use strict ;
-   our (@ISA, @EXPORT);
-
-   require Exporter ;
-   use DB_File;
-   @ISA=qw(DB_File);
-   @EXPORT = @DB_File::EXPORT ;
-
-   sub STORE { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::STORE($key, $value * 2) ;
-   }
-
-   sub FETCH { 
-	my $self = shift ;
-        my $key = shift ;
-        $self->SUPER::FETCH($key) - 1 ;
-   }
-
-   sub put { 
-	my $self = shift ;
-        my $key = shift ;
-        my $value = shift ;
-        $self->SUPER::put($key, $value * 3) ;
-   }
-
-   sub get { 
-	my $self = shift ;
-        $self->SUPER::get($_[0], $_[1]) ;
-	$_[1] -= 2 ;
-   }
-
-   sub A_new_method
-   {
-	my $self = shift ;
-        my $key = shift ;
-        my $value = $self->FETCH($key) ;
-	return "[[$value]]" ;
-   }
-
-   1 ;
-EOM
-
-    close FILE  or die "Could not close: $!";
-
-    BEGIN { push @INC, '.'; } 
-    eval 'use SubDB ; ';
-    main::ok(72, $@ eq "") ;
-    my @h ;
-    my $X ;
-    eval '
-	$X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
-	' ;
-    die "Could not tie: $!" unless $X;
-
-    main::ok(73, $@ eq "") ;
-
-    my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
-    main::ok(74, $@ eq "") ;
-    main::ok(75, $ret == 5) ;
-
-    my $value = 0;
-    $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
-    main::ok(76, $@ eq "") ;
-    main::ok(77, $ret == 10) ;
-
-    $ret = eval ' R_NEXT eq main::R_NEXT ' ;
-    main::ok(78, $@ eq "" ) ;
-    main::ok(79, $ret == 1) ;
-
-    $ret = eval '$X->A_new_method(1) ' ;
-    main::ok(80, $@ eq "") ;
-    main::ok(81, $ret eq "[[11]]") ;
-
-    undef $X;
-    main::ok(82, main::safeUntie \@h);
-    unlink "SubDB.pm", "recno.tmp" ;
-
-}
-
-{
-
-    # test $#
-    my $self ;
-    unlink $Dfile;
-    ok(83, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
-    $h[0] = "abc" ;
-    $h[1] = "def" ;
-    $h[2] = "ghi" ;
-    $h[3] = "jkl" ;
-    ok(84, $FA ? $#h == 3 : $self->length() == 4) ;
-    undef $self ;
-    ok(85, safeUntie \@h);
-    my $x = docat($Dfile) ;
-    ok(86, $x eq "abc\ndef\nghi\njkl\n") ;
-
-    # $# sets array to same length
-    ok(87, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
-    if ($FA)
-      { $#h = 3 }
-    else 
-      { $self->STORESIZE(4) }
-    ok(88, $FA ? $#h == 3 : $self->length() == 4) ;
-    undef $self ;
-    ok(89, safeUntie \@h);
-    $x = docat($Dfile) ;
-    ok(90, $x eq "abc\ndef\nghi\njkl\n") ;
-
-    # $# sets array to bigger
-    ok(91, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
-    if ($FA)
-      { $#h = 6 }
-    else 
-      { $self->STORESIZE(7) }
-    ok(92, $FA ? $#h == 6 : $self->length() == 7) ;
-    undef $self ;
-    ok(93, safeUntie \@h);
-    $x = docat($Dfile) ;
-    ok(94, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
-
-    # $# sets array smaller
-    ok(95, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
-    if ($FA)
-      { $#h = 2 }
-    else 
-      { $self->STORESIZE(3) }
-    ok(96, $FA ? $#h == 2 : $self->length() == 3) ;
-    undef $self ;
-    ok(97, safeUntie \@h);
-    $x = docat($Dfile) ;
-    ok(98, $x eq "abc\ndef\nghi\n") ;
-
-    unlink $Dfile;
-
-
-}
-
-{
-   # DBM Filter tests
-   use warnings ;
-   use strict ;
-   my (@h, $db) ;
-   my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   unlink $Dfile;
-
-   sub checkOutput
-   {
-       my($fk, $sk, $fv, $sv) = @_ ;
-
-       print "# Fetch Key   : expected '$fk' got '$fetch_key'\n" 
-           if $fetch_key ne $fk ;
-       print "# Fetch Value : expected '$fv' got '$fetch_value'\n" 
-           if $fetch_value ne $fv ;
-       print "# Store Key   : expected '$sk' got '$store_key'\n" 
-           if $store_key ne $sk ;
-       print "# Store Value : expected '$sv' got '$store_value'\n" 
-           if $store_value ne $sv ;
-       print "# \$_          : expected 'original' got '$_'\n" 
-           if $_ ne 'original' ;
-
-       return
-           $fetch_key   eq $fk && $store_key   eq $sk && 
-	   $fetch_value eq $fv && $store_value eq $sv &&
-	   $_ eq 'original' ;
-   }
-   
-   ok(99, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
-   $db->filter_fetch_key   (sub { $fetch_key = $_ }) ;
-   $db->filter_store_key   (sub { $store_key = $_ }) ;
-   $db->filter_fetch_value (sub { $fetch_value = $_}) ;
-   $db->filter_store_value (sub { $store_value = $_ }) ;
-
-   $_ = "original" ;
-
-   $h[0] = "joe" ;
-   #                   fk   sk     fv   sv
-   ok(100, checkOutput( "", 0, "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(101, $h[0] eq "joe");
-   #                   fk  sk  fv    sv
-   ok(102, checkOutput( "", 0, "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(103, $db->FIRSTKEY() == 0) ;
-   #                    fk     sk  fv  sv
-   ok(104, checkOutput( 0, "", "", "")) ;
-
-   # replace the filters, but remember the previous set
-   my ($old_fk) = $db->filter_fetch_key   
-   			(sub { ++ $_ ; $fetch_key = $_ }) ;
-   my ($old_sk) = $db->filter_store_key   
-   			(sub { $_ *= 2 ; $store_key = $_ }) ;
-   my ($old_fv) = $db->filter_fetch_value 
-   			(sub { $_ = "[$_]"; $fetch_value = $_ }) ;
-   my ($old_sv) = $db->filter_store_value 
-   			(sub { s/o/x/g; $store_value = $_ }) ;
-   
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h[1] = "Joe" ;
-   #                   fk   sk     fv    sv
-   ok(105, checkOutput( "", 2, "", "Jxe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(106, $h[1] eq "[Jxe]");
-   #                   fk   sk     fv    sv
-   ok(107, checkOutput( "", 2, "[Jxe]", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(108, $db->FIRSTKEY() == 1) ;
-   #                   fk   sk     fv    sv
-   ok(109, checkOutput( 1, "", "", "")) ;
-   
-   # put the original filters back
-   $db->filter_fetch_key   ($old_fk);
-   $db->filter_store_key   ($old_sk);
-   $db->filter_fetch_value ($old_fv);
-   $db->filter_store_value ($old_sv);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h[0] = "joe" ;
-   ok(110, checkOutput( "", 0, "", "joe")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(111, $h[0] eq "joe");
-   ok(112, checkOutput( "", 0, "joe", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(113, $db->FIRSTKEY() == 0) ;
-   ok(114, checkOutput( 0, "", "", "")) ;
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   $h[0] = "joe" ;
-   ok(115, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(116, $h[0] eq "joe");
-   ok(117, checkOutput( "", "", "", "")) ;
-
-   ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
-   ok(118, $db->FIRSTKEY() == 0) ;
-   ok(119, checkOutput( "", "", "", "")) ;
-
-   undef $db ;
-   ok(120, safeUntie \@h);
-   unlink $Dfile;
-}
-
-{    
-    # DBM Filter with a closure
-
-    use warnings ;
-    use strict ;
-    my (@h, $db) ;
-
-    unlink $Dfile;
-    ok(121, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
-    my %result = () ;
-
-    sub Closure
-    {
-        my ($name) = @_ ;
-	my $count = 0 ;
-	my @kept = () ;
-
-	return sub { ++$count ; 
-		     push @kept, $_ ; 
-		     $result{$name} = "$name - $count: [@kept]" ;
-		   }
-    }
-
-    $db->filter_store_key(Closure("store key")) ;
-    $db->filter_store_value(Closure("store value")) ;
-    $db->filter_fetch_key(Closure("fetch key")) ;
-    $db->filter_fetch_value(Closure("fetch value")) ;
-
-    $_ = "original" ;
-
-    $h[0] = "joe" ;
-    ok(122, $result{"store key"} eq "store key - 1: [0]");
-    ok(123, $result{"store value"} eq "store value - 1: [joe]");
-    ok(124, ! defined $result{"fetch key"} );
-    ok(125, ! defined $result{"fetch value"} );
-    ok(126, $_ eq "original") ;
-
-    ok(127, $db->FIRSTKEY() == 0 ) ;
-    ok(128, $result{"store key"} eq "store key - 1: [0]");
-    ok(129, $result{"store value"} eq "store value - 1: [joe]");
-    ok(130, $result{"fetch key"} eq "fetch key - 1: [0]");
-    ok(131, ! defined $result{"fetch value"} );
-    ok(132, $_ eq "original") ;
-
-    $h[7]  = "john" ;
-    ok(133, $result{"store key"} eq "store key - 2: [0 7]");
-    ok(134, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(135, $result{"fetch key"} eq "fetch key - 1: [0]");
-    ok(136, ! defined $result{"fetch value"} );
-    ok(137, $_ eq "original") ;
-
-    ok(138, $h[0] eq "joe");
-    ok(139, $result{"store key"} eq "store key - 3: [0 7 0]");
-    ok(140, $result{"store value"} eq "store value - 2: [joe john]");
-    ok(141, $result{"fetch key"} eq "fetch key - 1: [0]");
-    ok(142, $result{"fetch value"} eq "fetch value - 1: [joe]");
-    ok(143, $_ eq "original") ;
-
-    undef $db ;
-    ok(144, safeUntie \@h);
-    unlink $Dfile;
-}		
-
-{
-   # DBM Filter recursion detection
-   use warnings ;
-   use strict ;
-   my (@h, $db) ;
-   unlink $Dfile;
-
-   ok(145, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
-   $db->filter_store_key (sub { $_ = $h[0] }) ;
-
-   eval '$h[1] = 1234' ;
-   ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
-   
-   undef $db ;
-   ok(147, safeUntie \@h);
-   unlink $Dfile;
-}
-
-
-{
-   # Examples from the POD
-
-  my $file = "xyzt" ;
-  {
-    my $redirect = new Redirect $file ;
-
-    use warnings FATAL => qw(all);
-    use strict ;
-    use DB_File ;
-
-    my $filename = "text" ;
-    unlink $filename ;
-
-    my @h ;
-    my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO 
-        or die "Cannot open file 'text': $!\n" ;
-
-    # Add a few key/value pairs to the file
-    $h[0] = "orange" ;
-    $h[1] = "blue" ;
-    $h[2] = "yellow" ;
-
-    $FA ? push @h, "green", "black" 
-        : $x->push("green", "black") ;
-
-    my $elements = $FA ? scalar @h : $x->length ;
-    print "The array contains $elements entries\n" ;
-
-    my $last = $FA ? pop @h : $x->pop ;
-    print "popped $last\n" ;
-
-    $FA ? unshift @h, "white" 
-        : $x->unshift("white") ;
-    my $first = $FA ? shift @h : $x->shift ;
-    print "shifted $first\n" ;
-
-    # Check for existence of a key
-    print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
-    # use a negative index
-    print "The last element is $h[-1]\n" ;
-    print "The 2nd last element is $h[-2]\n" ;
-
-    undef $x ;
-    untie @h ;
-
-    unlink $filename ;
-  }  
-
-  ok(148, docat_del($file) eq <<'EOM') ;
-The array contains 5 entries
-popped black
-shifted white
-Element 1 Exists with value blue
-The last element is green
-The 2nd last element is yellow
-EOM
-
-  my $save_output = "xyzt" ;
-  {
-    my $redirect = new Redirect $save_output ;
-
-    use warnings FATAL => qw(all);
-    use strict ;
-    our (@h, $H, $file, $i);
-    use DB_File ;
-    use Fcntl ;
-    
-    $file = "text" ;
-
-    unlink $file ;
-
-    $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO 
-        or die "Cannot open file $file: $!\n" ;
-    
-    # first create a text file to play with
-    $h[0] = "zero" ;
-    $h[1] = "one" ;
-    $h[2] = "two" ;
-    $h[3] = "three" ;
-    $h[4] = "four" ;
-
-    
-    # Print the records in order.
-    #
-    # The length method is needed here because evaluating a tied
-    # array in a scalar context does not return the number of
-    # elements in the array.  
-
-    print "\nORIGINAL\n" ;
-    foreach $i (0 .. $H->length - 1) {
-        print "$i: $h[$i]\n" ;
-    }
-
-    # use the push & pop methods
-    $a = $H->pop ;
-    $H->push("last") ;
-    print "\nThe last record was [$a]\n" ;
-
-    # and the shift & unshift methods
-    $a = $H->shift ;
-    $H->unshift("first") ;
-    print "The first record was [$a]\n" ;
-
-    # Use the API to add a new record after record 2.
-    $i = 2 ;
-    $H->put($i, "Newbie", R_IAFTER) ;
-
-    # and a new record before record 1.
-    $i = 1 ;
-    $H->put($i, "New One", R_IBEFORE) ;
-
-    # delete record 3
-    $H->del(3) ;
-
-    # now print the records in reverse order
-    print "\nREVERSE\n" ;
-    for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
-      { print "$i: $h[$i]\n" }
-
-    # same again, but use the API functions instead
-    print "\nREVERSE again\n" ;
-    my ($s, $k, $v)  = (0, 0, 0) ;
-    for ($s = $H->seq($k, $v, R_LAST) ; 
-             $s == 0 ; 
-             $s = $H->seq($k, $v, R_PREV))
-      { print "$k: $v\n" }
-
-    undef $H ;
-    untie @h ;    
-
-    unlink $file ;
-  }  
-
-  ok(149, docat_del($save_output) eq <<'EOM') ;
-
-ORIGINAL
-0: zero
-1: one
-2: two
-3: three
-4: four
-
-The last record was [four]
-The first record was [zero]
-
-REVERSE
-5: last
-4: three
-3: Newbie
-2: one
-1: New One
-0: first
-
-REVERSE again
-5: last
-4: three
-3: Newbie
-2: one
-1: New One
-0: first
-EOM
-   
-}
-
-{
-    # Bug ID 20001013.009
-    #
-    # test that $hash{KEY} = undef doesn't produce the warning
-    #     Use of uninitialized value in null operation 
-    use warnings ;
-    use strict ;
-    use DB_File ;
-
-    unlink $Dfile;
-    my @h ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-    
-    tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO 
-	or die "Can't open file: $!\n" ;
-    $h[0] = undef;
-    ok(150, $a eq "") ;
-    ok(151, safeUntie \@h);
-    unlink $Dfile;
-}
-
-{
-    # test that %hash = () doesn't produce the warning
-    #     Argument "" isn't numeric in entersub
-    use warnings ;
-    use strict ;
-    use DB_File ;
-    my $a = "";
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-
-    unlink $Dfile;
-    my @h ;
-    
-    tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO 
-	or die "Can't open file: $!\n" ;
-    @h = (); ;
-    ok(152, $a eq "") ;
-    ok(153, safeUntie \@h);
-    unlink $Dfile;
-}
-
-{
-   # Check that DBM Filter can cope with read-only $_
-
-   use warnings ;
-   use strict ;
-   my (@h, $db) ;
-   unlink $Dfile;
-
-   ok(154, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
-   $db->filter_fetch_key   (sub { }) ;
-   $db->filter_store_key   (sub { }) ;
-   $db->filter_fetch_value (sub { }) ;
-   $db->filter_store_value (sub { }) ;
-
-   $_ = "original" ;
-
-   $h[0] = "joe" ;
-   ok(155, $h[0] eq "joe");
-
-   eval { grep { $h[$_] } (1, 2, 3) };
-   ok (156, ! $@);
-
-
-   # delete the filters
-   $db->filter_fetch_key   (undef);
-   $db->filter_store_key   (undef);
-   $db->filter_fetch_value (undef);
-   $db->filter_store_value (undef);
-
-   $h[1] = "joe" ;
-
-   ok(157, $h[1] eq "joe");
-
-   eval { grep { $h[$_] } (1, 2, 3) };
-   ok (158, ! $@);
-
-   undef $db ;
-   untie @h;
-   unlink $Dfile;
-}
-
-{
-   # Check low-level API works with filter
-
-   use warnings ;
-   use strict ;
-   my (@h, $db) ;
-   my $Dfile = "xxy.db";
-   unlink $Dfile;
-
-   ok(159, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
-
-   $db->filter_fetch_key   (sub { ++ $_ } );
-   $db->filter_store_key   (sub { -- $_ } );
-   $db->filter_fetch_value (sub { $_ = unpack("i", $_) } );
-   $db->filter_store_value (sub { $_ = pack("i", $_) } );
-
-   $_ = 'fred';
-
-   my $key = 22 ;
-   my $value = 34 ;
-
-   $db->put($key, $value) ;
-   ok 160, $key == 22;
-   ok 161, $value == 34 ;
-   ok 162, $_ eq 'fred';
-   #print "k [$key][$value]\n" ;
-
-   my $val ;
-   $db->get($key, $val) ;
-   ok 163, $key == 22;
-   ok 164, $val == 34 ;
-   ok 165, $_ eq 'fred';
-
-   $key = 51 ;
-   $value = 454;
-   $h[$key] = $value ;
-   ok 166, $key == 51;
-   ok 167, $value == 454 ;
-   ok 168, $_ eq 'fred';
-
-   undef $db ;
-   untie @h;
-   unlink $Dfile;
-}
-
-
-{
-    # Regression Test for bug 30237
-    # Check that substr can be used in the key to db_put
-    # and that db_put does not trigger the warning
-    # 
-    #     Use of uninitialized value in subroutine entry
-
-
-    use warnings ;
-    use strict ;
-    my (@h, $db) ;
-    my $status ;
-    my $Dfile = "xxy.db";
-    unlink $Dfile;
-
-    ok(169, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO) );
-
-    my $warned = '';
-    local $SIG{__WARN__} = sub {$warned = $_[0]} ;
-
-    # db-put with substr of key
-    my %remember = () ;
-    for my $ix ( 0 .. 2 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{substr($key,0, 1)} = $value ;
-        $db->put(substr($key,0, 1), $value) ;
-    }
-
-    ok 170, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # db-put with substr of value
-    $warned = '';
-    for my $ix ( 3 .. 5 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$ix} = $value ;
-        $db->put($ix, substr($value,0)) ;
-    }
-
-    ok 171, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied array is not a problem, but check anyway
-    # substr of key
-    $warned = '';
-    for my $ix ( 6 .. 8 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{substr($key,0,1)} = $value ;
-        $h[substr($key,0,1)] = $value ;
-    }
-
-    ok 172, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    # via the tied array is not a problem, but check anyway
-    # substr of value
-    $warned = '';
-    for my $ix ( 9 .. 10 )
-    {
-        my $key = $ix . "data" ;
-        my $value = "value$ix" ;
-        $remember{$ix} = $value ;
-        $h[$ix] = substr($value,0) ;
-    }
-
-    ok 173, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-
-    my %bad = () ;
-    my $key = '';
-    for (my $status = $db->seq($key, $value, R_FIRST ) ;
-         $status == 0 ;
-         $status = $db->seq($key, $value, R_NEXT ) ) {
-
-        #print "# key [$key] value [$value]\n" ;
-        if (defined $remember{$key} && defined $value && 
-             $remember{$key} eq $value) {
-            delete $remember{$key} ;
-        }
-        else {
-            $bad{$key} = $value ;
-        }
-    }
-    
-    ok 174, keys %bad == 0 ;
-    ok 175, keys %remember == 0 ;
-
-    print "# missing -- $key $value\n" while ($key, $value) = each %remember;
-    print "# bad     -- $key $value\n" while ($key, $value) = each %bad;
-
-    # Make sure this fix does not break code to handle an undef key
-    my $value = 'fred';
-    $warned = '';
-    $status = $db->put(undef, $value) ;
-    ok 176, $status == 0
-      or print "# put failed - status $status\n";
-    ok 177, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    print "# db_ver $DB_File::db_ver\n";
-    $value = '' ;
-    $status = $db->get(undef, $value) ;
-    ok 178, $status == 0
-	or print "# get failed - status $status\n" ;
-    ok(179, $db->get(undef, $value) == 0) or print "# get failed\n" ;
-    ok 180, $value eq 'fred' or print "# got [$value]\n" ;
-    ok 181, $warned eq '' 
-      or print "# Caught warning [$warned]\n" ;
-    $warned = '';
-
-    undef $db ;
-    untie @h;
-    unlink $Dfile;
-}
-
-# Only test splice if this is a newish version of Perl
-exit unless $FA ;
-
-# Test SPLICE
-
-{
-    # check that the splice warnings are under the same lexical control
-    # as their non-tied counterparts.
-
-    use warnings;
-    use strict;
-
-    my $a = '';
-    my @a = (1);
-    local $SIG{__WARN__} = sub {$a = $_[0]} ;
-
-    unlink $Dfile;
-    my @tied ;
-    
-    tie @tied, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO 
-	or die "Can't open file: $!\n" ;
-
-    # uninitialized offset
-    use warnings;
-    my $offset ;
-    $a = '';
-    splice(@a, $offset);
-    ok(182, $a =~ /^Use of uninitialized value /);
-    $a = '';
-    splice(@tied, $offset);
-    ok(183, $a =~ /^Use of uninitialized value in splice/);
-
-    no warnings 'uninitialized';
-    $a = '';
-    splice(@a, $offset);
-    ok(184, $a eq '');
-    $a = '';
-    splice(@tied, $offset);
-    ok(185, $a eq '');
-
-    # uninitialized length
-    use warnings;
-    my $length ;
-    $a = '';
-    splice(@a, 0, $length);
-    ok(186, $a =~ /^Use of uninitialized value /);
-    $a = '';
-    splice(@tied, 0, $length);
-    ok(187, $a =~ /^Use of uninitialized value in splice/);
-
-    no warnings 'uninitialized';
-    $a = '';
-    splice(@a, 0, $length);
-    ok(188, $a eq '');
-    $a = '';
-    splice(@tied, 0, $length);
-    ok(189, $a eq '');
-
-    # offset past end of array
-    use warnings;
-    $a = '';
-    splice(@a, 3);
-    my $splice_end_array = ($a =~ /^splice\(\) offset past end of array/);
-    $a = '';
-    splice(@tied, 3);
-    ok(190, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/);
-
-    no warnings 'misc';
-    $a = '';
-    splice(@a, 3);
-    ok(191, $a eq '');
-    $a = '';
-    splice(@tied, 3);
-    ok(192, $a eq '');
-
-    ok(193, safeUntie \@tied);
-    unlink $Dfile;
-}
-
-# 
-# These are a few regression tests: bundles of five arguments to pass
-# to test_splice().  The first four arguments correspond to those
-# given to splice(), and the last says which context to call it in
-# (scalar, list or void).
-# 
-# The expected result is not needed because we get that by running
-# Perl's built-in splice().
-# 
-my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion',
-		 'rarely', 'paleness' ],
-	       -4, -2,
-	       [ 'redoubled', 'Taylorize', 'Zoe', 'halogen' ],
-	       'void' ],
-
-	     [ [ 'a' ], -2, 1, [ 'B' ], 'void' ],
-
-	     [ [ 'Hartley', 'Islandia', 'assents', 'wishful' ],
-	       0, -4,
-	       [ 'maids' ],
-	       'void' ],
-
-	     [ [ 'visibility', 'pocketful', 'rectangles' ],
-	       -10, 0,
-	       [ 'garbages' ],
-	       'void' ],
-
-	     [ [ 'sleeplessly' ],
-	       8, -4,
-	       [ 'Margery', 'clearing', 'repercussion', 'clubs',
-		 'arise' ],
-	       'void' ],
-
-	     [ [ 'chastises', 'recalculates' ],
-	       0, 0,
-	       [ 'momentariness', 'mediates', 'accents', 'toils',
-		 'regaled' ],
-	       'void' ],
-
-	     [ [ 'b', '' ],
-	       9, 8,
-	       [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
-	       'scalar' ],
-
-	     [ [ 'b', '' ],
-	       undef, undef,
-	       [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
-	       'scalar' ],
-	     
-	     [ [ 'riheb' ], -8, undef, [], 'void' ],
-
-	     [ [ 'uft', 'qnxs', '' ],
-	       6, -2,
-	       [ 'znp', 'mhnkh', 'bn' ],
-	       'void' ],
-	    );
-
-my $testnum = 194;
-my $failed = 0;
-my $tmp = "dbr$$";
-foreach my $test (@tests) {
-    my $err = test_splice(@$test);
-    if (defined $err) {
-	print STDERR "# failed: ", Dumper($test);
-	print STDERR "# error: $err\n";
-	$failed = 1;
-	ok($testnum++, 0);
-    }
-    else { ok($testnum++, 1) }
-}
-
-if ($failed) {
-    # Not worth running the random ones
-    print STDERR '# skipping ', $testnum++, "\n";
-}
-else {
-    # A thousand randomly-generated tests
-    $failed = 0;
-    srand(0);
-    foreach (0 .. 1000 - 1) {
-	my $test = rand_test();
-	my $err = test_splice(@$test);
-	if (defined $err) {
-	    print STDERR "# failed: ", Dumper($test);
-	    print STDERR "# error: $err\n";
-	    $failed = 1;
-	    print STDERR "# skipping any remaining random tests\n";
-	    last;
-	}
-    }
-
-    ok($testnum++, not $failed);
-}
-
-die "testnum ($testnum) != total_tests ($total_tests) + 1" 
-    if $testnum != $total_tests + 1;
-
-exit ;
-
-# Subroutines for SPLICE testing
-
-# test_splice()
-# 
-# Test the new splice() against Perl's built-in one.  The first four
-# parameters are those passed to splice(), except that the lists must
-# be (explicitly) passed by reference, and are not actually modified.
-# (It's just a test!)  The last argument specifies the context in
-# which to call the functions: 'list', 'scalar', or 'void'.
-# 
-# Returns:
-#   undef, if the two splices give the same results for the given
-#     arguments and context;
-# 
-#   an error message showing the difference, otherwise.
-# 
-# Reads global variable $tmp.
-# 
-sub test_splice {
-    die 'usage: test_splice(array, offset, length, list, context)' if @_ != 5;
-    my ($array, $offset, $length, $list, $context) = @_;
-    my @array = @$array;
-    my @list = @$list;
-
-    unlink $tmp;
-    
-    my @h;
-    my $H = tie @h, 'DB_File', $tmp, O_CREAT|O_RDWR, 0644, $DB_RECNO
-      or die "cannot open $tmp: $!";
-
-    my $i = 0;
-    foreach ( @array ) { $h[$i++] = $_ }
-    
-    return "basic DB_File sanity check failed"
-      if list_diff(\@array, \@h);
-
-    # Output from splice():
-    # Returned value (munged a bit), error msg, warnings
-    # 
-    my ($s_r, $s_error, @s_warnings);
-
-    my $gather_warning = sub { push @s_warnings, $_[0] };
-    if ($context eq 'list') {
-	my @r;
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    @r = splice @array, $offset, $length, @list;
-	};
-	$s_error = $@;
-	$s_r = \@r;
-    }
-    elsif ($context eq 'scalar') {
-	my $r;
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    $r = splice @array, $offset, $length, @list;
-	};
-	$s_error = $@;
-	$s_r = [ $r ];
-    }
-    elsif ($context eq 'void') {
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    splice @array, $offset, $length, @list;
-	};
-	$s_error = $@;
-	$s_r = [];
-    }
-    else {
-	die "bad context $context";
-    }
-
-    foreach ($s_error, @s_warnings) {
-	chomp;
-	s/ at \S+ line \d+\.$//;
-	# only built-in splice identifies name of uninit value
-	s/(uninitialized value) \$\w+/$1/;
-    }
-
-    # Now do the same for DB_File's version of splice
-    my ($ms_r, $ms_error, @ms_warnings);
-    $gather_warning = sub { push @ms_warnings, $_[0] };
-    if ($context eq 'list') {
-	my @r;
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    @r = splice @h, $offset, $length, @list;
-	};
-	$ms_error = $@;
-	$ms_r = \@r;
-    }
-    elsif ($context eq 'scalar') {
-	my $r;
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    $r = splice @h, $offset, $length, @list;
-	};
-	$ms_error = $@;
-	$ms_r = [ $r ];
-    }
-    elsif ($context eq 'void') {
-	eval {
-	    local $SIG{__WARN__} = $gather_warning;
-	    splice @h, $offset, $length, @list;
-	};
-	$ms_error = $@;
-	$ms_r = [];
-    }
-    else {
-	die "bad context $context";
-    }
-
-    foreach ($ms_error, @ms_warnings) {
-	chomp;
-	s/ at \S+ line \d+\.?.*//s;
-    }
-
-    return "different errors: '$s_error' vs '$ms_error'"
-      if $s_error ne $ms_error;
-    return('different return values: ' . Dumper($s_r) . ' vs ' . Dumper($ms_r))
-      if list_diff($s_r, $ms_r);
-    return('different changed list: ' . Dumper(\@array) . ' vs ' . Dumper(\@h))
-      if list_diff(\@array, \@h);
-
-    if ((scalar @s_warnings) != (scalar @ms_warnings)) {
-	return 'different number of warnings';
-    }
-
-    while (@s_warnings) {
-	my $sw  = shift @s_warnings;
-	my $msw = shift @ms_warnings;
-	
-	if (defined $sw and defined $msw) {
-	    $msw =~ s/ \(.+\)$//;
-	    $msw =~ s/ in splice$// if $] < 5.006;
-	    if ($sw ne $msw) {
-		return "different warning: '$sw' vs '$msw'";
-	    }
-	}
-	elsif (not defined $sw and not defined $msw) {
-	    # Okay.
-	}
-	else {
-	    return "one warning defined, another undef";
-	}
-    }
-    
-    undef $H;
-    untie @h;
-    
-    open(TEXT, $tmp) or die "cannot open $tmp: $!";
-    @h = ; normalise @h; chomp @h;
-    close TEXT or die "cannot close $tmp: $!";
-    return('list is different when re-read from disk: '
-	   . Dumper(\@array) . ' vs ' . Dumper(\@h))
-      if list_diff(\@array, \@h);
-
-    unlink $tmp;
-
-    return undef; # success
-}
-
-
-# list_diff()
-#
-# Do two lists differ?
-#
-# Parameters:
-#   reference to first list
-#   reference to second list
-#
-# Returns true iff they differ.  Only works for lists of (string or
-# undef). 
-# 
-# Surely there is a better way to do this?
-# 
-sub list_diff {
-    die 'usage: list_diff(ref to first list, ref to second list)'
-      if @_ != 2;
-    my ($a, $b) = @_;
-    my @a = @$a; my @b = @$b;
-    return 1 if (scalar @a) != (scalar @b);
-    for (my $i = 0; $i < @a; $i++) {
-	my ($ae, $be) = ($a[$i], $b[$i]);
-	if (defined $ae and defined $be) {
-	    return 1 if $ae ne $be;
-	}
-	elsif (not defined $ae and not defined $be) {
-	    # Two undefined values are 'equal'
-	}
-	else {
-	    return 1;
-	}
-    }
-    return 0;
-} 
-
-
-# rand_test()
-# 
-# Think up a random ARRAY, OFFSET, LENGTH, LIST, and context.
-# ARRAY or LIST might be empty, and OFFSET or LENGTH might be
-# undefined.  Return a 'test' - a listref of these five things.
-# 
-sub rand_test {
-    die 'usage: rand_test()' if @_;
-    my @contexts = qw;
-    my $context = $contexts[int(rand @contexts)];
-    return [ rand_list(),
-	     (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
-	     (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
-	     rand_list(),
-	     $context ];
-}
-
-
-sub rand_list {
-    die 'usage: rand_list()' if @_;
-    my @r;
-
-    while (rand() > 0.1 * (scalar @r + 1)) {
-	push @r, rand_word();
-    }
-    return \@r;
-}
-
-
-sub rand_word {
-    die 'usage: rand_word()' if @_;
-    my $r = '';
-    my @chars = qw;
-    while (rand() > 0.1 * (length($r) + 1)) {
-	$r .= $chars[int(rand(scalar @chars))];
-    }
-    return $r;
-}
-
-
diff --git a/storage/bdb/perl/DB_File/typemap b/storage/bdb/perl/DB_File/typemap
deleted file mode 100644
index f159995080b..00000000000
--- a/storage/bdb/perl/DB_File/typemap
+++ /dev/null
@@ -1,49 +0,0 @@
-# typemap for Perl 5 interface to Berkeley 
-#
-# written by Paul Marquess 
-# last modified 20th June 2004
-# version 1.809
-#
-#################################### DB SECTION
-#
-# 
-
-u_int			T_U_INT
-DB_File			T_PTROBJ
-DBT			T_dbtdatum
-DBTKEY			T_dbtkeydatum
-
-INPUT
-T_dbtkeydatum
-	DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
-	DBT_clear($var) ;
-	SvGETMAGIC($arg) ;
-        if (db->type == DB_RECNO) {
-	    if (SvOK($arg))
-	        Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ; 
-            else
-	        Value = 1 ;
-	    $var.data = & Value; 
-	    $var.size = (int)sizeof(recno_t);
-        }
-        else if (SvOK($arg)) {
-	    $var.data = SvPVbyte($arg, PL_na);
-	    $var.size = (int)PL_na;
-	}
-T_dbtdatum
-	DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
-	DBT_clear($var) ;
-	SvGETMAGIC($arg) ;
-	if (SvOK($arg)) {
-	    $var.data = SvPVbyte($arg, PL_na);
-	    $var.size = (int)PL_na;
-	}
-
-OUTPUT
-
-T_dbtkeydatum
-	OutputKey($arg, $var)
-T_dbtdatum
-	OutputValue($arg, $var)
-T_PTROBJ
-        sv_setref_pv($arg, dbtype, (void*)$var);
diff --git a/storage/bdb/perl/DB_File/version.c b/storage/bdb/perl/DB_File/version.c
deleted file mode 100644
index 03b17c18e60..00000000000
--- a/storage/bdb/perl/DB_File/version.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/* 
-
- version.c -- Perl 5 interface to Berkeley DB 
-
- written by Paul Marquess 
- last modified 2nd Jan 2002
- version 1.802
-
- All comments/suggestions/problems are welcome
-
-     Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
-     This program is free software; you can redistribute it and/or
-     modify it under the same terms as Perl itself.
-
- Changes:
-        1.71 -  Support for Berkeley DB version 3.
-		Support for Berkeley DB 2/3's backward compatability mode.
-        1.72 -  No change.
-        1.73 -  Added support for threading
-        1.74 -  Added Perl core patch 7801.
-
-
-*/
-
-#define PERL_NO_GET_CONTEXT
-#include "EXTERN.h"  
-#include "perl.h"
-#include "XSUB.h"
-
-#include 
-
-void
-#ifdef CAN_PROTOTYPE
-__getBerkeleyDBInfo(void)
-#else
-__getBerkeleyDBInfo()
-#endif
-{
-#ifdef dTHX	
-    dTHX;
-#endif    
-    SV * version_sv = perl_get_sv("DB_File::db_version", GV_ADD|GV_ADDMULTI) ;
-    SV * ver_sv = perl_get_sv("DB_File::db_ver", GV_ADD|GV_ADDMULTI) ;
-    SV * compat_sv = perl_get_sv("DB_File::db_185_compat", GV_ADD|GV_ADDMULTI) ;
-
-#ifdef DB_VERSION_MAJOR
-    int Major, Minor, Patch ;
-
-    (void)db_version(&Major, &Minor, &Patch) ;
-
-    /* Check that the versions of db.h and libdb.a are the same */
-    if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR 
-		|| Patch != DB_VERSION_PATCH)
-	croak("\nDB_File needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",  
-		DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH, 
-		Major, Minor, Patch) ;
-    
-    /* check that libdb is recent enough  -- we need 2.3.4 or greater */
-    if (Major == 2 && (Minor < 3 || (Minor ==  3 && Patch < 4)))
-	croak("DB_File needs Berkeley DB 2.3.4 or greater, you have %d.%d.%d\n",
-		 Major, Minor, Patch) ;
- 
-    {
-        char buffer[40] ;
-        sprintf(buffer, "%d.%d", Major, Minor) ;
-        sv_setpv(version_sv, buffer) ; 
-        sprintf(buffer, "%d.%03d%03d", Major, Minor, Patch) ;
-        sv_setpv(ver_sv, buffer) ; 
-    }
- 
-#else /* ! DB_VERSION_MAJOR */
-    sv_setiv(version_sv, 1) ;
-    sv_setiv(ver_sv, 1) ;
-#endif /* ! DB_VERSION_MAJOR */
-
-#ifdef COMPAT185
-    sv_setiv(compat_sv, 1) ;
-#else /* ! COMPAT185 */
-    sv_setiv(compat_sv, 0) ;
-#endif /* ! COMPAT185 */
-
-}
diff --git a/storage/bdb/qam/qam.c b/storage/bdb/qam/qam.c
index 3ae2f90d088..778e3e6e07a 100644
--- a/storage/bdb/qam/qam.c
+++ b/storage/bdb/qam/qam.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam.c,v 11.187 2004/10/18 20:21:13 ubell Exp $
+ * $Id: qam.c,v 12.12 2005/10/05 17:16:46 bostic Exp $
  */
 
 #include "db_config.h"
@@ -175,7 +175,7 @@ __qam_pitem(dbc, pagep, indx, recno, data)
 			if (F_ISSET(qp, QAM_VALID))
 				memcpy(dest, p, t->re_len);
 			else
-				memset(dest, t->re_pad, t->re_len);
+				memset(dest, (int)t->re_pad, t->re_len);
 
 			dest += data->doff;
 			memcpy(dest, data->data, data->size);
@@ -202,7 +202,8 @@ no_partial:
 	F_SET(qp, QAM_VALID | QAM_SET);
 	memcpy(p, datap->data, datap->size);
 	if (!F_ISSET(data, DB_DBT_PARTIAL))
-		memset(p + datap->size,  t->re_pad, t->re_len - datap->size);
+		memset(p + datap->size,
+		     (int)t->re_pad, t->re_len - datap->size);
 
 err:	if (allocated)
 		__os_free(dbenv, datap->data);
@@ -253,10 +254,12 @@ __qam_c_put(dbc, key, data, flags, pgnop)
 	}
 
 	/* Write lock the record. */
-	if ((ret = __db_lget(dbc,
-	    0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+	if ((ret = __db_lget(dbc, LCK_COUPLE,
+	     cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &cp->lock)) != 0)
 		return (ret);
 
+	lock = cp->lock;
+
 	if ((ret = __qam_position(dbc, &cp->recno, QAM_WRITE, &exact)) != 0) {
 		/* We could not get the page, we can release the record lock. */
 		(void)__LPUT(dbc, lock);
@@ -287,7 +290,8 @@ __qam_c_put(dbc, key, data, flags, pgnop)
 	 */
 	if ((ret = __memp_fget(mpf, &pg, 0, &meta)) != 0)
 		return (ret);
-	if ((ret = __db_lget(dbc, 0, pg,  DB_LOCK_WRITE, 0, &lock)) != 0) {
+	if ((ret = __db_lget(dbc, LCK_COUPLE,
+	     pg,  DB_LOCK_WRITE, 0, &cp->lock)) != 0) {
 		(void)__memp_fput(mpf, meta, 0);
 		return (ret);
 	}
@@ -349,7 +353,7 @@ __qam_c_put(dbc, key, data, flags, pgnop)
 		ret = t_ret;
 
 	/* Don't hold the meta page long term. */
-	if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+	if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
 		ret = t_ret;
 	return (ret);
 }
@@ -514,12 +518,12 @@ __qam_c_del(dbc)
 	QMETA *meta;
 	QUEUE_CURSOR *cp;
 	db_pgno_t pg;
-	db_recno_t first;
 	int exact, ret, t_ret;
 
 	dbp = dbc->dbp;
 	mpf = dbp->mpf;
 	cp = (QUEUE_CURSOR *)dbc->internal;
+	LOCK_INIT(lock);
 
 	pg = ((QUEUE *)dbp->q_internal)->q_meta;
 	/*
@@ -537,8 +541,6 @@ __qam_c_del(dbc)
 	if (QAM_NOT_VALID(meta, cp->recno))
 		ret = DB_NOTFOUND;
 
-	first = meta->first_recno;
-
 	/* Don't hold the meta page long term. */
 	if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
 		ret = t_ret;
@@ -546,10 +548,11 @@ __qam_c_del(dbc)
 	if (ret != 0)
 		goto err;
 
-	if ((ret = __db_lget(dbc,
-	    0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+	if ((ret = __db_lget(dbc, LCK_COUPLE,
+	    cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &cp->lock)) != 0)
 		goto err;
 	cp->lock_mode = DB_LOCK_WRITE;
+	lock = cp->lock;
 
 	/* Find the record ; delete only deletes exact matches. */
 	if ((ret = __qam_position(dbc, &cp->recno, QAM_WRITE, &exact)) != 0)
@@ -582,12 +585,21 @@ __qam_c_del(dbc)
 
 	F_CLR(qp, QAM_VALID);
 
-	if (cp->recno == first) {
+	/*
+	 * Peek at the first_recno before locking the meta page.
+	 * Other threads cannot move first_recno past
+	 * our position while we have the record locked.
+	 * If it's pointing at the deleted record then lock
+	 * the metapage and check again as lower numbered
+	 * record may have been inserted.
+	 */
+	if (cp->recno == meta->first_recno) {
 		pg = ((QUEUE *)dbp->q_internal)->q_meta;
 		if ((ret =
 		    __db_lget(dbc, 0, pg,  DB_LOCK_WRITE, 0, &metalock)) != 0)
 			goto err;
-		ret = __qam_consume(dbc, meta, first);
+		if (cp->recno == meta->first_recno)
+			ret = __qam_consume(dbc, meta, meta->first_recno);
 		if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
 			ret = t_ret;
 	}
@@ -637,13 +649,15 @@ __qam_c_get(dbc, key, data, flags, pgnop)
 	db_pgno_t metapno;
 	db_recno_t first;
 	qam_position_mode mode;
+	u_int32_t put_mode;
 	int exact, inorder, is_first, locked, ret, t_ret, wait, with_delete;
-	int put_mode, retrying;
+	int retrying;
 
 	dbp = dbc->dbp;
 	dbenv = dbp->dbenv;
 	mpf = dbp->mpf;
 	cp = (QUEUE_CURSOR *)dbc->internal;
+	LOCK_INIT(lock);
 
 	PANIC_CHECK(dbenv);
 
@@ -861,14 +875,16 @@ get_next:	if (cp->recno != RECNO_OOB) {
 	}
 
 	/* Lock the record. */
-	if ((ret = __db_lget(dbc, 0, cp->recno, lock_mode,
+	if (((ret = __db_lget(dbc, 0, cp->recno, lock_mode,
 	    (with_delete && !retrying) ?
 	    DB_LOCK_NOWAIT | DB_LOCK_RECORD : DB_LOCK_RECORD,
-	    &lock)) == DB_LOCK_DEADLOCK && with_delete) {
+	    &lock)) == DB_LOCK_DEADLOCK || ret == DB_LOCK_NOTGRANTED) &&
+	    with_delete) {
 #ifdef QDEBUG
-		__db_logmsg(dbenv,
-		    dbc->txn, "Queue S", 0, "%x %d %d %d",
-		    dbc->locker, cp->recno, first, meta->first_recno);
+		if (DBC_LOGGING(dbc))
+			(void)__log_printf(dbenv,
+			    dbc->txn, "Queue S: %x %d %d %d",
+			    dbc->locker, cp->recno, first, meta->first_recno);
 #endif
 		first = 0;
 		if ((ret =
@@ -1071,9 +1087,10 @@ release_retry:	/* Release locks and retry, if possible. */
 		locked = 1;
 
 #ifdef QDEBUG
-		__db_logmsg(dbenv,
-		    dbc->txn, "Queue D", 0, "%x %d %d %d",
-		    dbc->locker, cp->recno, first, meta->first_recno);
+		if (DBC_LOGGING(dbc))
+			(void)__log_printf(dbenv,
+			    dbc->txn, "Queue D: %x %d %d %d",
+			    dbc->locker, cp->recno, first, meta->first_recno);
 #endif
 		/*
 		 * See if we deleted the "first" record.  If
@@ -1198,16 +1215,16 @@ __qam_consume(dbc, meta, first)
 		 */
 		if (cp->page != NULL && rec_extent != 0 &&
 		    ((exact = (first % rec_extent == 0)) ||
-		    first % meta->rec_page == 0 ||
+		    (first % meta->rec_page == 0) ||
 		    first == UINT32_MAX)) {
 			if (exact == 1 && (ret = __db_lget(dbc,
 			    0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
 				break;
-
 #ifdef QDEBUG
-			__db_logmsg(dbp->dbenv,
-			    dbc->txn, "Queue R", 0, "%x %d %d %d",
-			    dbc->locker, cp->pgno, first, meta->first_recno);
+			if (DBC_LOGGING(dbc))
+				(void)__log_printf(dbp->dbenv, dbc->txn,
+				    "Queue R: %x %d %d %d", dbc->locker,
+				    cp->pgno, first, meta->first_recno);
 #endif
 			put_mode |= DB_MPOOL_DISCARD;
 			if ((ret = __qam_fput(dbp,
@@ -1276,9 +1293,10 @@ __qam_consume(dbc, meta, first)
 	 */
 	if (ret == 0 && meta->first_recno != first) {
 #ifdef QDEBUG
-		__db_logmsg(dbp->dbenv, dbc->txn, "Queue M",
-		    0, "%x %d %d %d", dbc->locker, cp->recno,
-		    first, meta->first_recno);
+		if (DBC_LOGGING(dbc))
+			(void)__log_printf(dbp->dbenv, dbc->txn,
+			    "Queue M: %x %d %d %d", dbc->locker, cp->recno,
+			    first, meta->first_recno);
 #endif
 		if (DBC_LOGGING(dbc))
 			if ((ret = __qam_incfirst_log(dbp,
@@ -1310,10 +1328,11 @@ __qam_bulk(dbc, data, flags)
 	db_lockmode_t lkmode;
 	db_pgno_t metapno;
 	qam_position_mode mode;
-	int32_t  *endp, *offp;
+	u_int32_t  *endp, *offp;
+	u_int32_t pagesize, re_len, recs;
 	u_int8_t *dbuf, *dp, *np;
-	int exact, recs, re_len, ret, t_ret, valid;
-	int is_key, need_pg, pagesize, size, space;
+	int exact, ret, t_ret, valid;
+	int is_key, need_pg, size, space;
 
 	dbp = dbc->dbp;
 	mpf = dbp->mpf;
@@ -1346,11 +1365,11 @@ __qam_bulk(dbc, data, flags)
 	np = dp = dbuf;
 
 	/* Keep track of space that is left.  There is an termination entry */
-	space = data->ulen;
-	space -= sizeof(*offp);
+	space = (int)data->ulen;
+	space -= (int)sizeof(*offp);
 
-	/* Build the offset/size table form the end up. */
-	endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+	/* Build the offset/size table from the end up. */
+	endp = (u_int32_t *)((u_int8_t *)dbuf + data->ulen);
 	endp--;
 	offp = endp;
 	/* Save the lock on the current position of the cursor. */
@@ -1384,18 +1403,19 @@ next_pg:
 			qp = QAM_GET_RECORD(dbp, pg, indx);
 			if (F_ISSET(qp, QAM_VALID)) {
 				valid = 1;
-				space -= (is_key ? 3 : 2) * sizeof(*offp);
+				space -= (int)
+				     ((is_key ? 3 : 2) * sizeof(*offp));
 				if (space < 0)
 					goto get_space;
 				if (need_pg) {
 					dp = np;
-					size = pagesize - QPAGE_SZ(dbp);
+					size = (int)pagesize - QPAGE_SZ(dbp);
 					if (space < size) {
 get_space:
 						if (offp == endp) {
 							data->size = (u_int32_t)
-							    DB_ALIGN(size +
-							    pagesize,
+							    DB_ALIGN((u_int32_t)
+							    size + pagesize,
 							    sizeof(u_int32_t));
 							ret = DB_BUFFER_SMALL;
 							break;
@@ -1407,16 +1427,17 @@ get_space:
 						break;
 					}
 					memcpy(dp,
-					    (char *)pg + QPAGE_SZ(dbp), size);
+					    (char *)pg + QPAGE_SZ(dbp),
+					    (unsigned)size);
 					need_pg = 0;
 					space -= size;
 					np += size;
 				}
 				if (is_key)
 					*offp-- = cp->recno;
-				*offp-- = (int32_t)((u_int8_t*)qp -
-				    (u_int8_t*)pg - QPAGE_SZ(dbp) +
-				    dp - dbuf + SSZA(QAMDATA, data));
+				*offp-- = (u_int32_t)((((u_int8_t*)qp -
+				    (u_int8_t*)pg) - QPAGE_SZ(dbp)) +
+				    (dp - dbuf) + SSZA(QAMDATA, data));
 				*offp-- = re_len;
 			}
 		}
@@ -1457,7 +1478,7 @@ get_space:
 	if (is_key == 1)
 		*offp = RECNO_OOB;
 	else
-		*offp = -1;
+		*offp = (u_int32_t)-1;
 
 done:	/* Release the meta page. */
 	if ((t_ret = __memp_fput(mpf, meta, 0)) != 0 && ret == 0)
@@ -1555,7 +1576,7 @@ __qam_c_init(dbc)
 	}
 
 	/* Initialize methods. */
-	dbc->c_close = __db_c_close;
+	dbc->c_close = __db_c_close_pp;
 	dbc->c_count = __db_c_count_pp;
 	dbc->c_del = __db_c_del_pp;
 	dbc->c_dup = __db_c_dup_pp;
@@ -1665,7 +1686,31 @@ __qam_truncate(dbc, countp)
 	if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
 		ret = t_ret;
 
-	*countp = count;
+	if (countp != NULL)
+		*countp = count;
 
 	return (ret);
 }
+
+/*
+ * __qam_delete --
+ *	Queue fast delete function.
+ *
+ * PUBLIC: int __qam_delete __P((DBC *,  DBT *));
+ */
+int
+__qam_delete(dbc, key)
+	DBC *dbc;
+	DBT *key;
+{
+	QUEUE_CURSOR *cp;
+	int ret;
+
+	cp = (QUEUE_CURSOR *)dbc->internal;
+	if ((ret = __qam_getno(dbc->dbp, key, &cp->recno)) != 0)
+		goto err;
+
+	ret = __qam_c_del(dbc);
+
+err:	return (ret);
+}
diff --git a/storage/bdb/qam/qam.src b/storage/bdb/qam/qam.src
index 71063f0b66f..e63c15e666c 100644
--- a/storage/bdb/qam/qam.src
+++ b/storage/bdb/qam/qam.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam.src,v 11.33 2004/06/17 17:35:22 bostic Exp $
+ * $Id: qam.src,v 12.1 2005/06/16 20:23:32 bostic Exp $
  */
 
 PREFIX	__qam
diff --git a/storage/bdb/qam/qam_conv.c b/storage/bdb/qam/qam_conv.c
index c2b7d53a44e..e3ff19c86e5 100644
--- a/storage/bdb/qam/qam_conv.c
+++ b/storage/bdb/qam/qam_conv.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_conv.c,v 11.17 2004/01/28 03:36:19 bostic Exp $
+ * $Id: qam_conv.c,v 12.1 2005/06/16 20:23:32 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/qam/qam_files.c b/storage/bdb/qam/qam_files.c
index d3f04060370..3cf3bd5c55e 100644
--- a/storage/bdb/qam/qam_files.c
+++ b/storage/bdb/qam/qam_files.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_files.c,v 1.88 2004/10/21 14:54:42 bostic Exp $
+ * $Id: qam_files.c,v 12.6 2005/10/20 18:57:12 bostic Exp $
  */
 
 #include "db_config.h"
@@ -51,7 +51,7 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
 	MPFARRAY *array;
 	QUEUE *qp;
 	u_int8_t fid[DB_FILE_ID_LEN];
-	u_int32_t extid, maxext, numext, offset, oldext, openflags;
+	u_int32_t i, extid, maxext, numext, lflags, offset, oldext, openflags;
 	char buf[MAXPATHLEN];
 	int ftype, less, ret, t_ret;
 
@@ -73,7 +73,7 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
 	 * The file cannot go away because we must have a record locked
 	 * in that file.
 	 */
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 	extid = QAM_PAGE_EXTENT(dbp, pgno);
 
 	/* Array1 will always be in use if array2 is in use. */
@@ -87,6 +87,7 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
 		goto alloc;
 	}
 
+retry:
 	if (extid < array->low_extent) {
 		less = 1;
 		offset = array->low_extent - extid;
@@ -134,11 +135,6 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
 			 * If this is at the end of the array and the file at
 			 * the beginning has a zero pin count we can close
 			 * the bottom extent and put this one at the end.
-			 * TODO: If this process is "slow" then it might be
-			 * appending but miss one or more extents.
-			 * We could check to see if all the extents
-			 * are unpinned and close them in the else
-			 * clause below.
 			 */
 			mpf = array->mpfarray[0].mpf;
 			if (mpf != NULL && (ret = __memp_fclose(mpf, 0)) != 0)
@@ -166,12 +162,43 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
 				array->low_extent = extid;
 				offset = 0;
 				numext = 0;
+			} else if (array->mpfarray[0].pinref == 0) {
+				/*
+				 * Check to see if there are extents marked
+				 * for deletion at the beginning of the cache.
+				 * If so close them so they will go away.
+				 */
+				for (i = 0; i < array->n_extent; i++) {
+					if (array->mpfarray[i].pinref != 0)
+						break;
+					mpf = array->mpfarray[i].mpf;
+					if (mpf == NULL)
+						continue;
+					(void)__memp_get_flags(mpf, &lflags);
+					if (!FLD_ISSET(lflags, DB_MPOOL_UNLINK))
+						break;
+
+					array->mpfarray[i].mpf = NULL;
+					if ((ret = __memp_fclose(mpf, 0)) != 0)
+						goto err;
+				}
+				if (i == 0)
+					goto increase;
+				memmove(&array->mpfarray[0],
+				     &array->mpfarray[i],
+				    (array->n_extent - i) *
+				    sizeof(array->mpfarray[0]));
+				memset(&array->mpfarray[array->n_extent - i],
+				     '\0', i * sizeof(array->mpfarray[0]));
+				array->low_extent += i;
+				array->hi_extent += i;
+				goto retry;
 			} else {
 				/*
 				 * Increase the size to at least include
 				 * the new one and double it.
 				 */
-				array->n_extent += offset;
+increase:			array->n_extent += offset;
 				array->n_extent <<= 2;
 			}
 alloc:			if ((ret = __os_realloc(dbenv,
@@ -219,6 +246,7 @@ alloc:			if ((ret = __os_realloc(dbenv,
 		(void)__memp_set_pgcookie(mpf, &qp->pgcookie);
 		(void)__memp_get_ftype(dbp->mpf, &ftype);
 		(void)__memp_set_ftype(mpf, ftype);
+		(void)__memp_set_clear_len(mpf, dbp->pgsize);
 
 		/* Set up the fileid for this extent. */
 		__qam_exid(dbp, fid, extid);
@@ -255,7 +283,7 @@ alloc:			if ((ret = __os_realloc(dbenv,
 		(void)__memp_set_flags(mpf, DB_MPOOL_UNLINK, 0);
 
 err:
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 
 	if (ret == 0) {
 		if (mode == QAM_PROBE_MPF) {
@@ -270,7 +298,7 @@ err:
 		} else
 			ret = __memp_fput(mpf, addrp, flags);
 
-		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+		MUTEX_LOCK(dbenv, dbp->mutex);
 		/* Recalculate because we dropped the lock. */
 		offset = extid - array->low_extent;
 		DB_ASSERT(array->mpfarray[offset].pinref > 0);
@@ -285,7 +313,7 @@ err:
 					ret = t_ret;
 			}
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+		MUTEX_UNLOCK(dbenv, dbp->mutex);
 	}
 	return (ret);
 }
@@ -314,7 +342,7 @@ __qam_fclose(dbp, pgnoaddr)
 	dbenv = dbp->dbenv;
 	qp = (QUEUE *)dbp->q_internal;
 
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 
 	extid = QAM_PAGE_EXTENT(dbp, pgnoaddr);
 	array = &qp->array1;
@@ -333,7 +361,7 @@ __qam_fclose(dbp, pgnoaddr)
 	ret = __memp_fclose(mpf, 0);
 
 done:
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+	MUTEX_UNLOCK(dbenv, dbp->mutex);
 	return (ret);
 }
 
@@ -365,7 +393,7 @@ __qam_fremove(dbp, pgnoaddr)
 	dbenv = dbp->dbenv;
 	ret = 0;
 
-	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+	MUTEX_LOCK(dbenv, dbp->mutex);
 
 	extid = QAM_PAGE_EXTENT(dbp, pgnoaddr);
 	array = &qp->array1;
@@ -416,8 +444,8 @@ __qam_fremove(dbp, pgnoaddr)
 			array->hi_extent--;
 	}
 
-err:
-	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+err:	MUTEX_UNLOCK(dbenv, dbp->mutex);
+
 #ifdef CONFIG_TEST
 	if (real_name != NULL)
 		__os_free(dbenv, real_name);
@@ -786,7 +814,8 @@ int __qam_nameop(dbp, txn, newname, op)
 			snprintf(exname, exlen,
 			     "%s%s", fullname, names[i] + len);
 			if ((t_ret = __memp_nameop(dbenv,
-			    fid, NULL, exname, NULL)) != 0 && ret == 0)
+			    fid, NULL, exname, NULL,
+			    F_ISSET(dbp, DB_AM_INMEM))) != 0 && ret == 0)
 				ret = t_ret;
 			break;
 
diff --git a/storage/bdb/qam/qam_method.c b/storage/bdb/qam/qam_method.c
index e3526fa3c9e..6be9ad75b22 100644
--- a/storage/bdb/qam/qam_method.c
+++ b/storage/bdb/qam/qam_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_method.c,v 11.84 2004/10/14 18:09:32 bostic Exp $
+ * $Id: qam_method.c,v 12.2 2005/09/28 17:45:01 margo Exp $
  */
 
 #include "db_config.h"
@@ -302,7 +302,7 @@ __qam_rr(dbp, txn, name, subdb, newname, op)
 
 	PANIC_CHECK(dbenv);
 
-	if (subdb != NULL) {
+	if (subdb != NULL && name != NULL) {
 		__db_err(dbenv,
 		    "Queue does not support multiple databases per file");
 		return (EINVAL);
diff --git a/storage/bdb/qam/qam_open.c b/storage/bdb/qam/qam_open.c
index 595d74dac4a..5f1dbecbdd8 100644
--- a/storage/bdb/qam/qam_open.c
+++ b/storage/bdb/qam/qam_open.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_open.c,v 11.68 2004/02/27 12:38:31 bostic Exp $
+ * $Id: qam_open.c,v 12.4 2005/10/15 00:56:55 bostic Exp $
  */
 
 #include "db_config.h"
@@ -103,7 +103,7 @@ __qam_open(dbp, txn, name, base_pgno, mode, flags)
 		goto err;
 
 	if (mode == 0)
-		mode = __db_omode("rwrw--");
+		mode = __db_omode("rw-rw----");
 	t->mode = mode;
 	t->re_pad = qmeta->re_pad;
 	t->re_len = qmeta->re_len;
@@ -308,11 +308,11 @@ __qam_new_file(dbp, txn, fhp, name)
 
 	/* Build meta-data page. */
 
-	if (name == NULL) {
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
 		pgno = PGNO_BASE_MD;
 		ret = __memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &meta);
 	} else {
-		ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+		ret = __os_calloc(dbenv, 1, dbp->pgsize, &buf);
 		meta = (QMETA *)buf;
 	}
 	if (ret != 0)
@@ -321,9 +321,12 @@ __qam_new_file(dbp, txn, fhp, name)
 	if ((ret = __qam_init_meta(dbp, meta)) != 0)
 		goto err;
 
-	if (name == NULL)
+	if (F_ISSET(dbp, DB_AM_INMEM)) {
+		if ((ret = __db_log_page(dbp,
+		    txn, &meta->dbmeta.lsn, pgno, (PAGE *)meta)) != 0)
+			goto err;
 		ret = __memp_fput(mpf, meta, DB_MPOOL_DIRTY);
-	else {
+	} else {
 		pginfo.db_pagesize = dbp->pgsize;
 		pginfo.flags =
 		    F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
diff --git a/storage/bdb/qam/qam_rec.c b/storage/bdb/qam/qam_rec.c
index e92141ddda3..6751f64a4c1 100644
--- a/storage/bdb/qam/qam_rec.c
+++ b/storage/bdb/qam/qam_rec.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_rec.c,v 11.78 2004/05/11 14:04:51 bostic Exp $
+ * $Id: qam_rec.c,v 12.8 2005/10/20 18:57:13 bostic Exp $
  */
 
 #include "db_config.h"
@@ -23,6 +23,12 @@
 #include "dbinc/log.h"
 #include "dbinc/mp.h"
 #include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+/* Determine if we are restoring prepared transactions from __txn_recover. */
+#define	IS_IN_RESTORE(dbenv)						 \
+	(((DB_TXNREGION *)((DB_TXNMGR *)				 \
+	     (dbenv)->tx_handle)->reginfo.primary)->stat.st_nrestores != 0)
 
 /*
  * __qam_incfirst_recover --
@@ -51,8 +57,10 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
 	u_int32_t rec_ext;
 	int exact, modified, ret, t_ret;
 
+	LOCK_INIT(lock);
+	COMPQUIET(meta, NULL);
 	REC_PRINT(__qam_incfirst_print);
-	REC_INTRO(__qam_incfirst_read, 1);
+	REC_INTRO(__qam_incfirst_read, 1, 1);
 
 	metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
 
@@ -169,11 +177,12 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
 	DB_LOCK lock;
 	DB_MPOOLFILE *mpf;
 	QMETA *meta;
+	QUEUE_CURSOR *cp;
 	db_pgno_t metapg;
-	int cmp_n, cmp_p, modified, ret;
+	int cmp_n, cmp_p, exact, modified, ret;
 
 	REC_PRINT(__qam_mvptr_print);
-	REC_INTRO(__qam_mvptr_read, 1);
+	REC_INTRO(__qam_mvptr_read, 1, 1);
 
 	metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
 
@@ -203,6 +212,9 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
 	/*
 	 * Under normal circumstances, we never undo a movement of one of
 	 * the pointers.  Just move them along regardless of abort/commit.
+	 * When going forward we need to verify that this is really where
+	 * the pointer belongs.  A transaction may roll back and reinsert
+	 * a record that was missing at the time of this action.
 	 *
 	 * If we're undoing a truncate, we need to reset the pointers to
 	 * their state before the truncate.
@@ -222,11 +234,30 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
 			modified = 1;
 		}
 	} else if (op == DB_TXN_APPLY || cmp_p == 0) {
-		if (argp->opcode & QAM_SETFIRST)
-			meta->first_recno = argp->new_first;
+		cp = (QUEUE_CURSOR *)dbc->internal;
+		if ((argp->opcode & QAM_SETFIRST) &&
+		    meta->first_recno == argp->old_first) {
+			if ((ret = __qam_position(dbc,
+			    &meta->first_recno, QAM_READ, &exact)) != 0)
+				goto err;
+			if (!exact)
+				meta->first_recno = argp->new_first;
+			if (cp->page != NULL && (ret =
+			    __qam_fput(file_dbp, cp->pgno, cp->page, 0)) != 0)
+				goto err;
+		}
 
-		if (argp->opcode & QAM_SETCUR)
-			meta->cur_recno = argp->new_cur;
+		if ((argp->opcode & QAM_SETCUR) &&
+		    meta->cur_recno == argp->old_cur) {
+			if ((ret = __qam_position(dbc,
+			    &meta->cur_recno, QAM_READ, &exact)) != 0)
+				goto err;
+			if (!exact)
+				meta->cur_recno = argp->new_cur;
+			if (cp->page != NULL && (ret =
+			    __qam_fput(file_dbp, cp->pgno, cp->page, 0)) != 0)
+				goto err;
+		}
 
 		modified = 1;
 		meta->dbmeta.lsn = *lsnp;
@@ -241,6 +272,11 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
 done:	*lsnp = argp->prev_lsn;
 	ret = 0;
 
+	if (0) {
+err:		(void)__memp_fput(mpf, meta, 0);
+		(void)__LPUT(dbc, lock);
+	}
+
 out:	REC_CLOSE;
 }
 
@@ -272,8 +308,9 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
 	int cmp_n, modified, ret, t_ret;
 
 	COMPQUIET(info, NULL);
+	COMPQUIET(pagep, NULL);
 	REC_PRINT(__qam_del_print);
-	REC_INTRO(__qam_del_read, 1);
+	REC_INTRO(__qam_del_read, 1, 1);
 
 	if ((ret = __qam_fget(file_dbp,
 	    &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
@@ -323,8 +360,12 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
 		 * foul up a concurrent put.  Having too late an LSN
 		 * is harmless in queue except when we're determining
 		 * what we need to roll forward during recovery.  [#2588]
+		 * If we are aborting a restored transaction then it
+		 * might get rolled forward later so the LSN needs to
+		 * be correct in that case too. [#12181]
 		 */
-		if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+		if (cmp_n <= 0 &&
+		      (op == DB_TXN_BACKWARD_ROLL || IS_IN_RESTORE(dbenv)))
 			LSN(pagep) = argp->lsn;
 		modified = 1;
 	} else if (op == DB_TXN_APPLY || (cmp_n > 0 && DB_REDO(op))) {
@@ -374,8 +415,9 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
 	int cmp_n, modified, ret, t_ret;
 
 	COMPQUIET(info, NULL);
+	COMPQUIET(pagep, NULL);
 	REC_PRINT(__qam_delext_print);
-	REC_INTRO(__qam_delext_read, 1);
+	REC_INTRO(__qam_delext_read, 1, 1);
 
 	if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
 		if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
@@ -436,7 +478,8 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
 		 * is harmless in queue except when we're determining
 		 * what we need to roll forward during recovery.  [#2588]
 		 */
-		if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+		if (cmp_n <= 0 &&
+		      (op == DB_TXN_BACKWARD_ROLL || IS_IN_RESTORE(dbenv)))
 			LSN(pagep) = argp->lsn;
 		modified = 1;
 	} else if (op == DB_TXN_APPLY || (cmp_n > 0 && DB_REDO(op))) {
@@ -485,8 +528,9 @@ __qam_add_recover(dbenv, dbtp, lsnp, op, info)
 	int cmp_n, meta_dirty, modified, ret;
 
 	COMPQUIET(info, NULL);
+	COMPQUIET(pagep, NULL);
 	REC_PRINT(__qam_add_print);
-	REC_INTRO(__qam_add_read, 1);
+	REC_INTRO(__qam_add_read, 1, 1);
 
 	modified = 0;
 	if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
@@ -570,7 +614,8 @@ __qam_add_recover(dbenv, dbtp, lsnp, op, info)
 		 * is harmless in queue except when we're determining
 		 * what we need to roll forward during recovery.  [#2588]
 		 */
-		if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+		if (cmp_n <= 0 &&
+		      (op == DB_TXN_BACKWARD_ROLL || IS_IN_RESTORE(dbenv)))
 			LSN(pagep) = argp->lsn;
 	}
 
diff --git a/storage/bdb/qam/qam_stat.c b/storage/bdb/qam/qam_stat.c
index c5264bd019f..d6d28b5cc81 100644
--- a/storage/bdb/qam/qam_stat.c
+++ b/storage/bdb/qam/qam_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_stat.c,v 11.47 2004/09/22 16:29:47 bostic Exp $
+ * $Id: qam_stat.c,v 12.1 2005/06/16 20:23:33 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/qam/qam_stub.c b/storage/bdb/qam/qam_stub.c
index 1c22aaa520b..f8149881324 100644
--- a/storage/bdb/qam/qam_stub.c
+++ b/storage/bdb/qam/qam_stub.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_stub.c,v 1.12 2004/06/14 15:23:33 bostic Exp $
+ * $Id: qam_stub.c,v 12.1 2005/06/16 20:23:33 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/qam/qam_upgrade.c b/storage/bdb/qam/qam_upgrade.c
index dff82404acf..383b0c0a416 100644
--- a/storage/bdb/qam/qam_upgrade.c
+++ b/storage/bdb/qam/qam_upgrade.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_upgrade.c,v 11.16 2004/05/10 21:29:43 bostic Exp $
+ * $Id: qam_upgrade.c,v 12.1 2005/06/16 20:23:33 bostic Exp $
  */
 
 #include "db_config.h"
diff --git a/storage/bdb/qam/qam_verify.c b/storage/bdb/qam/qam_verify.c
index 571157b8fac..535cbe28a4c 100644
--- a/storage/bdb/qam/qam_verify.c
+++ b/storage/bdb/qam/qam_verify.c
@@ -1,17 +1,18 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1999-2004
+ * Copyright (c) 1999-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: qam_verify.c,v 1.51 2004/10/11 18:47:51 bostic Exp $
+ * $Id: qam_verify.c,v 12.3 2005/06/16 20:23:34 bostic Exp $
  */
 
 #include "db_config.h"
 
 #ifndef NO_SYSTEM_INCLUDES
 #include 
-
+#include 
+#include 
 #endif
 
 #include "db_int.h"
@@ -21,9 +22,6 @@
 #include "dbinc/db_shash.h"
 #include "dbinc/mp.h"
 #include "dbinc/qam.h"
-#include 
-#include 
-
 /*
  * __qam_vrfy_meta --
  *	Verify the queue-specific part of a metadata page.
@@ -93,6 +91,8 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
 		 * it when handling extents.  It would get set up in open,
 		 * if we called open normally, but we don't.
 		 */
+		vdp->re_pad = meta->re_pad;
+		qp->re_pad = (int)meta->re_pad;
 		qp->re_len = vdp->re_len = meta->re_len;
 		qp->rec_page = vdp->rec_page = meta->rec_page;
 		qp->page_ext = vdp->page_ext = meta->page_ext;
diff --git a/storage/bdb/rep/rep.src b/storage/bdb/rep/rep.src
index ffabca3fa02..d1b207f5b3b 100644
--- a/storage/bdb/rep/rep.src
+++ b/storage/bdb/rep/rep.src
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *      Sleepycat Software.  All rights reserved.
  *
- * $Id: rep.src,v 1.5 2004/09/22 18:01:04 bostic Exp $
+ * $Id: rep.src,v 12.3 2005/10/27 13:27:03 bostic Exp $
  */
 
 PREFIX  __rep
@@ -29,17 +29,17 @@ INCLUDE
  */
 BEGIN_BUF update
 POINTER	first_lsn	DB_LSN *	lu
-ARG	num_files	int		d
+ARG	num_files	u_int32_t	lu
 END
 
 /*
  * file info
  */
 BEGIN_BUF fileinfo
-ARG	pgsize		size_t		lu
+ARG	pgsize		u_int32_t	lu
 ARG	pgno		db_pgno_t	lu
 ARG	max_pgno	db_pgno_t	lu
-ARG	filenum		int		d
+ARG	filenum		u_int32_t	lu
 ARG	id		int32_t		d
 ARG	type		u_int32_t	lu
 ARG	flags		u_int32_t	lu
diff --git a/storage/bdb/rep/rep_backup.c b/storage/bdb/rep/rep_backup.c
index 64c538fb5c9..b19e387d382 100644
--- a/storage/bdb/rep/rep_backup.c
+++ b/storage/bdb/rep/rep_backup.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2004
+ * Copyright (c) 2004-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_backup.c,v 1.33 2004/10/29 18:08:09 bostic Exp $
+ * $Id: rep_backup.c,v 12.38 2005/11/09 14:17:30 margo Exp $
  */
 
 #include "db_config.h"
@@ -36,22 +36,20 @@
 
 static int __rep_filedone __P((DB_ENV *, int, REP *, __rep_fileinfo_args *,
     u_int32_t));
-static int __rep_files_data __P((DB_ENV *, u_int8_t *, size_t *,
-    size_t *, int *));
-static int __rep_files_inmem __P((DB_ENV *, u_int8_t *, size_t *,
-    size_t *, int *));
+static int __rep_find_dbs __P((DB_ENV *, u_int8_t *, size_t *,
+    size_t *, u_int32_t *));
 static int __rep_get_fileinfo __P((DB_ENV *, const char *,
-    __rep_fileinfo_args *, u_int8_t *, int *));
+    const char *, __rep_fileinfo_args *, u_int8_t *, u_int32_t *));
 static int __rep_log_setup __P((DB_ENV *, REP *));
 static int __rep_mpf_open __P((DB_ENV *, DB_MPOOLFILE **,
-    __rep_fileinfo_args *));
+    __rep_fileinfo_args *, u_int32_t));
 static int __rep_page_gap __P((DB_ENV *, REP *, __rep_fileinfo_args *,
     u_int32_t));
 static int __rep_page_sendpages __P((DB_ENV *, int,
     __rep_fileinfo_args *, DB_MPOOLFILE *, DB *));
 static int __rep_queue_filedone __P((DB_ENV *, REP *, __rep_fileinfo_args *));
 static int __rep_walk_dir __P((DB_ENV *, const char *, u_int8_t *,
-    size_t *, size_t *, int *));
+    size_t *, size_t *, u_int32_t *));
 static int __rep_write_page __P((DB_ENV *, REP *, __rep_fileinfo_args *));
 
 /*
@@ -67,12 +65,11 @@ __rep_update_req(dbenv, eid)
 {
 	DBT updbt;
 	DB_LOG *dblp;
-	DB_LOGC *logc;
 	DB_LSN lsn;
-	DBT data_dbt;
 	size_t filelen, filesz, updlen;
+	u_int32_t filecnt;
 	u_int8_t *buf, *fp;
-	int filecnt, ret, t_ret;
+	int ret;
 
 	/*
 	 * Allocate enough for all currently open files and then some.
@@ -99,23 +96,14 @@ __rep_update_req(dbenv, eid)
 	 * then get on-disk files.
 	 */
 	fp = buf + sizeof(__rep_update_args);
-	if ((ret = __rep_files_inmem(dbenv, fp, &filesz, &filelen,
-	    &filecnt)) != 0)
-		goto err;
-	if ((ret = __rep_files_data(dbenv, fp, &filesz, &filelen,
-	    &filecnt)) != 0)
+	if ((ret = __rep_find_dbs(dbenv, fp, &filesz, &filelen, &filecnt)) != 0)
 		goto err;
 
 	/*
-	 * Now get our first LSN.
+	 * Now get our first LSN.  We send the lsn of the first
+	 * non-archivable log file.
 	 */
-	if ((ret = __log_cursor(dbenv, &logc)) != 0)
-		goto err;
-	memset(&data_dbt, 0, sizeof(data_dbt));
-	ret = __log_c_get(logc, &lsn, &data_dbt, DB_FIRST);
-	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-		ret = t_ret;
-	if (ret != 0)
+	if ((ret = __log_get_stable_lsn(dbenv, &lsn)) != 0)
 		goto err;
 
 	/*
@@ -129,10 +117,11 @@ __rep_update_req(dbenv, eid)
 	memset(&updbt, 0, sizeof(updbt));
 	updbt.data = buf;
 	updbt.size = (u_int32_t)(filelen + updlen);
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	lsn = ((LOG *)dblp->reginfo.primary)->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
-	(void)__rep_send_message(dbenv, eid, REP_UPDATE, &lsn, &updbt, 0);
+	LOG_SYSTEM_UNLOCK(dbenv);
+	(void)__rep_send_message(dbenv, eid, REP_UPDATE, &lsn, &updbt, 0,
+	    DB_REP_ANYWHERE);
 
 err:
 	__os_free(dbenv, buf);
@@ -140,17 +129,19 @@ err:
 }
 
 /*
- * __rep_files_data -
- *	Walk through all the files in the env's data_dirs.  We need to
- *	open them, gather the necessary information and then close them.
- *	Then we need to figure out if they're already in the dbentry array.
+ * __rep_find_dbs -
+ *	Walk through all the named files/databases including those in the
+ *	environment or data_dirs and those that in named and in-memory.  We
+ *	need to	open them, gather the necessary information and then close
+ *	them. Then we need to figure out if they're already in the dbentry
+ *	array.
  */
 static int
-__rep_files_data(dbenv, fp, fileszp, filelenp, filecntp)
+__rep_find_dbs(dbenv, fp, fileszp, filelenp, filecntp)
 	DB_ENV *dbenv;
 	u_int8_t *fp;
 	size_t *fileszp, *filelenp;
-	int *filecntp;
+	u_int32_t *filecntp;
 {
 	int ret;
 	char **ddir;
@@ -169,23 +160,37 @@ __rep_files_data(dbenv, fp, fileszp, filelenp, filecntp)
 			    fileszp, filelenp, filecntp)) != 0)
 				break;
 	}
+
+	/* Now, collect any in-memory named databases. */
+	if (ret == 0)
+		ret = __rep_walk_dir(dbenv,
+		    NULL, fp, fileszp, filelenp, filecntp);
+
 	return (ret);
 }
 
-static int
+/*
+ * __rep_walk_dir --
+ *
+ * This is the routine that walks a directory and fills in the structures
+ * that we use to generate messages to the client telling it what files
+ * files are available.  If the directory name is NULL, then we should
+ * walk the list of in-memory named files.
+ */
+int
 __rep_walk_dir(dbenv, dir, fp, fileszp, filelenp, filecntp)
 	DB_ENV *dbenv;
 	const char *dir;
 	u_int8_t *fp;
 	size_t *fileszp, *filelenp;
-	int *filecntp;
+	u_int32_t *filecntp;
 {
 	DBT namedbt, uiddbt;
 	__rep_fileinfo_args tmpfp;
 	size_t len, offset;
 	int cnt, i, ret;
 	u_int8_t *rfp, uid[DB_FILE_ID_LEN];
-	char **names;
+	char *file, **names, *subdb;
 #ifdef DIAGNOSTIC
 	REP *rep;
 	DB_MSGBUF mb;
@@ -196,10 +201,17 @@ __rep_walk_dir(dbenv, dir, fp, fileszp, filelenp, filecntp)
 #endif
 	memset(&namedbt, 0, sizeof(namedbt));
 	memset(&uiddbt, 0, sizeof(uiddbt));
-	RPRINT(dbenv, rep, (dbenv, &mb,
-	    "Walk_dir: Getting info for dir: %s", dir));
-	if ((ret = __os_dirlist(dbenv, dir, &names, &cnt)) != 0)
-		return (ret);
+	if (dir == NULL) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Walk_dir: Getting info for in-memory named files"));
+		if ((ret = __memp_inmemlist(dbenv, &names, &cnt)) != 0)
+			return (ret);
+	} else {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Walk_dir: Getting info for dir: %s", dir));
+		if ((ret = __os_dirlist(dbenv, dir, &names, &cnt)) != 0)
+			return (ret);
+	}
 	rfp = fp;
 	RPRINT(dbenv, rep, (dbenv, &mb,
 	    "Walk_dir: Dir %s has %d files", dir, cnt));
@@ -223,8 +235,15 @@ __rep_walk_dir(dbenv, dir, fp, fileszp, filelenp, filecntp)
 		 * We found a file to process.  Check if we need
 		 * to allocate more space.
 		 */
-		if ((ret = __rep_get_fileinfo(dbenv, names[i], &tmpfp, uid,
-		    filecntp)) != 0) {
+		if (dir == NULL) {
+			file = NULL;
+			subdb = names[i];
+		} else {
+			file = names[i];
+			subdb = NULL;
+		}
+		if ((ret = __rep_get_fileinfo(dbenv,
+		    file, subdb, &tmpfp, uid, filecntp)) != 0) {
 			/*
 			 * If we find a file that isn't a database, skip it.
 			 */
@@ -273,12 +292,12 @@ retry:
 }
 
 static int
-__rep_get_fileinfo(dbenv, file, rfp, uid, filecntp)
+__rep_get_fileinfo(dbenv, file, subdb, rfp, uid, filecntp)
 	DB_ENV *dbenv;
-	const char *file;
+	const char *file, *subdb;
 	__rep_fileinfo_args *rfp;
 	u_int8_t *uid;
-	int *filecntp;
+	u_int32_t *filecntp;
 {
 
 	DB *dbp, *entdbp;
@@ -296,10 +315,9 @@ __rep_get_fileinfo(dbenv, file, rfp, uid, filecntp)
 	mpf = NULL;
 	LOCK_INIT(lk);
 
-	dblp = dbenv->lg_handle;
 	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 		goto err;
-	if ((ret = __db_open(dbp, NULL, file, NULL, DB_UNKNOWN,
+	if ((ret = __db_open(dbp, NULL, file, subdb, DB_UNKNOWN,
 	    DB_RDONLY | (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0),
 	    0, PGNO_BASE_MD)) != 0)
 		goto err;
@@ -327,7 +345,7 @@ __rep_get_fileinfo(dbenv, file, rfp, uid, filecntp)
 	rfp->pgsize = dbp->pgsize;
 	memcpy(uid, dbp->fileid, DB_FILE_ID_LEN);
 	rfp->filenum = (*filecntp)++;
-	rfp->type = dbp->type;
+	rfp->type = (u_int32_t)dbp->type;
 	rfp->flags = dbp->flags;
 	rfp->id = DB_LOGFILEID_INVALID;
 	ret = __memp_fput(dbp->mpf, pagep, 0);
@@ -352,12 +370,13 @@ err:
 	 * is useless in that case.
 	 */
 	if (ret == 0) {
-		MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+		LOG_SYSTEM_LOCK(dbenv);
 		/*
 		 * Walk entry table looking for this uid.
 		 * If we find it, save the id.
 		 */
-		for (i = 0; i < dblp->dbentry_cnt; i++) {
+		for (dblp = dbenv->lg_handle,
+		    i = 0; i < dblp->dbentry_cnt; i++) {
 			entdbp = dblp->dbentry[i].dbp;
 			if (entdbp == NULL)
 				break;
@@ -367,34 +386,11 @@ err:
 			    DB_FILE_ID_LEN) == 0)
 				rfp->id = i;
 		}
-		MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+		LOG_SYSTEM_UNLOCK(dbenv);
 	}
 	return (ret);
 }
 
-/*
- * __rep_files_inmem -
- *	Gather all the information about in-memory files.
- */
-static int
-__rep_files_inmem(dbenv, fp, fileszp, filelenp, filecntp)
-	DB_ENV *dbenv;
-	u_int8_t *fp;
-	size_t *fileszp, *filelenp;
-	int *filecntp;
-{
-
-	int ret;
-
-	COMPQUIET(dbenv, NULL);
-	COMPQUIET(fp, NULL);
-	COMPQUIET(fileszp, NULL);
-	COMPQUIET(filelenp, NULL);
-	COMPQUIET(filecntp, NULL);
-	ret = 0;
-	return (ret);
-}
-
 /*
  * __rep_page_req
  *	Process a page_req and send the page information to the client.
@@ -407,39 +403,40 @@ __rep_page_req(dbenv, eid, rec)
 	int eid;
 	DBT *rec;
 {
+	__rep_fileinfo_args *msgfp;
 	DB *dbp;
 	DBT msgdbt;
 	DB_LOG *dblp;
 	DB_MPOOLFILE *mpf;
-	__rep_fileinfo_args *msgfp;
+	DB_REP *db_rep;
+	REP *rep;
 	int ret, t_ret;
 	void *next;
 #ifdef DIAGNOSTIC
 	DB_MSGBUF mb;
-	DB_REP *db_rep;
-	REP *rep;
+#endif
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
-#endif
 	dblp = dbenv->lg_handle;
+
 	if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0)
 		return (ret);
+
 	/*
-	 * See if we can find it already.  If so we can quickly
-	 * access its mpool and process.  Otherwise we have to
-	 * open the file ourselves.
+	 * See if we can find it already.  If so we can quickly access its
+	 * mpool and process.  Otherwise we have to open the file ourselves.
 	 */
 	RPRINT(dbenv, rep, (dbenv, &mb, "page_req: file %d page %lu to %lu",
 	    msgfp->filenum, (u_long)msgfp->pgno, (u_long)msgfp->max_pgno));
-	MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+	LOG_SYSTEM_LOCK(dbenv);
 	if (msgfp->id >= 0 && dblp->dbentry_cnt > msgfp->id) {
 		dbp = dblp->dbentry[msgfp->id].dbp;
 		if (dbp != NULL) {
 			DB_ASSERT(dbp->log_filename != NULL);
 			if (memcmp(msgfp->uid.data, dbp->log_filename->ufid,
 			    DB_FILE_ID_LEN) == 0) {
-				MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+				LOG_SYSTEM_UNLOCK(dbenv);
 				RPRINT(dbenv, rep, (dbenv, &mb,
 				    "page_req: found %d in dbreg",
 				    msgfp->filenum));
@@ -449,7 +446,7 @@ __rep_page_req(dbenv, eid, rec)
 			}
 		}
 	}
-	MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	/*
 	 * If we get here, we do not have the file open via dbreg.
@@ -458,14 +455,17 @@ __rep_page_req(dbenv, eid, rec)
 	 */
 	RPRINT(dbenv, rep, (dbenv, &mb, "page_req: Open %d via mpf_open",
 	    msgfp->filenum));
-	if ((ret = __rep_mpf_open(dbenv, &mpf, msgfp)) != 0) {
+	if ((ret = __rep_mpf_open(dbenv, &mpf, msgfp, 0)) != 0) {
 		memset(&msgdbt, 0, sizeof(msgdbt));
 		msgdbt.data = msgfp;
 		msgdbt.size = sizeof(*msgfp);
 		RPRINT(dbenv, rep, (dbenv, &mb, "page_req: Open %d failed",
 		    msgfp->filenum));
-		(void)__rep_send_message(dbenv, eid, REP_FILE_FAIL,
-		    NULL, &msgdbt, 0);
+		if (F_ISSET(rep, REP_F_MASTER))
+			(void)__rep_send_message(dbenv, eid, REP_FILE_FAIL,
+			    NULL, &msgdbt, 0, 0);
+		else
+			ret = DB_NOTFOUND;
 		goto err;
 	}
 
@@ -487,17 +487,22 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 	DB *dbp;
 {
 	DB *qdbp;
-	DBT msgdbt, pgdbt;
+	DBT lockdbt, msgdbt, pgdbt;
+	DB_LOCK lock;
+	DB_LOCK_ILOCK lock_obj;
 	DB_LOG *dblp;
 	DB_LSN lsn;
 	DB_MSGBUF mb;
 	DB_REP *db_rep;
 	PAGE *pagep;
 	REP *rep;
+	REP_BULK bulk;
+	REP_THROTTLE repth;
 	db_pgno_t p;
+	uintptr_t bulkoff;
 	size_t len, msgsz;
-	u_int32_t bytes, gbytes, type;
-	int check_limit, opened, ret, t_ret;
+	u_int32_t bulkflags, lockid, use_bulk;
+	int opened, ret, t_ret;
 	u_int8_t *buf;
 
 #ifndef DIAGNOSTIC
@@ -505,21 +510,27 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 #endif
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
+	lockid = DB_LOCK_INVALIDID;
 	opened = 0;
-	gbytes = bytes = 0;
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	gbytes = rep->gbytes;
-	bytes = rep->bytes;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	check_limit = gbytes != 0 || bytes != 0;
 	qdbp = NULL;
 	buf = NULL;
-	if (msgfp->type == DB_QUEUE) {
+	bulk.addr = NULL;
+	use_bulk = FLD_ISSET(rep->config, REP_C_BULK);
+	if (msgfp->type == (u_int32_t)DB_QUEUE) {
 		if (dbp == NULL) {
 			if ((ret = db_create(&qdbp, dbenv, 0)) != 0)
 				goto err;
-			if ((ret = __db_open(qdbp, NULL, msgfp->info.data,
-			    NULL, DB_UNKNOWN,
+			/*
+			 * We need to check whether this is in-memory so that
+			 * we pass the name correctly as either the file or
+			 * the database name.
+			 */
+			if ((ret = __db_open(qdbp, NULL,
+			    FLD_ISSET(msgfp->flags, DB_AM_INMEM) ?
+			    NULL : msgfp->info.data,
+			    FLD_ISSET(msgfp->flags, DB_AM_INMEM) ?
+			    msgfp->info.data : NULL,
+			    DB_UNKNOWN,
 			    DB_RDONLY | (F_ISSET(dbenv, DB_ENV_THREAD) ?
 			    DB_THREAD : 0), 0, PGNO_BASE_MD)) != 0)
 				goto err;
@@ -529,13 +540,65 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 	}
 	msgsz = sizeof(__rep_fileinfo_args) + DB_FILE_ID_LEN + msgfp->pgsize;
 	if ((ret = __os_calloc(dbenv, 1, msgsz, &buf)) != 0)
-		return (ret);
+		goto err;
 	memset(&msgdbt, 0, sizeof(msgdbt));
 	memset(&pgdbt, 0, sizeof(pgdbt));
 	RPRINT(dbenv, rep, (dbenv, &mb, "sendpages: file %d page %lu to %lu",
 	    msgfp->filenum, (u_long)msgfp->pgno, (u_long)msgfp->max_pgno));
+	memset(&repth, 0, sizeof(repth));
+	/*
+	 * If we're doing bulk transfer, allocate a bulk buffer to put our
+	 * pages in.  We still need to initialize the throttle info
+	 * because if we encounter a page larger than our entire bulk
+	 * buffer, we need to send it as a singleton.
+	 *
+	 * Use a local var so that we don't need to worry if someone else
+	 * turns on/off bulk in the middle of our call here.
+	 */
+	if (use_bulk && (ret = __rep_bulk_alloc(dbenv, &bulk, eid,
+	    &bulkoff, &bulkflags, REP_BULK_PAGE)) != 0)
+		goto err;
+	REP_SYSTEM_LOCK(dbenv);
+	repth.gbytes = rep->gbytes;
+	repth.bytes = rep->bytes;
+	repth.type = REP_PAGE;
+	repth.data_dbt = &msgdbt;
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	/*
+	 * Set up locking.
+	 */
+	LOCK_INIT(lock);
+	memset(&lock_obj, 0, sizeof(lock_obj));
+	if ((ret = __lock_id(dbenv, &lockid, NULL)) != 0)
+		goto err;
+	memcpy(lock_obj.fileid, mpf->fileid, DB_FILE_ID_LEN);
+	lock_obj.type = DB_PAGE_LOCK;
+
+	memset(&lockdbt, 0, sizeof(lockdbt));
+	lockdbt.data = &lock_obj;
+	lockdbt.size = sizeof(lock_obj);
+
 	for (p = msgfp->pgno; p <= msgfp->max_pgno; p++) {
-		if (msgfp->type == DB_QUEUE && p != 0)
+		/*
+		 * We're not waiting for the lock, if we cannot get
+		 * the lock for this page, skip it.  The gap
+		 * code will rerequest it.
+		 */
+		lock_obj.pgno = p;
+		if ((ret = __lock_get(dbenv, lockid, DB_LOCK_NOWAIT, &lockdbt,
+		    DB_LOCK_READ, &lock)) != 0) {
+			/*
+			 * Continue if we couldn't get the lock.
+			 */
+			if (ret == DB_LOCK_NOTGRANTED)
+				continue;
+			/*
+			 * Otherwise we have an error.
+			 */
+			goto err;
+		}
+		if (msgfp->type == (u_int32_t)DB_QUEUE && p != 0)
 #ifdef HAVE_QUEUE
 			ret = __qam_fget(qdbp, &p, DB_MPOOL_CREATE, &pagep);
 #else
@@ -543,18 +606,22 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 #endif
 		else
 			ret = __memp_fget(mpf, &p, DB_MPOOL_CREATE, &pagep);
-		type = REP_PAGE;
 		if (ret == DB_PAGE_NOTFOUND) {
 			memset(&pgdbt, 0, sizeof(pgdbt));
-			ret = 0;
 			ZERO_LSN(lsn);
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "sendpages: PAGE_FAIL on page %lu", (u_long)p));
-			type = REP_PAGE_FAIL;
 			msgfp->pgno = p;
-			goto send;
+			if (F_ISSET(rep, REP_F_MASTER)) {
+				ret = 0;
+				RPRINT(dbenv, rep, (dbenv, &mb,
+				    "sendpages: PAGE_FAIL on page %lu",
+				    (u_long)p));
+				(void)__rep_send_message(dbenv, eid,
+				    REP_PAGE_FAIL, &lsn, &msgdbt, 0, 0);
+			} else
+				ret = DB_NOTFOUND;
+			goto lockerr;
 		} else if (ret != 0)
-			goto err;
+			goto lockerr;
 		else {
 			pgdbt.data = pagep;
 			pgdbt.size = (u_int32_t)msgfp->pgsize;
@@ -564,6 +631,19 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 		    msgfp->pgsize, p, msgfp->max_pgno,
 		    msgfp->filenum, msgfp->id, msgfp->type,
 		    msgfp->flags, &msgfp->uid, &pgdbt);
+		if (msgfp->type != (u_int32_t)DB_QUEUE || p == 0)
+			t_ret = __memp_fput(mpf, pagep, 0);
+#ifdef HAVE_QUEUE
+		else
+			/*
+			 * We don't need an #else for HAVE_QUEUE here because if
+			 * we're not compiled with queue, then we're guaranteed
+			 * to have set REP_PAGE_FAIL above.
+			 */
+			t_ret = __qam_fput(qdbp, p, pagep, 0);
+#endif
+		if ((t_ret = __ENV_LPUT(dbenv, lock)) != 0 && ret == 0)
+			ret = t_ret;
 		if (ret != 0)
 			goto err;
 
@@ -572,68 +652,53 @@ __rep_page_sendpages(dbenv, eid, msgfp, mpf, dbp)
 		msgdbt.size = (u_int32_t)len;
 
 		dblp = dbenv->lg_handle;
-		R_LOCK(dbenv, &dblp->reginfo);
-		lsn = ((LOG *)dblp->reginfo.primary)->lsn;
-		R_UNLOCK(dbenv, &dblp->reginfo);
-		if (check_limit) {
-			/*
-			 * msgdbt.size is only the size of the page and
-			 * other information we're sending.  It doesn't
-			 * count the size of the control structure.  Factor
-			 * that in as well so we're not off by a lot if
-			 * pages are small.
-			 */
-			while (bytes < msgdbt.size + sizeof(REP_CONTROL)) {
-				if (gbytes > 0) {
-					bytes += GIGABYTE;
-					--gbytes;
-					continue;
-				}
-				/*
-				 * We don't hold the rep mutex, and may
-				 * miscount.
-				 */
-				rep->stat.st_nthrottles++;
-				type = REP_PAGE_MORE;
-				goto send;
-			}
-			bytes -= (msgdbt.size + sizeof(REP_CONTROL));
-		}
-send:
-		RPRINT(dbenv, rep, (dbenv, &mb,
-		    "sendpages: %s %lu, lsn [%lu][%lu]",
-		    (type == REP_PAGE ? "PAGE" :
-		    (type == REP_PAGE_MORE ? "PAGE_MORE" : "PAGE_FAIL")),
-		    (u_long)p, (u_long)lsn.file, (u_long)lsn.offset));
-		(void)__rep_send_message(dbenv, eid, type, &lsn, &msgdbt, 0);
+		LOG_SYSTEM_LOCK(dbenv);
+		repth.lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+		LOG_SYSTEM_UNLOCK(dbenv);
 		/*
-		 * If we have REP_PAGE_FAIL we need to break before trying
-		 * to give the page back to mpool.  If we have REP_PAGE_MORE
+		 * If we are configured for bulk, try to send this as a bulk
+		 * request.  If not configured, or it is too big for bulk
+		 * then just send normally.
+		 */
+		if (use_bulk)
+			ret = __rep_bulk_message(dbenv, &bulk, &repth,
+			    &repth.lsn, &msgdbt, 0);
+		if (!use_bulk || ret == DB_REP_BULKOVF)
+			ret = __rep_send_throttle(dbenv, eid, &repth, 0);
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "sendpages: %lu, lsn [%lu][%lu]", (u_long)p,
+		    (u_long)repth.lsn.file, (u_long)repth.lsn.offset));
+		/*
+		 * If we have REP_PAGE_MORE
 		 * we need to break this loop after giving the page back
 		 * to mpool.  Otherwise, with REP_PAGE, we keep going.
 		 */
-		if (type == REP_PAGE_FAIL)
-			break;
-		if (msgfp->type != DB_QUEUE || p == 0)
-			ret = __memp_fput(mpf, pagep, 0);
-#ifdef HAVE_QUEUE
-		else
-			/*
-			 * We don't need an #else for HAVE_QUEUE here because if
-			 * we're not compiled with queue, then we're guaranteed
-			 * to have set REP_PAGE_FAIL above.
-			 */
-			ret = __qam_fput(qdbp, p, pagep, 0);
-#endif
-		if (type == REP_PAGE_MORE)
+		if (ret == 0)
+			ret = t_ret;
+		if (repth.type == REP_PAGE_MORE || ret != 0)
 			break;
 	}
+
+	if (0) {
+lockerr:	if ((t_ret = __ENV_LPUT(dbenv, lock)) != 0 && ret == 0)
+			ret = t_ret;
+	}
 err:
+	/*
+	 * We're done, force out whatever remains in the bulk buffer and
+	 * free it.
+	 */
+	if (use_bulk && bulk.addr != NULL &&
+	    (t_ret = __rep_bulk_free(dbenv, &bulk, 0)) != 0 && ret == 0)
+		ret = t_ret;
 	if (opened && (t_ret = __db_close(qdbp, NULL, DB_NOSYNC)) != 0 &&
 	    ret == 0)
 		ret = t_ret;
 	if (buf != NULL)
 		__os_free(dbenv, buf);
+	if (lockid != DB_LOCK_INVALIDID && (t_ret = __lock_id_free(dbenv,
+	    lockid)) != 0 && ret == 0)
+		ret = t_ret;
 	return (ret);
 }
 
@@ -651,7 +716,6 @@ __rep_update_setup(dbenv, eid, rp, rec)
 	DBT *rec;
 {
 	DB_LOG *dblp;
-	DB_LSN lsn;
 	DB_REP *db_rep;
 	DBT pagereq_dbt;
 	LOG *lp;
@@ -673,17 +737,16 @@ __rep_update_setup(dbenv, eid, rp, rec)
 	lp = dblp->reginfo.primary;
 	ret = 0;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (!F_ISSET(rep, REP_F_RECOVER_UPDATE)) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 	F_CLR(rep, REP_F_RECOVER_UPDATE);
 	/*
 	 * We know we're the first to come in here due to the
-	 * REP_F_RECOVER_UPDATE flag.  REP_F_READY should not be set.
+	 * REP_F_RECOVER_UPDATE flag.
 	 */
-	DB_ASSERT(!F_ISSET(rep, REP_F_READY));
 	F_SET(rep, REP_F_RECOVER_PAGE);
 	/*
 	 * We do not clear REP_F_READY or rep->in_recovery in this code.
@@ -691,7 +754,8 @@ __rep_update_setup(dbenv, eid, rp, rec)
 	 * code and that will clear all the flags and allow others to
 	 * proceed.
 	 */
-	__rep_lockout(dbenv, db_rep, rep, 1);
+	if ((ret = __rep_lockout(dbenv, rep, 1)) != 0)
+		goto err;
 	/*
 	 * We need to update the timestamp and kill any open handles
 	 * on this client.  The files are changing completely.
@@ -700,39 +764,31 @@ __rep_update_setup(dbenv, eid, rp, rec)
 	renv = infop->primary;
 	(void)time(&renv->rep_timestamp);
 
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 	lp->wait_recs = rep->request_gap;
 	lp->rcvd_recs = 0;
 	ZERO_LSN(lp->ready_lsn);
 	ZERO_LSN(lp->waiting_lsn);
 	ZERO_LSN(lp->max_wait_lsn);
 	ZERO_LSN(lp->max_perm_lsn);
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 	if ((ret = __rep_update_read(dbenv, rec->data, &next, &rup)) != 0)
-		goto err;
-	R_LOCK(dbenv, &dblp->reginfo);
-	lsn = lp->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
+		goto err_nolock;
 
 	/*
 	 * We need to empty out any old log records that might be in the
 	 * temp database.
 	 */
 	if ((ret = __db_truncate(db_rep->rep_db, NULL, &count)) != 0)
-		goto err;
+		goto err_nolock;
 
 	/*
-	 * If our log is before the master's beginning of log,
-	 * we need to request from the master's beginning.
-	 * If we have some log, we need the earlier of the
-	 * master's last checkpoint LSN or our current LSN.
+	 * We will remove all logs we have so we need to request
+	 * from the master's beginning.
 	 */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	if (log_compare(&lsn, &rup->first_lsn) < 0)
-		rep->first_lsn = rup->first_lsn;
-	else
-		rep->first_lsn = lsn;
+	REP_SYSTEM_LOCK(dbenv);
+	rep->first_lsn = rup->first_lsn;
 	rep->last_lsn = rp->lsn;
 	rep->nfiles = rup->num_files;
 	rep->curfile = 0;
@@ -794,7 +850,7 @@ __rep_update_setup(dbenv, eid, rp, rec)
 	 * We set up pagereq_dbt as we went along.  Send it now.
 	 */
 	(void)__rep_send_message(dbenv, eid, REP_PAGE_REQ,
-	    NULL, &pagereq_dbt, 0);
+	    NULL, &pagereq_dbt, 0, DB_REP_ANYWHERE);
 	if (0) {
 errmem:		__os_free(dbenv, rep->curinfo);
 errmem1:	__os_free(dbenv, rep->originfo);
@@ -802,12 +858,15 @@ errmem1:	__os_free(dbenv, rep->originfo);
 		rep->curinfo = NULL;
 		rep->originfo = NULL;
 	}
-err:
-	/*
-	 * If we get an error, we cannot leave ourselves in the
-	 * RECOVER_PAGE state because we have no file information.
-	 * That also means undo'ing the rep_lockout.
-	 * We need to move back to the RECOVER_UPDATE stage.
+
+	if (0) {
+err_nolock:	REP_SYSTEM_LOCK(dbenv);
+	}
+
+err:	/*
+	 * If we get an error, we cannot leave ourselves in the RECOVER_PAGE
+	 * state because we have no file information.  That also means undo'ing
+	 * the rep_lockout.  We need to move back to the RECOVER_UPDATE stage.
 	 */
 	if (ret != 0) {
 		RPRINT(dbenv, rep, (dbenv, &mb,
@@ -817,7 +876,76 @@ err:
 		rep->in_recovery = 0;
 		F_SET(rep, REP_F_RECOVER_UPDATE);
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+	return (ret);
+}
+
+/*
+ * __rep_bulk_page
+ *	Process a bulk page message.
+ *
+ * PUBLIC: int __rep_bulk_page __P((DB_ENV *, int, REP_CONTROL *, DBT *));
+ */
+int
+__rep_bulk_page(dbenv, eid, rp, rec)
+	DB_ENV *dbenv;
+	int eid;
+	REP_CONTROL *rp;
+	DBT *rec;
+{
+	DB_REP *db_rep;
+	DBT pgrec;
+	REP *rep;
+	REP_CONTROL tmprp;
+	u_int32_t len;
+	int ret;
+	u_int8_t *p, *ep;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	memset(&pgrec, 0, sizeof(pgrec));
+	/*
+	 * We're going to be modifying the rp LSN contents so make
+	 * our own private copy to play with.  We need to set the
+	 * rectype to REP_PAGE because we're calling through __rep_page
+	 * to process each page, and lower functions make decisions
+	 * based on the rectypes (for throttling/gap processing)
+	 */
+	memcpy(&tmprp, rp, sizeof(tmprp));
+	tmprp.rectype = REP_PAGE;
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	for (ep = (u_int8_t *)rec->data + rec->size, p = (u_int8_t *)rec->data;
+	    p < ep; p += len) {
+		/*
+		 * First thing in the buffer is the length.  Then the LSN
+		 * of this page, then the page info itself.
+		 */
+		memcpy(&len, p, sizeof(len));
+		p += sizeof(len);
+		memcpy(&tmprp.lsn, p, sizeof(DB_LSN));
+		p += sizeof(DB_LSN);
+		pgrec.data = p;
+		pgrec.size = len;
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "rep_bulk_page: Processing LSN [%lu][%lu]",
+		    (u_long)tmprp.lsn.file, (u_long)tmprp.lsn.offset));
+		RPRINT(dbenv, rep, (dbenv, &mb,
+    "rep_bulk_page: p %#lx ep %#lx pgrec data %#lx, size %lu (%#lx)",
+		    P_TO_ULONG(p), P_TO_ULONG(ep), P_TO_ULONG(pgrec.data),
+		    (u_long)pgrec.size, (u_long)pgrec.size));
+		/*
+		 * Now send the page info DBT to the page processing function.
+		 */
+		ret = __rep_page(dbenv, eid, &tmprp, &pgrec);
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "rep_bulk_page: rep_page ret %d", ret));
+
+		if (ret != 0)
+			break;
+	}
 	return (ret);
 }
 
@@ -850,13 +978,13 @@ __rep_page(dbenv, eid, rp, rec)
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (!F_ISSET(rep, REP_F_RECOVER_PAGE)) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 	if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (ret);
 	}
 	RPRINT(dbenv, rep, (dbenv, &mb,
@@ -884,7 +1012,7 @@ __rep_page(dbenv, eid, rp, rec)
 	if ((ret = __rep_client_dbinit(dbenv, 1, REP_PG)) != 0)
 		goto err;
 
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 	memset(&key, 0, sizeof(key));
 	memset(&data, 0, sizeof(data));
 	recno = (db_recno_t)(msgfp->pgno + 1);
@@ -911,7 +1039,7 @@ __rep_page(dbenv, eid, rp, rec)
 
 	RPRINT(dbenv, rep, (dbenv, &mb,
 	    "PAGE: Write page %lu into mpool", (u_long)msgfp->pgno));
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	/*
 	 * We put the page in the database file itself.
 	 */
@@ -947,8 +1075,8 @@ __rep_page(dbenv, eid, rp, rec)
 	 */
 	ret = __rep_filedone(dbenv, eid, rep, msgfp, rp->rectype);
 
-err:
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+err:	REP_SYSTEM_UNLOCK(dbenv);
+
 err_nolock:
 	__os_free(dbenv, msgfp);
 	return (ret);
@@ -980,13 +1108,13 @@ __rep_page_fail(dbenv, eid, rec)
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (!F_ISSET(rep, REP_F_RECOVER_PAGE)) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 	if ((ret = __rep_fileinfo_read(dbenv, rec->data, &next, &msgfp)) != 0) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (ret);
 	}
 	/*
@@ -1001,11 +1129,11 @@ __rep_page_fail(dbenv, eid, rec)
 	if (msgfp->filenum != rep->curfile) {
 		RPRINT(dbenv, rep, (dbenv, &mb, "Msg file %d != curfile %d",
 		    msgfp->filenum, rep->curfile));
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		return (0);
 	}
 	rfp = rep->curinfo;
-	if (rfp->type != DB_QUEUE)
+	if (rfp->type != (u_int32_t)DB_QUEUE)
 		--rfp->max_pgno;
 	else {
 		/*
@@ -1036,7 +1164,7 @@ __rep_page_fail(dbenv, eid, rec)
 	 * send out a page request for the next file's pages.
 	 */
 	ret = __rep_filedone(dbenv, eid, rep, msgfp, REP_PAGE_FAIL);
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 	return (ret);
 }
 
@@ -1057,6 +1185,7 @@ __rep_write_page(dbenv, rep, msgfp)
 	char *real_name;
 
 	real_name = NULL;
+	rfp = NULL;
 
 	/*
 	 * If this is the first page we're putting in this database, we need
@@ -1067,9 +1196,8 @@ __rep_write_page(dbenv, rep, msgfp)
 	 * We need to create the file, removing any existing file and associate
 	 * the correct file ID with the new one.
 	 */
+	rfp = rep->curinfo;
 	if (rep->file_mpf == NULL) {
-		rfp = rep->curinfo;
-
 		if (!F_ISSET(rfp, DB_AM_INMEM)) {
 			if ((ret = __db_appname(dbenv, DB_APP_DATA,
 			    rfp->info.data, 0, NULL, &real_name)) != 0)
@@ -1079,7 +1207,7 @@ __rep_write_page(dbenv, rep, msgfp)
 			 * fileid from mpool and unlink it on disk.
 			 */
 			if ((ret = __memp_nameop(dbenv,
-			    rfp->uid.data, NULL, real_name, NULL)) != 0)
+			    rfp->uid.data, NULL, real_name, NULL, 0)) != 0)
 				goto err;
 			/*
 			 * Create the file on disk.  We'll be putting the data
@@ -1093,7 +1221,8 @@ __rep_write_page(dbenv, rep, msgfp)
 		}
 
 		if ((ret =
-		    __rep_mpf_open(dbenv, &rep->file_mpf, rep->curinfo)) != 0)
+		    __rep_mpf_open(dbenv, &rep->file_mpf, rep->curinfo,
+		    F_ISSET(rfp, DB_AM_INMEM) ? DB_CREATE : 0)) != 0)
 			goto err;
 	}
 	/*
@@ -1104,20 +1233,24 @@ __rep_write_page(dbenv, rep, msgfp)
 	 * we'll use the normal path for that first page.  After that we
 	 * can assume the dbp is opened.
 	 */
-	if (msgfp->type == DB_QUEUE && msgfp->pgno != 0) {
+	if (msgfp->type == (u_int32_t)DB_QUEUE && msgfp->pgno != 0) {
 #ifdef HAVE_QUEUE
 		if ((ret = __qam_fget(
 		    rep->queue_dbp, &msgfp->pgno, DB_MPOOL_CREATE, &dst)) != 0)
-#else
-		if ((ret = __db_no_queue_am(dbenv)) != 0)
-#endif
 			goto err;
+#else
+		/*
+		 * This always returns an error.
+		 */
+		ret = __db_no_queue_am(dbenv);
+		goto err;
+#endif
 	} else if ((ret = __memp_fget(
 		    rep->file_mpf, &msgfp->pgno, DB_MPOOL_CREATE, &dst)) != 0)
 			goto err;
 
 	memcpy(dst, msgfp->info.data, msgfp->pgsize);
-	if (msgfp->type != DB_QUEUE || msgfp->pgno == 0)
+	if (msgfp->type != (u_int32_t)DB_QUEUE || msgfp->pgno == 0)
 		ret = __memp_fput(rep->file_mpf, dst, DB_MPOOL_DIRTY);
 #ifdef HAVE_QUEUE
 	else
@@ -1143,7 +1276,6 @@ __rep_page_gap(dbenv, rep, msgfp, type)
 	u_int32_t type;
 {
 	DB_LOG *dblp;
-	DB_REP *db_rep;
 	DBT data, key;
 	LOG *lp;
 	__rep_fileinfo_args *rfp;
@@ -1153,10 +1285,9 @@ __rep_page_gap(dbenv, rep, msgfp, type)
 	DB_MSGBUF mb;
 #endif
 
-	db_rep = dbenv->rep_handle;
-	ret = 0;
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
+	ret = 0;
 
 	/*
 	 * We've successfully put this page into our file.
@@ -1168,9 +1299,9 @@ __rep_page_gap(dbenv, rep, msgfp, type)
 	 * So we need to drop it, acquire both in the right order and
 	 * then recheck the state of the world.
 	 */
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	REP_SYSTEM_LOCK(dbenv);
 	rfp = rep->curinfo;
 
 	/*
@@ -1276,7 +1407,7 @@ __rep_page_gap(dbenv, rep, msgfp, type)
 		 */
 		if ((__rep_check_doreq(dbenv, rep) || type == REP_PAGE_MORE) &&
 		    ((ret = __rep_pggap_req(dbenv, rep, rfp,
-		    type == REP_PAGE_MORE)) != 0))
+		    (type == REP_PAGE_MORE) ? REP_GAP_FORCE : 0)) != 0))
 			goto err;
 	} else {
 		lp->wait_recs = 0;
@@ -1284,7 +1415,57 @@ __rep_page_gap(dbenv, rep, msgfp, type)
 	}
 
 err:
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	return (ret);
+}
+
+/*
+ * __rep_init_cleanup -
+ *	Clean up internal initialization pieces.
+ *
+ * PUBLIC: int __rep_init_cleanup __P((DB_ENV *, REP *, int));
+ */
+int
+__rep_init_cleanup(dbenv, rep, force)
+	DB_ENV *dbenv;
+	REP *rep;
+	int force;
+{
+	int ret, t_ret;
+
+	ret = 0;
+	/*
+	 * 1.  Close up the file data pointer we used.
+	 * 2.  Close/reset the page database.
+	 * 3.  Close/reset the queue database if we're forcing a cleanup.
+	 * 4.  Free current file info.
+	 * 5.  If we have all files or need to force, free original file info.
+	 */
+	if (rep->file_mpf != NULL) {
+		ret = __memp_fclose(rep->file_mpf, 0);
+		rep->file_mpf = NULL;
+	}
+	if (rep->file_dbp != NULL) {
+		t_ret = __db_close(rep->file_dbp, NULL, DB_NOSYNC);
+		rep->file_dbp = NULL;
+		if (t_ret != 0 && ret == 0)
+			ret = t_ret;
+	}
+	if (force && rep->queue_dbp != NULL) {
+		t_ret = __db_close(rep->queue_dbp, NULL, DB_NOSYNC);
+		rep->queue_dbp = NULL;
+		if (t_ret != 0 && ret == 0)
+			ret = t_ret;
+	}
+	if (rep->curinfo != NULL) {
+		__os_free(dbenv, rep->curinfo);
+		rep->curinfo = NULL;
+	}
+	if (rep->originfo != NULL &&
+	    (force || ++rep->curfile == rep->nfiles)) {
+		__os_free(dbenv, rep->originfo);
+		rep->originfo = NULL;
+	}
 	return (ret);
 }
 
@@ -1306,14 +1487,12 @@ __rep_filedone(dbenv, eid, rep, msgfp, type)
 	u_int32_t type;
 {
 	DBT dbt;
-	DB_REP *db_rep;
 	__rep_fileinfo_args *rfp;
 	int ret;
 #ifdef DIAGNOSTIC
 	DB_MSGBUF mb;
 #endif
 
-	db_rep = dbenv->rep_handle;
 	/*
 	 * We've put our page, now we need to do any gap processing
 	 * that might be needed to re-request pages.
@@ -1341,47 +1520,22 @@ __rep_filedone(dbenv, eid, rep, msgfp, type)
 	 * we need to do special queue processing.  Queue is handled in
 	 * several stages.
 	 */
-	if (rfp->type == DB_QUEUE &&
+	if (rfp->type == (u_int32_t)DB_QUEUE &&
 	    ((ret = __rep_queue_filedone(dbenv, rep, rfp)) !=
 	    DB_REP_PAGEDONE))
 		return (ret);
 	/*
-	 * We have all the pages for this file.  We need to:
-	 * 1.  Close up the file data pointer we used.
-	 * 2.  Close/reset the page database.
-	 * 3.  Check if we have all file data.  If so, request logs.
-	 * 4.  If not, set curfile to next file and request its pages.
+	 * We have all the pages for this file.  Clean up.
 	 */
-	/*
-	 * 1.  Close up the file data pointer we used.
-	 */
-	if (rep->file_mpf != NULL) {
-		ret = __memp_fclose(rep->file_mpf, 0);
-		rep->file_mpf = NULL;
-		if (ret != 0)
-			goto err;
-	}
-
-	/*
-	 * 2.  Close/reset the page database.
-	 */
-	ret = __db_close(rep->file_dbp, NULL, DB_NOSYNC);
-	rep->file_dbp = NULL;
-	if (ret != 0)
+	if ((ret = __rep_init_cleanup(dbenv, rep, 0)) != 0)
 		goto err;
-
-	/*
-	 * 3.  Check if we have all file data.  If so, request logs.
-	 */
-	__os_free(dbenv, rep->curinfo);
-	if (++rep->curfile == rep->nfiles) {
+	if (rep->curfile == rep->nfiles) {
 		RPRINT(dbenv, rep, (dbenv, &mb,
 		    "FILEDONE: have %d files.  RECOVER_LOG now", rep->nfiles));
 		/*
 		 * Move to REP_RECOVER_LOG state.
 		 * Request logs.
 		 */
-		__os_free(dbenv, rep->originfo);
 		/*
 		 * We need to do a sync here so that any later opens
 		 * can find the file and file id.  We need to do it
@@ -1395,16 +1549,16 @@ __rep_filedone(dbenv, eid, rep, msgfp, type)
 		memset(&dbt, 0, sizeof(dbt));
 		dbt.data = &rep->last_lsn;
 		dbt.size = sizeof(rep->last_lsn);
+		REP_SYSTEM_UNLOCK(dbenv);
+		if ((ret = __rep_log_setup(dbenv, rep)) != 0)
+			goto err;
 		RPRINT(dbenv, rep, (dbenv, &mb,
 		    "FILEDONE: LOG_REQ from LSN [%lu][%lu] to [%lu][%lu]",
 		    (u_long)rep->first_lsn.file, (u_long)rep->first_lsn.offset,
 		    (u_long)rep->last_lsn.file, (u_long)rep->last_lsn.offset));
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		if ((ret = __rep_log_setup(dbenv, rep)) != 0)
-			goto err;
 		(void)__rep_send_message(dbenv, eid,
-		    REP_LOG_REQ, &rep->first_lsn, &dbt, 0);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		    REP_LOG_REQ, &rep->first_lsn, &dbt, 0, DB_REP_ANYWHERE);
+		REP_SYSTEM_LOCK(dbenv);
 		return (0);
 	}
 
@@ -1428,7 +1582,7 @@ __rep_filedone(dbenv, eid, rep, msgfp, type)
 	dbt.size = (u_int32_t)((u_int8_t *)rep->nextinfo -
 	    (u_int8_t *)rep->finfo);
 	(void)__rep_send_message(dbenv, eid, REP_PAGE_REQ,
-	    NULL, &dbt, 0);
+	    NULL, &dbt, 0, DB_REP_ANYWHERE);
 err:
 	return (ret);
 }
@@ -1439,10 +1593,11 @@ err:
  *	Used by both master and client to bring files into mpool.
  */
 static int
-__rep_mpf_open(dbenv, mpfp, rfp)
+__rep_mpf_open(dbenv, mpfp, rfp, flags)
 	DB_ENV *dbenv;
 	DB_MPOOLFILE **mpfp;
 	__rep_fileinfo_args *rfp;
+	u_int32_t flags;
 {
 	DB db;
 	int ret;
@@ -1454,13 +1609,17 @@ __rep_mpf_open(dbenv, mpfp, rfp)
 	 * We need a dbp to pass into to __db_dbenv_mpool.  Set up
 	 * only the parts that it needs.
 	 */
-	db.type = rfp->type;
-	db.pgsize = (u_int32_t)rfp->pgsize;
+	db.dbenv = dbenv;
+	db.type = (DBTYPE)rfp->type;
+	db.pgsize = rfp->pgsize;
 	memcpy(db.fileid, rfp->uid.data, DB_FILE_ID_LEN);
 	db.flags = rfp->flags;
+	/* We need to make sure the dbp isn't marked open. */
+	F_CLR(&db, DB_AM_OPEN_CALLED);
 	db.mpf = *mpfp;
-	db.dbenv = dbenv;
-	if ((ret = __db_dbenv_mpool(&db, rfp->info.data, 0)) != 0) {
+	if (F_ISSET(&db, DB_AM_INMEM))
+		(void)__memp_set_flags(db.mpf, DB_MPOOL_NOFILE, 1);
+	if ((ret = __db_dbenv_mpool(&db, rfp->info.data, flags)) != 0) {
 		(void)__memp_fclose(*mpfp, 0);
 		*mpfp = NULL;
 	}
@@ -1469,21 +1628,22 @@ __rep_mpf_open(dbenv, mpfp, rfp)
 
 /*
  * __rep_pggap_req -
- *	Request a page gap.  Assumes the caller holds the rep_mutexp.
+ *	Request a page gap.  Assumes the caller holds the rep_mutex.
  *
  * PUBLIC: int __rep_pggap_req __P((DB_ENV *, REP *, __rep_fileinfo_args *,
- * PUBLIC:    int));
+ * PUBLIC:    u_int32_t));
  */
 int
-__rep_pggap_req(dbenv, rep, reqfp, moregap)
+__rep_pggap_req(dbenv, rep, reqfp, gapflags)
 	DB_ENV *dbenv;
 	REP *rep;
 	__rep_fileinfo_args *reqfp;
-	int moregap;
+	u_int32_t gapflags;
 {
 	DBT max_pg_dbt;
 	__rep_fileinfo_args *tmpfp;
 	size_t len;
+	u_int32_t flags;
 	int alloc, ret;
 
 	ret = 0;
@@ -1511,30 +1671,44 @@ __rep_pggap_req(dbenv, rep, reqfp, moregap)
 	 * page we have.  If we have requested this page
 	 * then only request this record, not the entire gap.
 	 */
+	flags = 0;
 	memset(&max_pg_dbt, 0, sizeof(max_pg_dbt));
 	tmpfp->pgno = rep->ready_pg;
 	max_pg_dbt.data = rep->finfo;
 	max_pg_dbt.size = (u_int32_t)((u_int8_t *)rep->nextinfo -
 	    (u_int8_t *)rep->finfo);
-	if (rep->max_wait_pg == PGNO_INVALID || moregap) {
+	if (rep->max_wait_pg == PGNO_INVALID ||
+	    FLD_ISSET(gapflags, REP_GAP_FORCE | REP_GAP_REREQUEST)) {
 		/*
 		 * Request the gap - set max to waiting_pg - 1 or if
 		 * there is no waiting_pg, just ask for one.
 		 */
 		if (rep->waiting_pg == PGNO_INVALID) {
-			if (moregap)
+			if (FLD_ISSET(gapflags,
+			    REP_GAP_FORCE | REP_GAP_REREQUEST))
 				rep->max_wait_pg = rep->curinfo->max_pgno;
 			else
 				rep->max_wait_pg = rep->ready_pg;
 		} else
 			rep->max_wait_pg = rep->waiting_pg - 1;
 		tmpfp->max_pgno = rep->max_wait_pg;
+		/*
+		 * Gap requests are "new" and can go anywhere.
+		 */
+		if (FLD_ISSET(gapflags, REP_GAP_REREQUEST))
+			flags = DB_REP_REREQUEST;
+		else
+			flags = DB_REP_ANYWHERE;
 	} else {
 		/*
 		 * Request 1 page - set max to ready_pg.
 		 */
 		rep->max_wait_pg = rep->ready_pg;
 		tmpfp->max_pgno = rep->ready_pg;
+		/*
+		 * If we're dropping to singletons, this is a rerequest.
+		 */
+		flags = DB_REP_REREQUEST;
 	}
 	if (rep->master_id != DB_EID_INVALID) {
 		rep->stat.st_pg_requested++;
@@ -1550,69 +1724,16 @@ __rep_pggap_req(dbenv, rep, reqfp, moregap)
 		    tmpfp->flags, &tmpfp->uid, &tmpfp->info);
 		DB_ASSERT(len == max_pg_dbt.size);
 		(void)__rep_send_message(dbenv, rep->master_id,
-		    REP_PAGE_REQ, NULL, &max_pg_dbt, 0);
+		    REP_PAGE_REQ, NULL, &max_pg_dbt, 0, flags);
 	} else
 		(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
-		    REP_MASTER_REQ, NULL, NULL, 0);
+		    REP_MASTER_REQ, NULL, NULL, 0, 0);
 
 	if (alloc)
 		__os_free(dbenv, tmpfp);
 	return (ret);
 }
 
-/*
- * __rep_loggap_req -
- *	Request a log gap.  Assumes the caller holds the db_mutexp.
- *
- * PUBLIC: void __rep_loggap_req __P((DB_ENV *, REP *, DB_LSN *, int));
- */
-void
-__rep_loggap_req(dbenv, rep, lsnp, moregap)
-	DB_ENV *dbenv;
-	REP *rep;
-	DB_LSN *lsnp;
-	int moregap;
-{
-	DB_LOG *dblp;
-	DBT max_lsn_dbt, *max_lsn_dbtp;
-	DB_LSN next_lsn;
-	LOG *lp;
-
-	dblp = dbenv->lg_handle;
-	lp = dblp->reginfo.primary;
-	R_LOCK(dbenv, &dblp->reginfo);
-	next_lsn = lp->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
-
-	if (moregap ||
-	    (lsnp != NULL &&
-	    (log_compare(lsnp, &lp->max_wait_lsn) == 0 ||
-	    IS_ZERO_LSN(lp->max_wait_lsn)))) {
-		/*
-		 * We need to ask for the gap.  Either we never asked
-		 * for records before, or we asked for a single record
-		 * and received it.
-		 */
-		lp->max_wait_lsn = lp->waiting_lsn;
-		memset(&max_lsn_dbt, 0, sizeof(max_lsn_dbt));
-		max_lsn_dbt.data = &lp->waiting_lsn;
-		max_lsn_dbt.size = sizeof(lp->waiting_lsn);
-		max_lsn_dbtp = &max_lsn_dbt;
-	} else {
-		max_lsn_dbtp = NULL;
-		lp->max_wait_lsn = next_lsn;
-	}
-	if (rep->master_id != DB_EID_INVALID) {
-		rep->stat.st_log_requested++;
-		(void)__rep_send_message(dbenv, rep->master_id,
-		    REP_LOG_REQ, &next_lsn, max_lsn_dbtp, 0);
-	} else
-		(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
-		    REP_MASTER_REQ, NULL, NULL, 0);
-
-	return;
-}
-
 /*
  * __rep_finfo_alloc -
  *	Allocate and initialize a fileinfo structure.
@@ -1625,15 +1746,32 @@ __rep_finfo_alloc(dbenv, rfpsrc, rfpp)
 	DB_ENV *dbenv;
 	__rep_fileinfo_args *rfpsrc, **rfpp;
 {
+	__rep_fileinfo_args *rfp;
 	size_t size;
 	int ret;
+	void *uidp, *infop;
 
+	/*
+	 * Allocate enough for the structure and the two DBT data areas.
+	 */
 	size = sizeof(__rep_fileinfo_args) + rfpsrc->uid.size +
 	    rfpsrc->info.size;
-	if ((ret = __os_malloc(dbenv, size, rfpp)) != 0)
+	if ((ret = __os_malloc(dbenv, size, &rfp)) != 0)
 		return (ret);
 
-	memcpy(*rfpp, rfpsrc, size);
+	/*
+	 * Copy the structure itself, and then set the DBT data pointers
+	 * to their space and copy the data itself as well.
+	 */
+	memcpy(rfp, rfpsrc, sizeof(__rep_fileinfo_args));
+	uidp = (u_int8_t *)rfp + sizeof(__rep_fileinfo_args);
+	rfp->uid.data = uidp;
+	memcpy(uidp, rfpsrc->uid.data, rfpsrc->uid.size);
+
+	infop = (u_int8_t *)uidp + rfpsrc->uid.size;
+	rfp->info.data = infop;
+	memcpy(infop, rfpsrc->info.data, rfpsrc->info.size);
+	*rfpp = rfp;
 	return (ret);
 }
 
@@ -1649,30 +1787,44 @@ __rep_log_setup(dbenv, rep)
 {
 	DB_LOG *dblp;
 	DB_LSN lsn;
-	u_int32_t fnum;
+	DB_TXNMGR *mgr;
+	DB_TXNREGION *region;
+	LOG *lp;
+	u_int32_t fnum, lastfile;
 	int ret;
 	char *name;
 
 	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	mgr = dbenv->tx_handle;
+	region = mgr->reginfo.primary;
+
+	/*
+	 * Forcibly remove *all* existing log files.
+	 */
+	lastfile = lp->lsn.file;
+	for (fnum = 1; fnum <= lastfile; fnum++) {
+		if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+			goto err;
+		(void)__os_unlink(dbenv, name);
+		__os_free(dbenv, name);
+	}
 	/*
 	 * Set up the log starting at the file number of the first LSN we
 	 * need to get from the master.
 	 */
-	if ((ret = __log_newfile(dblp, &lsn, rep->first_lsn.file)) == 0) {
-		/*
-		 * We do know we want to start this client's log at
-		 * log file 'first_lsn.file'.  So we want to forcibly
-		 * remove any log files earlier than that number.
-		 * We don't know what might be in any earlier log files
-		 * so we cannot just use __log_autoremove.
-		 */
-		for (fnum = 1; fnum < rep->first_lsn.file; fnum++) {
-			if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
-				goto err;
-			(void)__os_unlink(dbenv, name);
-			__os_free(dbenv, name);
-		}
-	}
+	ret = __log_newfile(dblp, &lsn, rep->first_lsn.file);
+
+	/*
+	 * We reset first_lsn to the lp->lsn.  We were given the LSN of
+	 * the checkpoint and we now need the LSN for the beginning of
+	 * the file, which __log_newfile conveniently set up for us
+	 * in lp->lsn.
+	 */
+	rep->first_lsn = lp->lsn;
+	TXN_SYSTEM_LOCK(dbenv);
+	ZERO_LSN(region->last_ckp);
+	TXN_SYSTEM_UNLOCK(dbenv);
 err:
 	return (ret);
 }
@@ -1723,13 +1875,18 @@ __rep_queue_filedone(dbenv, rep, rfp)
 		 */
 		if ((ret = __memp_sync(dbenv, NULL)) != 0)
 			goto out;
-		if ((ret = db_create(&rep->queue_dbp, dbenv,
-		    DB_REP_CREATE)) != 0)
+		if ((ret = db_create(&rep->queue_dbp, dbenv, 0)) != 0)
 			goto out;
 		flags = DB_NO_AUTO_COMMIT |
 		    (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0);
-		if ((ret = __db_open(rep->queue_dbp, NULL, rfp->info.data,
-		    NULL, DB_QUEUE, flags, 0, PGNO_BASE_MD)) != 0)
+		/*
+		 * We need to check whether this is in-memory so that we pass
+		 * the name correctly as either the file or the database name.
+		 */
+		if ((ret = __db_open(rep->queue_dbp, NULL,
+		    FLD_ISSET(rfp->flags, DB_AM_INMEM) ? NULL : rfp->info.data,
+		    FLD_ISSET(rfp->flags, DB_AM_INMEM) ? rfp->info.data : NULL,
+		    DB_QUEUE, flags, 0, PGNO_BASE_MD)) != 0)
 			goto out;
 	}
 	if ((ret = __queue_pageinfo(rep->queue_dbp,
diff --git a/storage/bdb/rep/rep_elect.c b/storage/bdb/rep/rep_elect.c
new file mode 100644
index 00000000000..71813532fa1
--- /dev/null
+++ b/storage/bdb/rep/rep_elect.c
@@ -0,0 +1,939 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: rep_elect.c,v 12.10 2005/08/23 14:18:19 sue Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include 
+#include 
+#if TIME_WITH_SYS_TIME
+#include 
+#include 
+#else
+#if HAVE_SYS_TIME_H
+#include 
+#else
+#include 
+#endif
+#endif
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+
+static void __rep_cmp_vote __P((DB_ENV *, REP *, int, DB_LSN *,
+    int, u_int32_t, u_int32_t));
+static int __rep_cmp_vote2 __P((DB_ENV *, REP *, int, u_int32_t));
+static int __rep_elect_init
+	       __P((DB_ENV *, DB_LSN *, int, int, int, int *, u_int32_t *));
+static int __rep_tally __P((DB_ENV *, REP *, int, int *, u_int32_t, roff_t));
+static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t));
+
+/*
+ * __rep_elect --
+ *	Called after master failure to hold/participate in an election for
+ *	a new master.
+ *
+ * PUBLIC:  int __rep_elect __P((DB_ENV *, int, int, int,
+ * PUBLIC:     u_int32_t, int *, u_int32_t));
+ */
+int
+__rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags)
+	DB_ENV *dbenv;
+	int nsites, nvotes, priority;
+	u_int32_t timeout;
+	int *eidp;
+	u_int32_t flags;
+{
+	DB_LOG *dblp;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	REP *rep;
+	int ack, done, in_progress, ret, send_vote;
+	u_int32_t egen, orig_tally, tiebreaker, to;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	PANIC_CHECK(dbenv);
+	COMPQUIET(flags, 0);
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle, "rep_elect", DB_INIT_REP);
+
+	/* Error checking. */
+	if (nsites <= 0) {
+		__db_err(dbenv,
+		    "DB_ENV->rep_elect: nsites must be greater than 0");
+		return (EINVAL);
+	}
+	if (nvotes < 0) {
+		__db_err(dbenv,
+		    "DB_ENV->rep_elect: nvotes may not be negative");
+		return (EINVAL);
+	}
+	if (priority < 0) {
+		__db_err(dbenv,
+		    "DB_ENV->rep_elect: priority may not be negative");
+		return (EINVAL);
+	}
+	if (nsites < nvotes) {
+		__db_err(dbenv,
+    "DB_ENV->rep_elect: nvotes (%d) is larger than nsites (%d)",
+		    nvotes, nsites);
+		return (EINVAL);
+	}
+
+	ack = nvotes;
+	/* If they give us a 0 for nvotes, default to simple majority.  */
+	if (nvotes == 0)
+		ack = (nsites / 2) + 1;
+
+	/*
+	 * XXX
+	 * If users give us less than a majority, they run the risk of
+	 * having a network partition.  However, this also allows the
+	 * scenario of master/1 client to elect the client.  Allow
+	 * sub-majority values, but give a warning.
+	 */
+	if (nvotes <= (nsites / 2)) {
+		__db_err(dbenv,
+    "DB_ENV->rep_elect:WARNING: nvotes (%d) is sub-majority with nsites (%d)",
+		    nvotes, nsites);
+	}
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+
+	RPRINT(dbenv, rep,
+	    (dbenv, &mb, "Start election nsites %d, ack %d, priority %d",
+	    nsites, ack, priority));
+
+	LOG_SYSTEM_LOCK(dbenv);
+	lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+	LOG_SYSTEM_UNLOCK(dbenv);
+
+	orig_tally = 0;
+	to = timeout;
+	if ((ret = __rep_elect_init(dbenv,
+	    &lsn, nsites, ack, priority, &in_progress, &orig_tally)) != 0) {
+		if (ret == DB_REP_NEWMASTER) {
+			ret = 0;
+			*eidp = dbenv->rep_eid;
+		}
+		goto err;
+	}
+	/*
+	 * If another thread is in the middle of an election we
+	 * just quietly return and not interfere.
+	 */
+	if (in_progress) {
+		*eidp = rep->master_id;
+		return (0);
+	}
+	__os_clock(dbenv, &rep->esec, &rep->eusec);
+restart:
+	/* Generate a randomized tiebreaker value. */
+	__os_unique_id(dbenv, &tiebreaker);
+
+	REP_SYSTEM_LOCK(dbenv);
+	F_SET(rep, REP_F_EPHASE1 | REP_F_NOARCHIVE);
+	F_CLR(rep, REP_F_TALLY);
+
+	/*
+	 * We are about to participate at this egen.  We must
+	 * write out the next egen before participating in this one
+	 * so that if we crash we can never participate in this egen
+	 * again.
+	 */
+	if ((ret = __rep_write_egen(dbenv, rep->egen + 1)) != 0)
+		goto lockdone;
+
+	/* Tally our own vote */
+	if (__rep_tally(dbenv, rep, rep->eid, &rep->sites, rep->egen,
+	    rep->tally_off) != 0) {
+		ret = EINVAL;
+		goto lockdone;
+	}
+	__rep_cmp_vote(dbenv, rep, rep->eid, &lsn, priority, rep->gen,
+	    tiebreaker);
+
+	RPRINT(dbenv, rep, (dbenv, &mb, "Beginning an election"));
+
+	/* Now send vote */
+	send_vote = DB_EID_INVALID;
+	egen = rep->egen;
+	REP_SYSTEM_UNLOCK(dbenv);
+	__rep_send_vote(dbenv, &lsn, nsites, ack, priority, tiebreaker, egen,
+	    DB_EID_BROADCAST, REP_VOTE1);
+	DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL);
+	ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE1);
+	switch (ret) {
+		case 0:
+			/* Check if election complete or phase complete. */
+			if (*eidp != DB_EID_INVALID) {
+				RPRINT(dbenv, rep, (dbenv, &mb,
+				    "Ended election phase 1 %d", ret));
+				goto edone;
+			}
+			goto phase2;
+		case DB_REP_EGENCHG:
+			if (to > timeout)
+				to = timeout;
+			to = (to * 8) / 10;
+			RPRINT(dbenv, rep, (dbenv, &mb,
+"Egen changed while waiting. Now %lu.  New timeout %lu, orig timeout %lu",
+			    (u_long)rep->egen, (u_long)to, (u_long)timeout));
+			/*
+			 * If the egen changed while we were sleeping, that
+			 * means we're probably late to the next election,
+			 * so we'll backoff our timeout so that we don't get
+			 * into an out-of-phase election scenario.
+			 *
+			 * Backoff to 80% of the current timeout.
+			 */
+			goto restart;
+		case DB_TIMEOUT:
+			break;
+		default:
+			goto err;
+	}
+	/*
+	 * If we got here, we haven't heard from everyone, but we've
+	 * run out of time, so it's time to decide if we have enough
+	 * votes to pick a winner and if so, to send out a vote to
+	 * the winner.
+	 */
+	REP_SYSTEM_LOCK(dbenv);
+	/*
+	 * If our egen changed while we were waiting.  We need to
+	 * essentially reinitialize our election.
+	 */
+	if (egen != rep->egen) {
+		REP_SYSTEM_UNLOCK(dbenv);
+		RPRINT(dbenv, rep, (dbenv, &mb, "Egen changed from %lu to %lu",
+		    (u_long)egen, (u_long)rep->egen));
+		goto restart;
+	}
+	if (rep->sites >= rep->nvotes) {
+
+		/* We think we've seen enough to cast a vote. */
+		send_vote = rep->winner;
+		/*
+		 * See if we won.  This will make sure we
+		 * don't count ourselves twice if we're racing
+		 * with incoming votes.
+		 */
+		if (rep->winner == rep->eid) {
+			(void)__rep_tally(dbenv, rep, rep->eid, &rep->votes,
+			    egen, rep->v2tally_off);
+			RPRINT(dbenv, rep, (dbenv, &mb,
+			    "Counted my vote %d", rep->votes));
+		}
+		F_SET(rep, REP_F_EPHASE2);
+		F_CLR(rep, REP_F_EPHASE1);
+	}
+	REP_SYSTEM_UNLOCK(dbenv);
+	if (send_vote == DB_EID_INVALID) {
+		/* We do not have enough votes to elect. */
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Not enough votes to elect: recvd %d of %d from %d sites",
+		    rep->sites, rep->nvotes, rep->nsites));
+		ret = DB_REP_UNAVAIL;
+		goto err;
+
+	} else {
+		/*
+		 * We have seen enough vote1's.  Now we need to wait
+		 * for all the vote2's.
+		 */
+		if (send_vote != rep->eid) {
+			RPRINT(dbenv, rep, (dbenv, &mb, "Sending vote"));
+			__rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen,
+			    send_vote, REP_VOTE2);
+			/*
+			 * If we are NOT the new master we want to send
+			 * our vote to the winner, and wait longer.  The
+			 * reason is that the winner may be "behind" us
+			 * in the election waiting and if the master is
+			 * down, the winner will wait the full timeout
+			 * and we want to give the winner enough time to
+			 * process all the votes.  Otherwise we could
+			 * incorrectly return DB_REP_UNAVAIL and start a
+			 * new election before the winner can declare
+			 * itself.
+			 */
+			to = to * 2;
+
+		}
+
+phase2:		ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE2);
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Ended election phase 2 %d", ret));
+		switch (ret) {
+			case 0:
+				goto edone;
+			case DB_REP_EGENCHG:
+				if (to > timeout)
+					to = timeout;
+				to = (to * 8) / 10;
+				RPRINT(dbenv, rep, (dbenv, &mb,
+"While waiting egen changed to %lu.  Phase 2 New timeout %lu, orig timeout %lu",
+				    (u_long)rep->egen,
+				    (u_long)to, (u_long)timeout));
+				goto restart;
+			case DB_TIMEOUT:
+				ret = DB_REP_UNAVAIL;
+				break;
+			default:
+				goto err;
+		}
+		REP_SYSTEM_LOCK(dbenv);
+		if (egen != rep->egen) {
+			REP_SYSTEM_UNLOCK(dbenv);
+			RPRINT(dbenv, rep, (dbenv, &mb,
+			    "Egen ph2 changed from %lu to %lu",
+			    (u_long)egen, (u_long)rep->egen));
+			goto restart;
+		}
+		done = rep->votes >= rep->nvotes;
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "After phase 2: done %d, votes %d, nsites %d",
+		    done, rep->votes, rep->nsites));
+		if (send_vote == rep->eid && done) {
+			__rep_elect_master(dbenv, rep, eidp);
+			ret = 0;
+			goto lockdone;
+		}
+		REP_SYSTEM_UNLOCK(dbenv);
+	}
+
+err:	REP_SYSTEM_LOCK(dbenv);
+lockdone:
+	/*
+	 * If we get here because of a non-election error, then we
+	 * did not tally our vote.  The only non-election error is
+	 * from elect_init where we were unable to grow_sites.  In
+	 * that case we do not want to discard all known election info.
+	 */
+	if (ret == 0 || ret == DB_REP_UNAVAIL)
+		__rep_elect_done(dbenv, rep);
+	else if (orig_tally)
+		F_SET(rep, orig_tally);
+
+	/*
+	 * If the election finished elsewhere, we need to decrement
+	 * the elect_th anyway.
+	 */
+	if (0) {
+edone:		REP_SYSTEM_LOCK(dbenv);
+	}
+	rep->elect_th = 0;
+
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "Ended election with %d, sites %d, egen %lu, flags 0x%lx",
+	    ret, rep->sites, (u_long)rep->egen, (u_long)rep->flags));
+	REP_SYSTEM_UNLOCK(dbenv);
+
+DB_TEST_RECOVERY_LABEL
+	return (ret);
+}
+
+/*
+ * __rep_vote1 --
+ *	Handle incoming vote1 message on a client.
+ *
+ *	Tally the vote, fold the sender's nsites/nvotes into our own,
+ *	and track the best candidate seen so far.  If this vote completes
+ *	phase 1, switch to phase 2 and cast our VOTE2 for the winner.
+ *	Returns DB_REP_HOLDELECTION when a vote arrives while we are not
+ *	ourselves participating in an election.
+ *
+ * PUBLIC: int __rep_vote1 __P((DB_ENV *, REP_CONTROL *, DBT *, int));
+ */
+int
+__rep_vote1(dbenv, rp, rec, eid)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	int eid;
+{
+	DB_LOG *dblp;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	DBT data_dbt;
+	LOG *lp;
+	REP *rep;
+	REP_VOTE_INFO *vi;
+	u_int32_t egen;
+	int done, master, ret;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	/* If we are already master, simply reassert that fact and return. */
+	if (F_ISSET(rep, REP_F_MASTER)) {
+		RPRINT(dbenv, rep,
+		    (dbenv, &mb, "Master received vote"));
+		LOG_SYSTEM_LOCK(dbenv);
+		lsn = lp->lsn;
+		LOG_SYSTEM_UNLOCK(dbenv);
+		(void)__rep_send_message(dbenv,
+		    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0, 0);
+		return (ret);
+	}
+
+	vi = (REP_VOTE_INFO *)rec->data;
+	REP_SYSTEM_LOCK(dbenv);
+
+	/*
+	 * If we get a vote from a later election gen, we
+	 * clear everything from the current one, and we'll
+	 * start over by tallying it.  If we get an old vote,
+	 * send an ALIVE to the old participant.
+	 */
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "Received vote1 egen %lu, egen %lu",
+	    (u_long)vi->egen, (u_long)rep->egen));
+	if (vi->egen < rep->egen) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Received old vote %lu, egen %lu, ignoring vote1",
+		    (u_long)vi->egen, (u_long)rep->egen));
+		egen = rep->egen;
+		REP_SYSTEM_UNLOCK(dbenv);
+		/*
+		 * NOTE(review): data_dbt is only partially initialized here
+		 * (data and size fields); this assumes the messaging layer
+		 * reads no other DBT fields -- confirm against
+		 * __rep_send_message.
+		 */
+		data_dbt.data = &egen;
+		data_dbt.size = sizeof(egen);
+		(void)__rep_send_message(dbenv,
+		    eid, REP_ALIVE, &rp->lsn, &data_dbt, 0, 0);
+		return (ret);
+	}
+	if (vi->egen > rep->egen) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Received VOTE1 from egen %lu, my egen %lu; reset",
+		    (u_long)vi->egen, (u_long)rep->egen));
+		__rep_elect_done(dbenv, rep);
+		rep->egen = vi->egen;
+	}
+	/*
+	 * If we are not yet in an election, remember that we have begun
+	 * tallying votes so a later election thread can pick them up.
+	 */
+	if (!IN_ELECTION(rep))
+		F_SET(rep, REP_F_TALLY);
+
+	/* Check if this site knows about more sites than we do. */
+	if (vi->nsites > rep->nsites)
+		rep->nsites = vi->nsites;
+
+	/* Check if this site requires more votes than we do. */
+	if (vi->nvotes > rep->nvotes)
+		rep->nvotes = vi->nvotes;
+
+	/*
+	 * We are keeping the vote, let's see if that changes our
+	 * count of the number of sites.
+	 */
+	if (rep->sites + 1 > rep->nsites)
+		rep->nsites = rep->sites + 1;
+	if (rep->nsites > rep->asites &&
+	    (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Grow sites returned error %d", ret));
+		goto err;
+	}
+
+	/*
+	 * Ignore vote1's if we're in phase 2.
+	 */
+	if (F_ISSET(rep, REP_F_EPHASE2)) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "In phase 2, ignoring vote1"));
+		goto err;
+	}
+
+	/*
+	 * Record this vote.  If we get back non-zero, we
+	 * ignore the vote.
+	 */
+	if ((ret = __rep_tally(dbenv, rep, eid, &rep->sites,
+	    vi->egen, rep->tally_off)) != 0) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Tally returned %d, sites %d",
+		    ret, rep->sites));
+		/* A rejected duplicate/stale vote is not an error. */
+		ret = 0;
+		goto err;
+	}
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "Incoming vote: (eid)%d (pri)%d (gen)%lu (egen)%lu [%lu,%lu]",
+	    eid, vi->priority,
+	    (u_long)rp->gen, (u_long)vi->egen,
+	    (u_long)rp->lsn.file, (u_long)rp->lsn.offset));
+#ifdef DIAGNOSTIC
+	if (rep->sites > 1)
+		RPRINT(dbenv, rep, (dbenv, &mb,
+    "Existing vote: (eid)%d (pri)%d (gen)%lu (sites)%d [%lu,%lu]",
+		    rep->winner, rep->w_priority,
+		    (u_long)rep->w_gen, rep->sites,
+		    (u_long)rep->w_lsn.file,
+		    (u_long)rep->w_lsn.offset));
+#endif
+	/* See if this vote displaces the current best candidate. */
+	__rep_cmp_vote(dbenv, rep, eid, &rp->lsn, vi->priority,
+	    rp->gen, vi->tiebreaker);
+	/*
+	 * If you get a vote and you're not in an election, we've
+	 * already recorded this vote.  But that is all we need
+	 * to do.
+	 */
+	if (!IN_ELECTION(rep)) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Not in election, but received vote1 0x%x",
+		    rep->flags));
+		ret = DB_REP_HOLDELECTION;
+		goto err;
+	}
+
+	master = rep->winner;
+	lsn = rep->w_lsn;
+	/*
+	 * We need to check sites == nsites, not more than half
+	 * like we do in __rep_elect and the VOTE2 code below.  The
+	 * reason is that we want to process all the incoming votes
+	 * and not short-circuit once we reach more than half.  The
+	 * real winner's vote may be in the last half.
+	 */
+	done = rep->sites >= rep->nsites && rep->w_priority != 0;
+	if (done) {
+		RPRINT(dbenv, rep,
+		    (dbenv, &mb, "Phase1 election done"));
+		RPRINT(dbenv, rep, (dbenv, &mb, "Voting for %d%s",
+		    master, master == rep->eid ? "(self)" : ""));
+		egen = rep->egen;
+		F_SET(rep, REP_F_EPHASE2);
+		F_CLR(rep, REP_F_EPHASE1);
+		/* If we won, tally our own VOTE2 rather than send it. */
+		if (master == rep->eid) {
+			(void)__rep_tally(dbenv, rep, rep->eid,
+			    &rep->votes, egen, rep->v2tally_off);
+			goto err;
+		}
+		REP_SYSTEM_UNLOCK(dbenv);
+
+		/* Vote for someone else. */
+		__rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen,
+		    master, REP_VOTE2);
+	} else
+err:		REP_SYSTEM_UNLOCK(dbenv);
+	return (ret);
+}
+
+/*
+ * __rep_vote2 --
+ *	Handle incoming vote2 message on a client.
+ *
+ *	Record the vote if it belongs to the current election; if it
+ *	gives us enough votes to win, declare ourselves the new master
+ *	and return DB_REP_NEWMASTER.
+ *
+ * PUBLIC: int __rep_vote2 __P((DB_ENV *, DBT *, int *));
+ */
+int
+__rep_vote2(dbenv, rec, eidp)
+	DB_ENV *dbenv;
+	DBT *rec;
+	int *eidp;
+{
+	DB_LOG *dblp;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	LOG *lp;
+	REP *rep;
+	REP_VOTE_INFO *vi;
+	int done, ret;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	RPRINT(dbenv, rep, (dbenv, &mb, "We received a vote%s",
+	    F_ISSET(rep, REP_F_MASTER) ? " (master)" : ""));
+	/* If we are already master, simply reassert that fact and return. */
+	if (F_ISSET(rep, REP_F_MASTER)) {
+		LOG_SYSTEM_LOCK(dbenv);
+		lsn = lp->lsn;
+		LOG_SYSTEM_UNLOCK(dbenv);
+		rep->stat.st_elections_won++;
+		(void)__rep_send_message(dbenv,
+		    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0, 0);
+		return (ret);
+	}
+
+	REP_SYSTEM_LOCK(dbenv);
+
+	/* If we have priority 0, we should never get a vote. */
+	DB_ASSERT(rep->priority != 0);
+
+	/*
+	 * We might be the last to the party and we haven't had
+	 * time to tally all the vote1's, but others have and
+	 * decided we're the winner.  So, if we're in the process
+	 * of tallying sites, keep the vote so that when our
+	 * election thread catches up we'll have the votes we
+	 * already received.
+	 */
+	vi = (REP_VOTE_INFO *)rec->data;
+	if (!IN_ELECTION_TALLY(rep) && vi->egen >= rep->egen) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "Not in election gen %lu, at %lu, got vote",
+		    (u_long)vi->egen, (u_long)rep->egen));
+		ret = DB_REP_HOLDELECTION;
+		goto err;
+	}
+
+	/*
+	 * Record this vote.  In a VOTE2, the only valid entry
+	 * in the REP_VOTE_INFO is the election generation.
+	 *
+	 * There are several things which can go wrong that we
+	 * need to account for:
+	 * 1. If we receive a latent VOTE2 from an earlier election,
+	 * we want to ignore it.
+	 * 2. If we receive a VOTE2 from a site from which we never
+	 * received a VOTE1, we want to ignore it.
+	 * 3. If we have received a duplicate VOTE2 from this election
+	 * from the same site we want to ignore it.
+	 * 4. If this is from the current election and someone is
+	 * really voting for us, then we finally get to record it.
+	 */
+	/*
+	 * __rep_cmp_vote2 checks for cases 1 and 2.
+	 */
+	if ((ret = __rep_cmp_vote2(dbenv, rep, *eidp, vi->egen)) != 0) {
+		/* Ignored votes are not an error. */
+		ret = 0;
+		goto err;
+	}
+	/*
+	 * __rep_tally takes care of cases 3 and 4.
+	 */
+	if ((ret = __rep_tally(dbenv, rep, *eidp, &rep->votes,
+	    vi->egen, rep->v2tally_off)) != 0) {
+		ret = 0;
+		goto err;
+	}
+	/* A quorum (nvotes) of VOTE2s elects us master. */
+	done = rep->votes >= rep->nvotes;
+	RPRINT(dbenv, rep, (dbenv, &mb, "Counted vote %d of %d",
+	    rep->votes, rep->nvotes));
+	if (done) {
+		__rep_elect_master(dbenv, rep, eidp);
+		ret = DB_REP_NEWMASTER;
+	}
+
+err:	REP_SYSTEM_UNLOCK(dbenv);
+	return (ret);
+}
+
+/*
+ * __rep_tally --
+ *	Record a vote from a given site.  Called with the db_rep
+ *	mutex held.  This function will return 0 if we successfully tally
+ *	the vote and non-zero if the vote is ignored.  This will record
+ *	both VOTE1 and VOTE2 records, depending on which region offset the
+ *	caller passed in.
+ */
+static int
+__rep_tally(dbenv, rep, eid, countp, egen, vtoff)
+	DB_ENV *dbenv;
+	REP *rep;
+	int eid, *countp;
+	u_int32_t egen;
+	roff_t vtoff;
+{
+	REP_VTALLY *tally, *vtp;
+	int i;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#else
+	COMPQUIET(rep, NULL);
+#endif
+
+	/* The tally array lives in the replication region at vtoff. */
+	tally = R_ADDR((REGINFO *)dbenv->reginfo, vtoff);
+	i = 0;
+	vtp = &tally[i];
+	while (i < *countp) {
+		/*
+		 * Ignore votes from earlier elections (i.e. we've heard
+		 * from this site in this election, but its vote from an
+		 * earlier election got delayed and we received it now).
+		 * However, if we happened to hear from an earlier vote
+		 * and we recorded it and we're now hearing from a later
+		 * election we want to keep the updated one.  Note that
+		 * updating the entry will not increase the count.
+		 * Also ignore votes that are duplicates.
+		 */
+		if (vtp->eid == eid) {
+			RPRINT(dbenv, rep, (dbenv, &mb,
+			    "Tally found[%d] (%d, %lu), this vote (%d, %lu)",
+				    i, vtp->eid, (u_long)vtp->egen,
+				    eid, (u_long)egen));
+			if (vtp->egen >= egen)
+				return (1);
+			else {
+				vtp->egen = egen;
+				return (0);
+			}
+		}
+		i++;
+		vtp = &tally[i];
+	}
+	/*
+	 * If we get here, we have a new voter we haven't
+	 * seen before.  Tally this vote.
+	 */
+#ifdef DIAGNOSTIC
+	if (vtoff == rep->tally_off)
+		RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE1[%d] (%d, %lu)",
+		    i, eid, (u_long)egen));
+	else
+		RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE2[%d] (%d, %lu)",
+		    i, eid, (u_long)egen));
+#endif
+	vtp->eid = eid;
+	vtp->egen = egen;
+	(*countp)++;
+	return (0);
+}
+
+/*
+ * __rep_cmp_vote --
+ *	Compare incoming vote1 message on a client.  Called with the db_rep
+ *	mutex held.  Updates rep->winner and the w_* fields when the
+ *	incoming vote beats the current best candidate; the comparison
+ *	order is LSN, then priority, then the random tiebreaker.
+ *
+ */
+static void
+__rep_cmp_vote(dbenv, rep, eid, lsnp, priority, gen, tiebreaker)
+	DB_ENV *dbenv;
+	REP *rep;
+	int eid;
+	DB_LSN *lsnp;
+	int priority;
+	u_int32_t gen, tiebreaker;
+{
+	int cmp;
+
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#else
+	COMPQUIET(dbenv, NULL);
+#endif
+	cmp = log_compare(lsnp, &rep->w_lsn);
+	/*
+	 * If we've seen more than one, compare us to the best so far.
+	 * If we're the first, make ourselves the winner to start.
+	 */
+	if (rep->sites > 1 && priority != 0) {
+		/*
+		 * LSN is primary determinant. Then priority if LSNs
+		 * are equal, then tiebreaker if both are equal.
+		 */
+		if (cmp > 0 ||
+		    (cmp == 0 && (priority > rep->w_priority ||
+		    (priority == rep->w_priority &&
+		    (tiebreaker > rep->w_tiebreaker))))) {
+			RPRINT(dbenv, rep, (dbenv, &mb, "Accepting new vote"));
+			rep->winner = eid;
+			rep->w_priority = priority;
+			rep->w_lsn = *lsnp;
+			rep->w_gen = gen;
+			rep->w_tiebreaker = tiebreaker;
+		}
+	} else if (rep->sites == 1) {
+		if (priority != 0) {
+			/* Make ourselves the winner to start. */
+			rep->winner = eid;
+			rep->w_priority = priority;
+			rep->w_gen = gen;
+			rep->w_lsn = *lsnp;
+			rep->w_tiebreaker = tiebreaker;
+		} else {
+			/* A zero-priority site can never win; no winner yet. */
+			rep->winner = DB_EID_INVALID;
+			rep->w_priority = 0;
+			rep->w_gen = 0;
+			ZERO_LSN(rep->w_lsn);
+			rep->w_tiebreaker = 0;
+		}
+	}
+	return;
+}
+
+/*
+ * __rep_cmp_vote2 --
+ *	Compare incoming vote2 message with vote1's we've recorded.  Called
+ *	with the db_rep mutex held.  We return 0 if the VOTE2 is from a
+ *	site we've heard from and it is from this election.  Otherwise return 1.
+ *
+ */
+static int
+__rep_cmp_vote2(dbenv, rep, eid, egen)
+	DB_ENV *dbenv;
+	REP *rep;
+	int eid;
+	u_int32_t egen;
+{
+	int i;
+	REP_VTALLY *tally, *vtp;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	/* The VOTE1 tally lives in the replication region. */
+	tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+	/*
+	 * Walk the recorded VOTE1 entries; a matching (eid, egen) pair
+	 * means this VOTE2 belongs to the current election and comes
+	 * from a site whose VOTE1 we have already seen.
+	 */
+	for (i = 0; i < rep->sites; i++) {
+		vtp = &tally[i];
+		if (vtp->eid == eid && vtp->egen == egen) {
+			RPRINT(dbenv, rep, (dbenv, &mb,
+			    "Found matching vote1 (%d, %lu), at %d of %d",
+			    eid, (u_long)egen, i, rep->sites));
+			return (0);
+		}
+	}
+	RPRINT(dbenv, rep,
+	    (dbenv, &mb, "Didn't find vote1 for eid %d, egen %lu",
+	    eid, (u_long)egen));
+	return (1);
+}
+
+/*
+ * __rep_elect_init
+ *	Initialize an election.  Sets beginp non-zero if the election is
+ * already in progress; makes it 0 otherwise.
+ *
+ *	Returns DB_REP_NEWMASTER (after broadcasting REP_NEWMASTER) if we
+ *	are already master.  If otally is non-NULL it receives the region's
+ *	REP_F_TALLY flag state so the caller can restore it on error.
+ */
+static int
+__rep_elect_init(dbenv, lsnp, nsites, nvotes, priority, beginp, otally)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp;
+	int nsites, nvotes, priority;
+	int *beginp;
+	u_int32_t *otally;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	int ret;
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	ret = 0;
+
+	/* We may miscount, as we don't hold the replication mutex here. */
+	rep->stat.st_elections++;
+
+	/* If we are already a master; simply broadcast that fact and return. */
+	if (F_ISSET(rep, REP_F_MASTER)) {
+		(void)__rep_send_message(dbenv,
+		    DB_EID_BROADCAST, REP_NEWMASTER, lsnp, NULL, 0, 0);
+		rep->stat.st_elections_won++;
+		return (DB_REP_NEWMASTER);
+	}
+
+	REP_SYSTEM_LOCK(dbenv);
+	if (otally != NULL)
+		*otally = F_ISSET(rep, REP_F_TALLY);
+	/* An election is underway if flags say so or a thread claimed it. */
+	*beginp = IN_ELECTION(rep) || rep->elect_th;
+	if (!*beginp) {
+		/*
+		 * Make sure that we always initialize all the election fields
+		 * before putting ourselves in an election state.  That means
+		 * issuing calls that can fail (allocation) before setting all
+		 * the variables.
+		 */
+		if (nsites > rep->asites &&
+		    (ret = __rep_grow_sites(dbenv, nsites)) != 0)
+			goto err;
+		DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL);
+		rep->elect_th = 1;
+		rep->nsites = nsites;
+		rep->nvotes = nvotes;
+		rep->priority = priority;
+		rep->master_id = DB_EID_INVALID;
+	}
+DB_TEST_RECOVERY_LABEL
+err:	REP_SYSTEM_UNLOCK(dbenv);
+	return (ret);
+}
+
+/*
+ * __rep_elect_master
+ *	Set up for new master from election.  Must be called with
+ *	the replication region mutex held.  Records ourselves as the
+ *	elected master and bumps the elections-won statistic.
+ *
+ * PUBLIC: void __rep_elect_master __P((DB_ENV *, REP *, int *));
+ */
+void
+__rep_elect_master(dbenv, rep, eidp)
+	DB_ENV *dbenv;
+	REP *rep;
+	int *eidp;
+{
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#else
+	COMPQUIET(dbenv, NULL);
+#endif
+	rep->master_id = rep->eid;
+	F_SET(rep, REP_F_MASTERELECT);
+	/* Report the new master's id back to the caller if requested. */
+	if (eidp != NULL)
+		*eidp = rep->master_id;
+	rep->stat.st_elections_won++;
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "Got enough votes to win; election done; winner is %d, gen %lu",
+	    rep->master_id, (u_long)rep->gen));
+}
+
+/*
+ * __rep_wait --
+ *	Poll until the given election-phase flag clears with a master
+ *	resolved (return 0), the election generation changes (return
+ *	DB_REP_EGENCHG), or the timeout (microseconds) expires (return
+ *	DB_TIMEOUT).  *eidp is set to the current master id on each poll.
+ */
+static int
+__rep_wait(dbenv, timeout, eidp, flags)
+	DB_ENV *dbenv;
+	u_int32_t timeout;
+	int *eidp;
+	u_int32_t flags;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	int done, echg;
+	u_int32_t egen, sleeptime;
+
+	done = echg = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	egen = rep->egen;
+
+	/*
+	 * The user specifies an overall timeout function, but checking
+	 * is cheap and the timeout may be a generous upper bound.
+	 * Sleep repeatedly for the smaller of .5s and timeout/10.
+	 */
+	sleeptime = (timeout > 5000000) ? 500000 : timeout / 10;
+	if (sleeptime == 0)
+		sleeptime++;
+	while (timeout > 0) {
+		__os_sleep(dbenv, 0, sleeptime);
+		REP_SYSTEM_LOCK(dbenv);
+		echg = egen != rep->egen;
+		/* Done when the phase flag cleared and a master is known. */
+		done = !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID;
+
+		*eidp = rep->master_id;
+		REP_SYSTEM_UNLOCK(dbenv);
+
+		if (done)
+			return (0);
+
+		if (echg)
+			return (DB_REP_EGENCHG);
+
+		if (timeout > sleeptime)
+			timeout -= sleeptime;
+		else
+			timeout = 0;
+	}
+	return (DB_TIMEOUT);
+}
diff --git a/storage/bdb/rep/rep_log.c b/storage/bdb/rep/rep_log.c
new file mode 100644
index 00000000000..83c04a54254
--- /dev/null
+++ b/storage/bdb/rep/rep_log.c
@@ -0,0 +1,616 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: rep_log.c,v 12.26 2005/10/12 17:58:39 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+
+/*
+ * __rep_allreq --
+ *      Handle a REP_ALL_REQ message.
+ *
+ *	Stream all log records from the requested LSN (or from the start
+ *	of the log if the request LSN is zero/initial) to site eid,
+ *	honoring the configured throttle limits and, when enabled,
+ *	batching records into a bulk buffer.
+ *
+ * PUBLIC: int __rep_allreq __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_allreq(dbenv, rp, eid)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	int eid;
+{
+	DB_LOGC *logc;
+	DB_LSN oldfilelsn;
+	DB_REP *db_rep;
+	DBT data_dbt;
+	REP *rep;
+	REP_BULK bulk;
+	REP_THROTTLE repth;
+	uintptr_t bulkoff;
+	u_int32_t bulkflags, flags, use_bulk;
+	int ret, t_ret;
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		return (ret);
+	memset(&data_dbt, 0, sizeof(data_dbt));
+	/*
+	 * If we're doing bulk transfer, allocate a bulk buffer to put our
+	 * log records in.  We still need to initialize the throttle info
+	 * because if we encounter a log record larger than our entire bulk
+	 * buffer, we need to send it as a singleton and also we want to
+	 * support throttling with bulk.
+	 *
+	 * Use a local var so we don't need to worry if someone else turns
+	 * on/off bulk in the middle of our call.
+	 */
+	use_bulk = FLD_ISSET(rep->config, REP_C_BULK);
+	if (use_bulk && (ret = __rep_bulk_alloc(dbenv, &bulk, eid,
+	    &bulkoff, &bulkflags, REP_BULK_LOG)) != 0)
+		goto err;
+	memset(&repth, 0, sizeof(repth));
+	/* Snapshot the throttle limits under the region lock. */
+	REP_SYSTEM_LOCK(dbenv);
+	repth.gbytes = rep->gbytes;
+	repth.bytes = rep->bytes;
+	oldfilelsn = repth.lsn = rp->lsn;
+	repth.type = REP_LOG;
+	repth.data_dbt = &data_dbt;
+	REP_SYSTEM_UNLOCK(dbenv);
+	flags = IS_ZERO_LSN(rp->lsn) ||
+	    IS_INIT_LSN(rp->lsn) ?  DB_FIRST : DB_SET;
+	/*
+	 * We get the first item so that a client servicing requests
+	 * can distinguish between not having the records and reaching
+	 * the end of its log.  Return the DB_NOTFOUND if the client
+	 * cannot get the record.  Return 0 if we finish the loop and
+	 * sent all that we have.
+	 */
+	ret = __log_c_get(logc, &repth.lsn, &data_dbt, flags);
+	if (ret == DB_NOTFOUND) {
+		if (F_ISSET(rep, REP_F_MASTER))
+			ret = 0;
+		goto err;
+	}
+	/*
+	 * For singleton log records, we break when we get a REP_LOG_MORE.
+	 * Or if we're not using throttling, or we are using bulk, we stop
+	 * when we reach the end (i.e. ret != 0).
+	 */
+	for (;
+	    ret == 0 && repth.type != REP_LOG_MORE;
+	    ret = __log_c_get(logc, &repth.lsn, &data_dbt, DB_NEXT)) {
+		/*
+		 * If the client is asking for all records because it doesn't
+		 * have any, and our first record is not in the first log
+		 * file, then the client is outdated and needs to get a
+		 * VERIFY_FAIL.
+		 */
+		if (repth.lsn.file != 1 && flags == DB_FIRST) {
+			(void)__rep_send_message(dbenv, eid,
+			    REP_VERIFY_FAIL, &repth.lsn, NULL, 0, 0);
+			break;
+		}
+		/* Crossing a log-file boundary: tell the client first. */
+		if (repth.lsn.file != oldfilelsn.file)
+			(void)__rep_send_message(dbenv,
+			    eid, REP_NEWFILE, &oldfilelsn, NULL, 0, 0);
+		/*
+		 * If we are configured for bulk, try to send this as a bulk
+		 * request.  If not configured, or it is too big for bulk
+		 * then just send normally.
+		 */
+		if (use_bulk)
+			ret = __rep_bulk_message(dbenv, &bulk, &repth,
+			    &repth.lsn, &data_dbt, DB_LOG_RESEND);
+		if (!use_bulk || ret == DB_REP_BULKOVF)
+			ret = __rep_send_throttle(dbenv, eid, &repth, 0);
+		if (ret != 0)
+			break;
+		/*
+		 * If we are about to change files, then we'll need the
+		 * last LSN in the previous file.  Save it here.
+		 */
+		oldfilelsn = repth.lsn;
+		oldfilelsn.offset += logc->c_len;
+	}
+
+	/* Reaching the end of our log is not an error. */
+	if (ret == DB_NOTFOUND)
+		ret = 0;
+	/*
+	 * We're done, force out whatever remains in the bulk buffer and
+	 * free it.
+	 */
+	if (use_bulk && (t_ret = __rep_bulk_free(dbenv, &bulk,
+	    DB_LOG_RESEND)) != 0 && ret == 0)
+		ret = t_ret;
+err:
+	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+
+/*
+ * __rep_log --
+ *      Handle a REP_LOG/REP_LOG_MORE message.
+ *
+ *	Apply the incoming log record.  On DB_REP_LOGREADY, flush the
+ *	log and finish the internal backup.  For a REP_LOG_MORE message
+ *	(and a non-duplicate record), request the continuation of the
+ *	data stream from the master.
+ *
+ * PUBLIC: int __rep_log __P((DB_ENV *, REP_CONTROL *, DBT *,
+ * PUBLIC:     time_t, DB_LSN *));
+ */
+int
+__rep_log(dbenv, rp, rec, savetime, ret_lsnp)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	time_t savetime;
+	DB_LSN *ret_lsnp;
+{
+	DB_LOG *dblp;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	LOG *lp;
+	REP *rep;
+	int is_dup, master, ret;
+
+	is_dup = ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	ret = __rep_apply(dbenv, rp, rec, ret_lsnp, &is_dup);
+	switch (ret) {
+	/*
+	 * We're in an internal backup and we've gotten
+	 * all the log we need to run recovery.  Do so now.
+	 */
+	case DB_REP_LOGREADY:
+		if ((ret = __log_flush(dbenv, NULL)) != 0)
+			goto out;
+		if ((ret = __rep_verify_match(dbenv, &rep->last_lsn,
+		    savetime)) == 0) {
+			/* Recovery done: clear the internal-backup state. */
+			REP_SYSTEM_LOCK(dbenv);
+			ZERO_LSN(rep->first_lsn);
+			ZERO_LSN(rep->last_lsn);
+			F_CLR(rep, REP_F_RECOVER_LOG);
+			REP_SYSTEM_UNLOCK(dbenv);
+		}
+		break;
+	/*
+	 * If we get any of the "normal" returns, we only process
+	 * LOG_MORE if this is not a duplicate record.  If the
+	 * record is a duplicate we don't want to handle LOG_MORE
+	 * and request a multiple data stream (or trigger internal
+	 * initialization) since this could be a very old record
+	 * that no longer exists on the master.
+	 */
+	case DB_REP_ISPERM:
+	case DB_REP_NOTPERM:
+	case 0:
+		if (is_dup)
+			goto out;
+		else
+			break;
+	/*
+	 * Any other return (errors), we're done.
+	 */
+	default:
+		goto out;
+	}
+	if (rp->rectype == REP_LOG_MORE) {
+		REP_SYSTEM_LOCK(dbenv);
+		master = rep->master_id;
+		REP_SYSTEM_UNLOCK(dbenv);
+		LOG_SYSTEM_LOCK(dbenv);
+		lsn = lp->lsn;
+		LOG_SYSTEM_UNLOCK(dbenv);
+		/*
+		 * If the master_id is invalid, this means that since
+		 * the last record was sent, somebody declared an
+		 * election and we may not have a master to request
+		 * things of.
+		 *
+		 * This is not an error;  when we find a new master,
+		 * we'll re-negotiate where the end of the log is and
+		 * try to bring ourselves up to date again anyway.
+		 *
+		 * If we've asked for a bunch of records, it could
+		 * either be from a LOG_REQ or ALL_REQ.  If we're
+		 * waiting for a gap to be filled, call loggap_req,
+		 * otherwise use ALL_REQ again.
+		 */
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+		if (master == DB_EID_INVALID) {
+			ret = 0;
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		} else if (IS_ZERO_LSN(lp->waiting_lsn)) {
+			/*
+			 * We're making an ALL_REQ.  However, since we're
+			 * in a LOG_MORE, this is in reply to a request and
+			 * it is likely we may receive new records, even if
+			 * we don't have any at this moment.  So, to avoid
+			 * multiple data streams, set the wait_recs high
+			 * now to give the master a chance to start sending
+			 * us these records before the gap code re-requests
+			 * the same gap.  Wait_recs will get reset once we
+			 * start receiving these records.
+			 */
+			lp->wait_recs = rep->max_gap;
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+			if (__rep_send_message(dbenv, master, REP_ALL_REQ,
+			    &lsn, NULL, 0, DB_REP_ANYWHERE) != 0)
+				goto out;
+		} else {
+			/* A gap is outstanding; re-request the gap itself. */
+			ret = __rep_loggap_req(dbenv, rep, &lsn, REP_GAP_FORCE);
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		}
+	}
+out:
+	return (ret);
+}
+
+/*
+ * __rep_bulk_log --
+ *      Handle a REP_BULK_LOG message.
+ *
+ *	Split the bulk buffer into individual log records and apply them;
+ *	on DB_REP_LOGREADY, flush the log and finish the internal backup.
+ *
+ * PUBLIC: int __rep_bulk_log __P((DB_ENV *, REP_CONTROL *, DBT *,
+ * PUBLIC:     time_t, DB_LSN *));
+ */
+int
+__rep_bulk_log(dbenv, rp, rec, savetime, ret_lsnp)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	time_t savetime;
+	DB_LSN *ret_lsnp;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	int ret;
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	ret = __log_rep_split(dbenv, rp, rec, ret_lsnp);
+	switch (ret) {
+	/*
+	 * We're in an internal backup and we've gotten
+	 * all the log we need to run recovery.  Do so now.
+	 */
+	case DB_REP_LOGREADY:
+		if ((ret = __log_flush(dbenv, NULL)) != 0)
+			goto out;
+		if ((ret = __rep_verify_match(dbenv, &rep->last_lsn,
+		    savetime)) == 0) {
+			/* Recovery done: clear the internal-backup state. */
+			REP_SYSTEM_LOCK(dbenv);
+			ZERO_LSN(rep->first_lsn);
+			ZERO_LSN(rep->last_lsn);
+			F_CLR(rep, REP_F_RECOVER_LOG);
+			REP_SYSTEM_UNLOCK(dbenv);
+		}
+		break;
+	/*
+	 * Any other return (errors), we're done.
+	 */
+	default:
+		break;
+	}
+out:
+	return (ret);
+}
+
+/*
+ * __rep_log_req --
+ *      Handle a REP_LOG_REQ message.
+ *
+ * PUBLIC: int __rep_logreq __P((DB_ENV *, REP_CONTROL *, DBT *, int));
+ */
+int
+__rep_logreq(dbenv, rp, rec, eid)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	int eid;
+{
+	DB_LOG *dblp;
+	DB_LOGC *logc;
+	DB_LSN endlsn, lsn, oldfilelsn;
+	DB_REP *db_rep;
+	DBT data_dbt;
+	LOG *lp;
+	REP *rep;
+	REP_BULK bulk;
+	REP_THROTTLE repth;
+	uintptr_t bulkoff;
+	u_int32_t bulkflags, use_bulk;
+	int ret, t_ret;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	if (rec != NULL && rec->size != 0) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "[%lu][%lu]: LOG_REQ max lsn: [%lu][%lu]",
+		    (u_long) rp->lsn.file, (u_long)rp->lsn.offset,
+		    (u_long)((DB_LSN *)rec->data)->file,
+		    (u_long)((DB_LSN *)rec->data)->offset));
+	}
+	/*
+	 * There are three different cases here.
+	 * 1. We asked log_c_get for a particular LSN and got it.
+	 * 2. We asked log_c_get for an LSN and it's not found because it is
+	 *	beyond the end of a log file and we need a NEWFILE msg.
+	 *	and then the record that was requested.
+	 * 3. We asked log_c_get for an LSN and it simply doesn't exist, but
+	 *    doesn't meet any of those other criteria, in which case
+	 *    it's an error (that should never happen on a master).
+	 *
+	 * If we have a valid LSN and the request has a data_dbt with
+	 * it, the sender is asking for a chunk of log records.
+	 * Then we need to send all records up to the LSN in the data dbt.
+	 */
+	memset(&data_dbt, 0, sizeof(data_dbt));
+	oldfilelsn = lsn = rp->lsn;
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		return (ret);
+	ret = __log_c_get(logc, &lsn, &data_dbt, DB_SET);
+
+	if (ret == 0) /* Case 1 */
+		(void)__rep_send_message(dbenv,
+		   eid, REP_LOG, &lsn, &data_dbt, DB_LOG_RESEND, 0);
+	else if (ret == DB_NOTFOUND) {
+		LOG_SYSTEM_LOCK(dbenv);
+		endlsn = lp->lsn;
+		LOG_SYSTEM_UNLOCK(dbenv);
+		if (endlsn.file > lsn.file) {
+			/*
+			 * Case 2:
+			 * Need to find the LSN of the last record in
+			 * file lsn.file so that we can send it with
+			 * the NEWFILE call.  In order to do that, we
+			 * need to try to get {lsn.file + 1, 0} and
+			 * then backup.
+			 */
+			endlsn.file = lsn.file + 1;
+			endlsn.offset = 0;
+			if ((ret = __log_c_get(logc,
+			    &endlsn, &data_dbt, DB_SET)) != 0 ||
+			    (ret = __log_c_get(logc,
+				&endlsn, &data_dbt, DB_PREV)) != 0) {
+				RPRINT(dbenv, rep, (dbenv, &mb,
+				    "Unable to get prev of [%lu][%lu]",
+				    (u_long)lsn.file,
+				    (u_long)lsn.offset));
+				/*
+				 * We want to push the error back
+				 * to the client so that the client
+				 * does an internal backup.  The
+				 * client asked for a log record
+				 * we no longer have and it is
+				 * outdated.
+				 * XXX - This could be optimized by
+				 * having the master perform and
+				 * send a REP_UPDATE message.  We
+				 * currently want the client to set
+				 * up its 'update' state prior to
+				 * requesting REP_UPDATE_REQ.
+				 *
+				 * If we're a client servicing a request
+				 * just return DB_NOTFOUND.
+				 */
+				if (F_ISSET(rep, REP_F_MASTER)) {
+					ret = 0;
+					(void)__rep_send_message(dbenv, eid,
+					    REP_VERIFY_FAIL, &rp->lsn,
+					    NULL, 0, 0);
+				} else
+					ret = DB_NOTFOUND;
+			} else {
+				endlsn.offset += logc->c_len;
+				(void)__rep_send_message(dbenv, eid,
+				    REP_NEWFILE, &endlsn, NULL, 0, 0);
+			}
+		} else {
+			/* Case 3 */
+			/*
+			 * If we're a master, this is a problem.
+			 * If we're a client servicing a request
+			 * just return the DB_NOTFOUND.
+			 */
+			if (F_ISSET(rep, REP_F_MASTER)) {
+				__db_err(dbenv,
+				    "Request for LSN [%lu][%lu] fails",
+				    (u_long)lsn.file, (u_long)lsn.offset);
+				DB_ASSERT(0);
+				ret = EINVAL;
+			}
+		}
+	}
+	if (ret != 0)
+		goto err;
+
+	/*
+	 * If the user requested a gap, send the whole thing,
+	 * while observing the limits from set_rep_limit.
+	 */
+	/*
+	 * If we're doing bulk transfer, allocate a bulk buffer to put our
+	 * log records in.  We still need to initialize the throttle info
+	 * because if we encounter a log record larger than our entire bulk
+	 * buffer, we need to send it as a singleton.
+	 *
+	 * Use a local var so we don't need to worry if someone else turns
+	 * on/off bulk in the middle of our call.
+	 */
+	use_bulk = FLD_ISSET(rep->config, REP_C_BULK);
+	if (use_bulk && (ret = __rep_bulk_alloc(dbenv, &bulk, eid,
+	    &bulkoff, &bulkflags, REP_BULK_LOG)) != 0)
+		goto err;
+	memset(&repth, 0, sizeof(repth));
+	REP_SYSTEM_LOCK(dbenv);
+	repth.gbytes = rep->gbytes;
+	repth.bytes = rep->bytes;
+	repth.type = REP_LOG;
+	repth.data_dbt = &data_dbt;
+	REP_SYSTEM_UNLOCK(dbenv);
+	while (ret == 0 && rec != NULL && rec->size != 0 &&
+	    repth.type == REP_LOG) {
+		if ((ret =
+		    __log_c_get(logc, &repth.lsn, &data_dbt, DB_NEXT)) != 0) {
+			/*
+			 * If we're a client and we only have part of the gap,
+			 * return DB_NOTFOUND so that we send a REREQUEST
+			 * back to the requester and it can ask for more.
+			 */
+			if (ret == DB_NOTFOUND && F_ISSET(rep, REP_F_MASTER))
+				ret = 0;
+			break;
+		}
+		if (log_compare(&repth.lsn, (DB_LSN *)rec->data) >= 0)
+			break;
+		if (repth.lsn.file != oldfilelsn.file)
+			(void)__rep_send_message(dbenv,
+			    eid, REP_NEWFILE, &oldfilelsn, NULL, 0, 0);
+		/*
+		 * If we are configured for bulk, try to send this as a bulk
+		 * request.  If not configured, or it is too big for bulk
+		 * then just send normally.
+		 */
+		if (use_bulk)
+			ret = __rep_bulk_message(dbenv, &bulk, &repth,
+			    &repth.lsn, &data_dbt, DB_LOG_RESEND);
+		if (!use_bulk || ret == DB_REP_BULKOVF)
+			ret = __rep_send_throttle(dbenv, eid, &repth, 0);
+		if (ret != 0)
+			break;
+		/*
+		 * If we are about to change files, then we'll need the
+		 * last LSN in the previous file.  Save it here.
+		 */
+		oldfilelsn = repth.lsn;
+		oldfilelsn.offset += logc->c_len;
+	}
+
+	/*
+	 * We're done, force out whatever remains in the bulk buffer and
+	 * free it.
+	 */
+	if (use_bulk && (t_ret = __rep_bulk_free(dbenv, &bulk,
+	    DB_LOG_RESEND)) != 0 && ret == 0)
+		ret = t_ret;
+err:
+	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+
+/*
+ * __rep_loggap_req --
+ *	Request a log gap.  Assumes the caller holds the REP->mtx_clientdb.
+ *
+ * lsnp is the current LSN we're handling.  It is used to help decide
+ *	if we ask for a gap or singleton.
+ * gapflags are flags that may override the algorithm or control the
+ *	processing in some way.
+ *
+ * PUBLIC: int __rep_loggap_req __P((DB_ENV *, REP *, DB_LSN *, u_int32_t));
+ */
+int
+__rep_loggap_req(dbenv, rep, lsnp, gapflags)
+	DB_ENV *dbenv;
+	REP *rep;
+	DB_LSN *lsnp;
+	u_int32_t gapflags;
+{
+	DB_LOG *dblp;
+	DBT max_lsn_dbt, *max_lsn_dbtp;
+	DB_LSN next_lsn;
+	LOG *lp;
+	u_int32_t flags, type;
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	LOG_SYSTEM_LOCK(dbenv);
+	next_lsn = lp->lsn;
+	LOG_SYSTEM_UNLOCK(dbenv);
+	flags = 0;
+	type = REP_LOG_REQ;
+
+	/*
+	 * Check if we need to ask for the gap.
+	 * We ask for the gap if:
+	 *	We are forced to with gapflags.
+	 *	If max_wait_lsn is ZERO_LSN - we've never asked for
+	 *	  records before.
+	 *	If we asked for a single record and received it.
+	 *
+	 * If we want a gap, but don't have an ending LSN (waiting_lsn)
+	 * send an ALL_REQ.  This is primarily used by REP_REREQUEST when
+	 * an ALL_REQ was not able to be fulfilled by another client.
+	 */
+	if (FLD_ISSET(gapflags, (REP_GAP_FORCE | REP_GAP_REREQUEST)) ||
+	    IS_ZERO_LSN(lp->max_wait_lsn) ||
+	    (lsnp != NULL && log_compare(lsnp, &lp->max_wait_lsn) == 0)) {
+		lp->max_wait_lsn = lp->waiting_lsn;
+		if (IS_ZERO_LSN(lp->max_wait_lsn))
+			type = REP_ALL_REQ;
+		memset(&max_lsn_dbt, 0, sizeof(max_lsn_dbt));
+		max_lsn_dbt.data = &lp->waiting_lsn;
+		max_lsn_dbt.size = sizeof(lp->waiting_lsn);
+		max_lsn_dbtp = &max_lsn_dbt;
+		/*
+		 * Gap requests are "new" and can go anywhere, unless
+		 * this is already a rerequest.
+		 */
+		if (FLD_ISSET(gapflags, REP_GAP_REREQUEST))
+			flags = DB_REP_REREQUEST;
+		else
+			flags = DB_REP_ANYWHERE;
+	} else {
+		max_lsn_dbtp = NULL;
+		lp->max_wait_lsn = next_lsn;
+		/*
+		 * If we're dropping to singletons, this is a rerequest.
+		 */
+		flags = DB_REP_REREQUEST;
+	}
+	if (rep->master_id != DB_EID_INVALID) {
+		rep->stat.st_log_requested++;
+		(void)__rep_send_message(dbenv, rep->master_id,
+		    type, &next_lsn, max_lsn_dbtp, 0, flags);
+	} else
+		(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
+		    REP_MASTER_REQ, NULL, NULL, 0, 0);
+
+	return (0);
+}
diff --git a/storage/bdb/rep/rep_method.c b/storage/bdb/rep/rep_method.c
index 9e83dd72950..aaa1d70b5a1 100644
--- a/storage/bdb/rep/rep_method.c
+++ b/storage/bdb/rep/rep_method.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_method.c,v 1.167 2004/10/07 17:20:12 bostic Exp $
+ * $Id: rep_method.c,v 12.18 2005/11/08 03:25:13 bostic Exp $
  */
 
 #include "db_config.h"
@@ -12,83 +12,20 @@
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
 
-#ifdef HAVE_RPC
-#include <rpc/rpc.h>
-#endif
-
 #include <stdlib.h>
 #include <string.h>
 #endif
 
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
 #include "db_int.h"
 #include "dbinc/db_page.h"
 #include "dbinc/btree.h"
 #include "dbinc/log.h"
 #include "dbinc/txn.h"
 
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
-static int __rep_abort_prepared __P((DB_ENV *));
-static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
-static int __rep_elect
-	       __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t));
-static int __rep_elect_init
-	       __P((DB_ENV *, DB_LSN *, int, int, int, int *, u_int32_t *));
-static int __rep_flush __P((DB_ENV *));
-static int __rep_restore_prepared __P((DB_ENV *));
-static int __rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *));
-static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
-static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
-static int __rep_set_rep_transport __P((DB_ENV *, int,
-    int (*)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *,
-    int, u_int32_t)));
-static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
-static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t));
-
-/*
- * __rep_dbenv_create --
- *	Replication-specific initialization of the DB_ENV structure.
- *
- * PUBLIC: void __rep_dbenv_create __P((DB_ENV *));
- */
-void
-__rep_dbenv_create(dbenv)
-	DB_ENV *dbenv;
-{
-#ifdef HAVE_RPC
-	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
-		dbenv->rep_elect = __dbcl_rep_elect;
-		dbenv->rep_flush = __dbcl_rep_flush;
-		dbenv->rep_process_message = __dbcl_rep_process_message;
-		dbenv->rep_start = __dbcl_rep_start;
-		dbenv->rep_stat = __dbcl_rep_stat;
-		dbenv->rep_stat_print = NULL;
-		dbenv->get_rep_limit = __dbcl_rep_get_limit;
-		dbenv->set_rep_limit = __dbcl_rep_set_limit;
-		dbenv->set_rep_request = __dbcl_rep_set_request;
-		dbenv->set_rep_transport = __dbcl_rep_set_rep_transport;
-
-	} else
-#endif
-	{
-		dbenv->rep_elect = __rep_elect;
-		dbenv->rep_flush = __rep_flush;
-		dbenv->rep_process_message = __rep_process_message;
-		dbenv->rep_start = __rep_start;
-		dbenv->rep_stat = __rep_stat_pp;
-		dbenv->rep_stat_print = __rep_stat_print_pp;
-		dbenv->get_rep_limit = __rep_get_limit;
-		dbenv->set_rep_limit = __rep_set_limit;
-		dbenv->set_rep_request = __rep_set_request;
-		dbenv->set_rep_transport = __rep_set_rep_transport;
-	}
-}
+static int  __rep_abort_prepared __P((DB_ENV *));
+static int  __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
+static void __rep_config_map __P((DB_ENV *, u_int32_t *, u_int32_t *));
+static int  __rep_restore_prepared __P((DB_ENV *));
 
 /*
  * __rep_open --
@@ -110,6 +47,146 @@ __rep_open(dbenv)
 	return (ret);
 }
 
+/*
+ * __rep_get_config --
+ *	Configure the replication subsystem.
+ *
+ * PUBLIC: int __rep_get_config __P((DB_ENV *, u_int32_t, int *));
+ */
+int
+__rep_get_config(dbenv, which, onp)
+	DB_ENV *dbenv;
+	u_int32_t which;
+	int *onp;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	u_int32_t mapped;
+
+#undef	OK_FLAGS
+#define	OK_FLAGS							\
+(DB_REP_CONF_BULK | DB_REP_CONF_DELAYCLIENT | DB_REP_CONF_NOAUTOINIT	\
+    | DB_REP_CONF_NOWAIT)
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle,
+	    "rep_get_config", DB_INIT_REP);
+	if (FLD_ISSET(which, ~OK_FLAGS))
+		return (__db_ferr(dbenv, "DB_ENV->rep_get_config", 0));
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	mapped = 0;
+	__rep_config_map(dbenv, &which, &mapped);
+	if (FLD_ISSET(rep->config, mapped))
+		*onp = 1;
+	else
+		*onp = 0;
+	return (0);
+}
+
+/*
+ * __rep_set_config --
+ *	Configure the replication subsystem.
+ *
+ * PUBLIC: int __rep_set_config __P((DB_ENV *, u_int32_t, int));
+ */
+int
+__rep_set_config(dbenv, which, on)
+	DB_ENV *dbenv;
+	u_int32_t which;
+	int on;
+{
+	DB_LOG *dblp;
+	DB_REP *db_rep;
+	LOG *lp;
+	REP *rep;
+	REP_BULK bulk;
+	int ret;
+	u_int32_t mapped, orig;
+
+#undef	OK_FLAGS
+#define	OK_FLAGS							\
+(DB_REP_CONF_BULK | DB_REP_CONF_DELAYCLIENT | DB_REP_CONF_NOAUTOINIT	\
+    | DB_REP_CONF_NOWAIT)
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle,
+	    "rep_config", DB_INIT_REP);
+	if (FLD_ISSET(which, ~OK_FLAGS))
+		return (__db_ferr(dbenv, "DB_ENV->rep_set_config", 0));
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	mapped = ret = 0;
+	__rep_config_map(dbenv, &which, &mapped);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	REP_SYSTEM_LOCK(dbenv);
+	orig = rep->config;
+	if (on)
+		FLD_SET(rep->config, mapped);
+	else
+		FLD_CLR(rep->config, mapped);
+
+	/*
+	 * Bulk transfer requires special processing if it is getting
+	 * toggled.
+	 */
+	if (FLD_ISSET(rep->config, REP_C_BULK) &&
+	    !FLD_ISSET(orig, REP_C_BULK))
+		db_rep->bulk = R_ADDR(&dblp->reginfo, lp->bulk_buf);
+	REP_SYSTEM_UNLOCK(dbenv);
+	/*
+	 * If turning bulk off and it was on, send out whatever is in the
+	 * buffer already.
+	 */
+	if (FLD_ISSET(orig, REP_C_BULK) &&
+	    !FLD_ISSET(rep->config, REP_C_BULK) && lp->bulk_off != 0) {
+		memset(&bulk, 0, sizeof(bulk));
+		if (db_rep->bulk == NULL)
+			bulk.addr = R_ADDR(&dblp->reginfo, lp->bulk_buf);
+		else
+			bulk.addr = db_rep->bulk;
+		bulk.offp = &lp->bulk_off;
+		bulk.len = lp->bulk_len;
+		bulk.type = REP_BULK_LOG;
+		bulk.eid = DB_EID_BROADCAST;
+		bulk.flagsp = &lp->bulk_flags;
+		ret = __rep_send_bulk(dbenv, &bulk, 0);
+	}
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	return (ret);
+}
+
+static void
+__rep_config_map(dbenv, inflagsp, outflagsp)
+	DB_ENV *dbenv;
+	u_int32_t *inflagsp, *outflagsp;
+{
+	COMPQUIET(dbenv, NULL);
+
+	if (FLD_ISSET(*inflagsp, DB_REP_CONF_BULK)) {
+		FLD_SET(*outflagsp, REP_C_BULK);
+		FLD_CLR(*inflagsp, DB_REP_CONF_BULK);
+	}
+	if (FLD_ISSET(*inflagsp, DB_REP_CONF_DELAYCLIENT)) {
+		FLD_SET(*outflagsp, REP_C_DELAYCLIENT);
+		FLD_CLR(*inflagsp, DB_REP_CONF_DELAYCLIENT);
+	}
+	if (FLD_ISSET(*inflagsp, DB_REP_CONF_NOAUTOINIT)) {
+		FLD_SET(*outflagsp, REP_C_NOAUTOINIT);
+		FLD_CLR(*inflagsp, DB_REP_CONF_NOAUTOINIT);
+	}
+	if (FLD_ISSET(*inflagsp, DB_REP_CONF_NOWAIT)) {
+		FLD_SET(*outflagsp, REP_C_NOWAIT);
+		FLD_CLR(*inflagsp, DB_REP_CONF_NOWAIT);
+	}
+}
+
 /*
  * __rep_start --
  *	Become a master or client, and start sending messages to participate
@@ -135,8 +212,10 @@ __rep_open(dbenv)
  * stored in the replication region.  This prevents the use of handles on
  * clients that reference non-existent files whose creation was backed out
  * during a synchronizing recovery.
+ *
+ * PUBLIC: int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
  */
-static int
+int
 __rep_start(dbenv, dbt, flags)
 	DB_ENV *dbenv;
 	DBT *dbt;
@@ -189,7 +268,7 @@ __rep_start(dbenv, dbt, flags)
 	if (LF_ISSET(DB_REP_MASTER) && (ret = __log_flush(dbenv, NULL)) != 0)
 		return (ret);
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	/*
 	 * We only need one thread to start-up replication, so if
 	 * there is another thread in rep_start, we'll let it finish
@@ -204,8 +283,8 @@ __rep_start(dbenv, dbt, flags)
 	} else
 		rep->start_th = 1;
 
-	role_chg = (F_ISSET(rep, REP_F_CLIENT) && LF_ISSET(DB_REP_MASTER)) ||
-	    (F_ISSET(rep, REP_F_MASTER) && LF_ISSET(DB_REP_CLIENT));
+	role_chg = (!F_ISSET(rep, REP_F_MASTER) && LF_ISSET(DB_REP_MASTER)) ||
+	    (!F_ISSET(rep, REP_F_CLIENT) && LF_ISSET(DB_REP_CLIENT));
 
 	/*
 	 * Wait for any active txns or mpool ops to complete, and
@@ -213,17 +292,18 @@ __rep_start(dbenv, dbt, flags)
 	 * changing roles.  If we are not changing roles, then we
 	 * only need to coordinate with msg_th.
 	 */
-	if (role_chg)
-		__rep_lockout(dbenv, db_rep, rep, 0);
-	else {
+	if (role_chg) {
+		if ((ret = __rep_lockout(dbenv, rep, 0)) != 0)
+			goto errunlock;
+	} else {
 		for (sleep_cnt = 0; rep->msg_th != 0;) {
 			if (++sleep_cnt % 60 == 0)
 				__db_err(dbenv,
 	"DB_ENV->rep_start waiting %d minutes for replication message thread",
 				    sleep_cnt / 60);
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 			__os_sleep(dbenv, 1, 0);
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_LOCK(dbenv);
 		}
 	}
 
@@ -234,17 +314,17 @@ __rep_start(dbenv, dbt, flags)
 		if (role_chg) {
 			/*
 			 * If we're upgrading from having been a client,
-			 * preclose, so that we close our temporary database.
-			 *
-			 * Do not close files that we may have opened while
-			 * doing a rep_apply;  they'll get closed when we
-			 * finally close the environment, but for now, leave
-			 * them open, as we don't want to recycle their
-			 * fileids, and we may need the handles again if
-			 * we become a client and the original master
-			 * that opened them becomes a master again.
+			 * preclose, so that we close our temporary database
+			 * and any files we opened while doing a rep_apply.
+			 * If we don't we can infinitely leak file ids if
+			 * the master crashed with files open (the likely
+			 * case).  If we don't close them we can run into
+			 * problems if we try to remove that file or long
+			 * running applications end up with an unbounded
+			 * number of used fileids, each getting written
+			 * on checkpoint.  Just close them.
 			 */
-			if ((ret = __rep_preclose(dbenv, 0)) != 0)
+			if ((ret = __rep_preclose(dbenv)) != 0)
 				goto errunlock;
 		}
 
@@ -284,11 +364,11 @@ __rep_start(dbenv, dbt, flags)
 		 */
 		rep->flags = REP_F_MASTER;
 		rep->start_th = 0;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		dblp = (DB_LOG *)dbenv->lg_handle;
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 		lsn = ((LOG *)dblp->reginfo.primary)->lsn;
-		R_UNLOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_UNLOCK(dbenv);
 
 		/*
 		 * Send the NEWMASTER message first so that clients know
@@ -297,14 +377,14 @@ __rep_start(dbenv, dbt, flags)
 		 * regarding errors.
 		 */
 		(void)__rep_send_message(dbenv,
-		    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0);
+		    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0, 0);
 		ret = 0;
 		if (role_chg) {
 			ret = __txn_reset(dbenv);
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_LOCK(dbenv);
 			F_CLR(rep, REP_F_READY);
 			rep->in_recovery = 0;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 		}
 		/*
 		 * Take a transaction checkpoint so that our new generation
@@ -334,7 +414,7 @@ __rep_start(dbenv, dbt, flags)
 		FLD_SET(repflags, REP_F_CLIENT);
 
 		rep->flags = repflags;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 
 		/*
 		 * Abort any prepared transactions that were restored
@@ -347,18 +427,18 @@ __rep_start(dbenv, dbt, flags)
 		if ((ret = __rep_abort_prepared(dbenv)) != 0)
 			goto errlock;
 
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 		ret = __rep_client_dbinit(dbenv, init_db, REP_DB);
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 		if (ret != 0)
 			goto errlock;
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_LOCK(dbenv);
 		rep->start_th = 0;
 		if (role_chg) {
 			F_CLR(rep, REP_F_READY);
 			rep->in_recovery = 0;
 		}
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 
 		/*
 		 * If this client created a newly replicated environment,
@@ -370,29 +450,27 @@ __rep_start(dbenv, dbt, flags)
 		 */
 		if (announce)
 			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0);
+			    DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0, 0);
 		else
 			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_ALIVE_REQ, NULL, NULL, 0);
+			    DB_EID_BROADCAST, REP_ALIVE_REQ, NULL, NULL, 0, 0);
 	}
 
 	if (0) {
 		/*
 		 * We have separate labels for errors.  If we're returning an
 		 * error before we've set start_th, we use 'err'.  If
-		 * we are erroring while holding the rep_mutex, then we use
+		 * we are erroring while holding the region mutex, then we use
 		 * 'errunlock' label.  If we're erroring without holding the rep
 		 * mutex we must use 'errlock'.
 		 */
-errlock:
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-errunlock:
-		rep->start_th = 0;
+errlock:	REP_SYSTEM_LOCK(dbenv);
+errunlock:	rep->start_th = 0;
 		if (role_chg) {
 			F_CLR(rep, REP_F_READY);
 			rep->in_recovery = 0;
 		}
-err:		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+err:		REP_SYSTEM_UNLOCK(dbenv);
 	}
 	return (ret);
 }
@@ -443,7 +521,7 @@ __rep_client_dbinit(dbenv, startup, which)
 		return (0);
 
 	if (startup) {
-		if ((ret = db_create(&dbp, dbenv, DB_REP_CREATE)) != 0)
+		if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 			goto err;
 		/*
 		 * Ignore errors, because if the file doesn't exist, this
@@ -452,7 +530,7 @@ __rep_client_dbinit(dbenv, startup, which)
 		(void)__db_remove(dbp, NULL, name, NULL, DB_FORCE);
 	}
 
-	if ((ret = db_create(&dbp, dbenv, DB_REP_CREATE)) != 0)
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
 		goto err;
 	if (which == REP_DB &&
 	    (ret = __bam_set_bt_compare(dbp, __rep_bt_cmp)) != 0)
@@ -545,10 +623,10 @@ __rep_abort_prepared(dbenv)
 	region = mgr->reginfo.primary;
 
 	do_aborts = 0;
-	R_LOCK(dbenv, &mgr->reginfo);
+	TXN_SYSTEM_LOCK(dbenv);
 	if (region->stat.st_nrestores != 0)
 		do_aborts = 1;
-	R_UNLOCK(dbenv, &mgr->reginfo);
+	TXN_SYSTEM_UNLOCK(dbenv);
 
 	if (do_aborts) {
 		op = DB_FIRST;
@@ -584,13 +662,13 @@ __rep_restore_prepared(dbenv)
 {
 	DB_LOGC *logc;
 	DB_LSN ckp_lsn, lsn;
+	DB_TXNHEAD *txninfo;
 	DBT rec;
 	__txn_ckp_args *ckp_args;
 	__txn_regop_args *regop_args;
 	__txn_xa_regop_args *prep_args;
 	int ret, t_ret;
 	u_int32_t hi_txn, low_txn, rectype, status;
-	void *txninfo;
 
 	txninfo = NULL;
 	ckp_args = NULL;
@@ -774,7 +852,10 @@ err:	t_ret = __log_c_close(logc);
 	return (ret == 0 ? t_ret : ret);
 }
 
-static int
+/*
+ * PUBLIC: int __rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
 __rep_get_limit(dbenv, gbytesp, bytesp)
 	DB_ENV *dbenv;
 	u_int32_t *gbytesp, *bytesp;
@@ -806,8 +887,10 @@ __rep_get_limit(dbenv, gbytesp, bytesp)
  * __rep_set_limit --
  *	Set a limit on the amount of data that will be sent during a single
  * invocation of __rep_process_message.
+ *
+ * PUBLIC: int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
  */
-static int
+int
 __rep_set_limit(dbenv, gbytes, bytes)
 	DB_ENV *dbenv;
 	u_int32_t gbytes, bytes;
@@ -827,14 +910,14 @@ __rep_set_limit(dbenv, gbytes, bytes)
 	}
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (bytes > GIGABYTE) {
 		gbytes += bytes / GIGABYTE;
 		bytes = bytes % GIGABYTE;
 	}
 	rep->gbytes = gbytes;
 	rep->bytes = bytes;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
 	return (0);
 }
@@ -842,10 +925,14 @@ __rep_set_limit(dbenv, gbytes, bytes)
 /*
  * __rep_set_request --
  *	Set the minimum and maximum number of log records that we wait
- * before retransmitting.
+ *	before retransmitting.
+ *
+ * !!!
  * UNDOCUMENTED.
+ *
+ * PUBLIC: int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
  */
-static int
+int
 __rep_set_request(dbenv, min, max)
 	DB_ENV *dbenv;
 	u_int32_t min, max;
@@ -867,21 +954,22 @@ __rep_set_request(dbenv, min, max)
 	}
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
+
 	/*
-	 * Note we acquire the rep_mutexp or the db_mutexp as needed.
+	 * We acquire the mtx_region or mtx_clientdb mutexes as needed.
 	 */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	rep->request_gap = min;
 	rep->max_gap = max;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 	dblp = dbenv->lg_handle;
 	if (dblp != NULL && (lp = dblp->reginfo.primary) != NULL) {
 		lp->wait_recs = 0;
 		lp->rcvd_recs = 0;
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 
 	return (0);
 }
@@ -889,8 +977,12 @@ __rep_set_request(dbenv, min, max)
 /*
  * __rep_set_transport --
  *	Set the transport function for replication.
+ *
+ * PUBLIC: int __rep_set_rep_transport __P((DB_ENV *, int,
+ * PUBLIC:     int (*)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *,
+ * PUBLIC:     int, u_int32_t)));
  */
-static int
+int
 __rep_set_rep_transport(dbenv, eid, f_send)
 	DB_ENV *dbenv;
 	int eid;
@@ -914,478 +1006,14 @@ __rep_set_rep_transport(dbenv, eid, f_send)
 	return (0);
 }
 
-/*
- * __rep_elect --
- *	Called after master failure to hold/participate in an election for
- *	a new master.
- */
-static int
-__rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags)
-	DB_ENV *dbenv;
-	int nsites, nvotes, priority;
-	u_int32_t timeout;
-	int *eidp;
-	u_int32_t flags;
-{
-	DB_LOG *dblp;
-	DB_LSN lsn;
-	DB_REP *db_rep;
-	REP *rep;
-	int ack, done, in_progress, ret, send_vote;
-	u_int32_t egen, orig_tally, tiebreaker, to;
-#ifdef DIAGNOSTIC
-	DB_MSGBUF mb;
-#endif
-
-	PANIC_CHECK(dbenv);
-	COMPQUIET(flags, 0);
-	ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle, "rep_elect", DB_INIT_REP);
-
-	/* Error checking. */
-	if (nsites <= 0) {
-		__db_err(dbenv,
-		    "DB_ENV->rep_elect: nsites must be greater than 0");
-		return (EINVAL);
-	}
-	if (nvotes < 0) {
-		__db_err(dbenv,
-		    "DB_ENV->rep_elect: nvotes may not be negative");
-		return (EINVAL);
-	}
-	if (priority < 0) {
-		__db_err(dbenv,
-		    "DB_ENV->rep_elect: priority may not be negative");
-		return (EINVAL);
-	}
-	if (nsites < nvotes) {
-		__db_err(dbenv,
-    "DB_ENV->rep_elect: nvotes (%d) is larger than nsites (%d)",
-		    nvotes, nsites);
-		return (EINVAL);
-	}
-
-	ack = nvotes;
-	/* If they give us a 0 for nvotes, default to simple majority.  */
-	if (nvotes == 0)
-		ack = (nsites / 2) + 1;
-
-	/*
-	 * XXX
-	 * If users give us less than a majority, they run the risk of
-	 * having a network partition.  However, this also allows the
-	 * scenario of master/1 client to elect the client.  Allow
-	 * sub-majority values, but give a warning.
-	 */
-	if (nvotes <= (nsites / 2)) {
-		__db_err(dbenv,
-    "DB_ENV->rep_elect:WARNING: nvotes (%d) is sub-majority with nsites (%d)",
-		    nvotes, nsites);
-	}
-
-	db_rep = dbenv->rep_handle;
-	rep = db_rep->region;
-	dblp = dbenv->lg_handle;
-
-	RPRINT(dbenv, rep,
-	    (dbenv, &mb, "Start election nsites %d, ack %d, priority %d",
-	    nsites, ack, priority));
-
-	R_LOCK(dbenv, &dblp->reginfo);
-	lsn = ((LOG *)dblp->reginfo.primary)->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
-
-	orig_tally = 0;
-	to = timeout;
-	if ((ret = __rep_elect_init(dbenv,
-	    &lsn, nsites, ack, priority, &in_progress, &orig_tally)) != 0) {
-		if (ret == DB_REP_NEWMASTER) {
-			ret = 0;
-			*eidp = dbenv->rep_eid;
-		}
-		goto err;
-	}
-	/*
-	 * If another thread is in the middle of an election we
-	 * just quietly return and not interfere.
-	 */
-	if (in_progress) {
-		*eidp = rep->master_id;
-		return (0);
-	}
-	(void)__rep_send_message(dbenv,
-	    DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0);
-	ret = __rep_wait(dbenv, to/4, eidp, REP_F_EPHASE1);
-	switch (ret) {
-		case 0:
-			/* Check if we found a master. */
-			if (*eidp != DB_EID_INVALID) {
-				RPRINT(dbenv, rep, (dbenv, &mb,
-				    "Found master %d", *eidp));
-				goto edone;
-			}
-			/*
-			 * If we didn't find a master, continue
-			 * the election.
-			 */
-			break;
-		case DB_REP_EGENCHG:
-			/*
-			 * Egen changed, just continue with election.
-			 */
-			break;
-		case DB_TIMEOUT:
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Did not find master.  Sending vote1"));
-			break;
-		default:
-			goto err;
-	}
-restart:
-	/* Generate a randomized tiebreaker value. */
-	__os_unique_id(dbenv, &tiebreaker);
-
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	F_SET(rep, REP_F_EPHASE1 | REP_F_NOARCHIVE);
-	F_CLR(rep, REP_F_TALLY);
-
-	/*
-	 * We are about to participate at this egen.  We must
-	 * write out the next egen before participating in this one
-	 * so that if we crash we can never participate in this egen
-	 * again.
-	 */
-	if ((ret = __rep_write_egen(dbenv, rep->egen + 1)) != 0)
-		goto lockdone;
-
-	/* Tally our own vote */
-	if (__rep_tally(dbenv, rep, rep->eid, &rep->sites, rep->egen,
-	    rep->tally_off) != 0) {
-		ret = EINVAL;
-		goto lockdone;
-	}
-	__rep_cmp_vote(dbenv, rep, &rep->eid, &lsn, priority, rep->gen,
-	    tiebreaker);
-
-	RPRINT(dbenv, rep, (dbenv, &mb, "Beginning an election"));
-
-	/* Now send vote */
-	send_vote = DB_EID_INVALID;
-	egen = rep->egen;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	__rep_send_vote(dbenv, &lsn, nsites, ack, priority, tiebreaker, egen,
-	    DB_EID_BROADCAST, REP_VOTE1);
-	DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL);
-	ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE1);
-	switch (ret) {
-		case 0:
-			/* Check if election complete or phase complete. */
-			if (*eidp != DB_EID_INVALID) {
-				RPRINT(dbenv, rep, (dbenv, &mb,
-				    "Ended election phase 1 %d", ret));
-				goto edone;
-			}
-			goto phase2;
-		case DB_REP_EGENCHG:
-			if (to > timeout)
-				to = timeout;
-			to = (to * 8) / 10;
-			RPRINT(dbenv, rep, (dbenv, &mb,
-"Egen changed while waiting. Now %lu.  New timeout %lu, orig timeout %lu",
-			    (u_long)rep->egen, (u_long)to, (u_long)timeout));
-			/*
-			 * If the egen changed while we were sleeping, that
-			 * means we're probably late to the next election,
-			 * so we'll backoff our timeout so that we don't get
-			 * into an out-of-phase election scenario.
-			 *
-			 * Backoff to 80% of the current timeout.
-			 */
-			goto restart;
-		case DB_TIMEOUT:
-			break;
-		default:
-			goto err;
-	}
-	/*
-	 * If we got here, we haven't heard from everyone, but we've
-	 * run out of time, so it's time to decide if we have enough
-	 * votes to pick a winner and if so, to send out a vote to
-	 * the winner.
-	 */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	/*
-	 * If our egen changed while we were waiting.  We need to
-	 * essentially reinitialize our election.
-	 */
-	if (egen != rep->egen) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		RPRINT(dbenv, rep, (dbenv, &mb, "Egen changed from %lu to %lu",
-		    (u_long)egen, (u_long)rep->egen));
-		goto restart;
-	}
-	if (rep->sites >= rep->nvotes) {
-
-		/* We think we've seen enough to cast a vote. */
-		send_vote = rep->winner;
-		/*
-		 * See if we won.  This will make sure we
-		 * don't count ourselves twice if we're racing
-		 * with incoming votes.
-		 */
-		if (rep->winner == rep->eid) {
-			(void)__rep_tally(dbenv, rep, rep->eid, &rep->votes,
-			    egen, rep->v2tally_off);
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Counted my vote %d", rep->votes));
-		}
-		F_SET(rep, REP_F_EPHASE2);
-		F_CLR(rep, REP_F_EPHASE1);
-	}
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	if (send_vote == DB_EID_INVALID) {
-		/* We do not have enough votes to elect. */
-		RPRINT(dbenv, rep, (dbenv, &mb,
-		    "Not enough votes to elect: recvd %d of %d from %d sites",
-		    rep->sites, rep->nvotes, rep->nsites));
-		ret = DB_REP_UNAVAIL;
-		goto err;
-
-	} else {
-		/*
-		 * We have seen enough vote1's.  Now we need to wait
-		 * for all the vote2's.
-		 */
-		if (send_vote != rep->eid) {
-			RPRINT(dbenv, rep, (dbenv, &mb, "Sending vote"));
-			__rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen,
-			    send_vote, REP_VOTE2);
-			/*
-			 * If we are NOT the new master we want to send
-			 * our vote to the winner, and wait longer.  The
-			 * reason is that the winner may be "behind" us
-			 * in the election waiting and if the master is
-			 * down, the winner will wait the full timeout
-			 * and we want to give the winner enough time to
-			 * process all the votes.  Otherwise we could
-			 * incorrectly return DB_REP_UNAVAIL and start a
-			 * new election before the winner can declare
-			 * itself.
-			 */
-			to = to * 2;
-
-		}
-
-phase2:		ret = __rep_wait(dbenv, to, eidp, REP_F_EPHASE2);
-		RPRINT(dbenv, rep, (dbenv, &mb,
-		    "Ended election phase 2 %d", ret));
-		switch (ret) {
-			case 0:
-				goto edone;
-			case DB_REP_EGENCHG:
-				if (to > timeout)
-					to = timeout;
-				to = (to * 8) / 10;
-				RPRINT(dbenv, rep, (dbenv, &mb,
-"While waiting egen changed to %lu.  Phase 2 New timeout %lu, orig timeout %lu",
-				    (u_long)rep->egen,
-				    (u_long)to, (u_long)timeout));
-				goto restart;
-			case DB_TIMEOUT:
-				ret = DB_REP_UNAVAIL;
-				break;
-			default:
-				goto err;
-		}
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		if (egen != rep->egen) {
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Egen ph2 changed from %lu to %lu",
-			    (u_long)egen, (u_long)rep->egen));
-			goto restart;
-		}
-		done = rep->votes >= rep->nvotes;
-		RPRINT(dbenv, rep, (dbenv, &mb,
-		    "After phase 2: done %d, votes %d, nsites %d",
-		    done, rep->votes, rep->nsites));
-		if (send_vote == rep->eid && done) {
-			__rep_elect_master(dbenv, rep, eidp);
-			ret = 0;
-			goto lockdone;
-		}
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	}
-
-err:	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-lockdone:
-	/*
-	 * If we get here because of a non-election error, then we
-	 * did not tally our vote.  The only non-election error is
-	 * from elect_init where we were unable to grow_sites.  In
-	 * that case we do not want to discard all known election info.
-	 */
-	if (ret == 0 || ret == DB_REP_UNAVAIL)
-		__rep_elect_done(dbenv, rep);
-	else if (orig_tally)
-		F_SET(rep, orig_tally);
-
-	/*
-	 * If the election finished elsewhere, we need to decrement
-	 * the elect_th anyway.
-	 */
-	if (0)
-edone:		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	rep->elect_th = 0;
-
-	RPRINT(dbenv, rep, (dbenv, &mb,
-	    "Ended election with %d, sites %d, egen %lu, flags 0x%lx",
-	    ret, rep->sites, (u_long)rep->egen, (u_long)rep->flags));
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-DB_TEST_RECOVERY_LABEL
-	return (ret);
-}
-
-/*
- * __rep_elect_init
- *	Initialize an election.  Sets beginp non-zero if the election is
- * already in progress; makes it 0 otherwise.
- */
-static int
-__rep_elect_init(dbenv, lsnp, nsites, nvotes, priority, beginp, otally)
-	DB_ENV *dbenv;
-	DB_LSN *lsnp;
-	int nsites, nvotes, priority;
-	int *beginp;
-	u_int32_t *otally;
-{
-	DB_REP *db_rep;
-	REP *rep;
-	int ret;
-
-	db_rep = dbenv->rep_handle;
-	rep = db_rep->region;
-
-	ret = 0;
-
-	/* We may miscount, as we don't hold the replication mutex here. */
-	rep->stat.st_elections++;
-
-	/* If we are already a master; simply broadcast that fact and return. */
-	if (F_ISSET(rep, REP_F_MASTER)) {
-		(void)__rep_send_message(dbenv,
-		    DB_EID_BROADCAST, REP_NEWMASTER, lsnp, NULL, 0);
-		rep->stat.st_elections_won++;
-		return (DB_REP_NEWMASTER);
-	}
-
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	if (otally != NULL)
-		*otally = F_ISSET(rep, REP_F_TALLY);
-	*beginp = IN_ELECTION(rep) || rep->elect_th;
-	if (!*beginp) {
-		/*
-		 * Make sure that we always initialize all the election fields
-		 * before putting ourselves in an election state.  That means
-		 * issuing calls that can fail (allocation) before setting all
-		 * the variables.
-		 */
-		if (nsites > rep->asites &&
-		    (ret = __rep_grow_sites(dbenv, nsites)) != 0)
-			goto err;
-		DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL);
-		rep->elect_th = 1;
-		rep->nsites = nsites;
-		rep->nvotes = nvotes;
-		rep->priority = priority;
-		rep->master_id = DB_EID_INVALID;
-	}
-DB_TEST_RECOVERY_LABEL
-err:	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	return (ret);
-}
-
-/*
- * __rep_elect_master
- *	Set up for new master from election.  Must be called with
- *	the db_rep->rep_mutex held.
- *
- * PUBLIC: void __rep_elect_master __P((DB_ENV *, REP *, int *));
- */
-void
-__rep_elect_master(dbenv, rep, eidp)
-	DB_ENV *dbenv;
-	REP *rep;
-	int *eidp;
-{
-#ifdef DIAGNOSTIC
-	DB_MSGBUF mb;
-#else
-	COMPQUIET(dbenv, NULL);
-#endif
-	rep->master_id = rep->eid;
-	F_SET(rep, REP_F_MASTERELECT);
-	if (eidp != NULL)
-		*eidp = rep->master_id;
-	rep->stat.st_elections_won++;
-	RPRINT(dbenv, rep, (dbenv, &mb,
-	    "Got enough votes to win; election done; winner is %d, gen %lu",
-	    rep->master_id, (u_long)rep->gen));
-}
-
-static int
-__rep_wait(dbenv, timeout, eidp, flags)
-	DB_ENV *dbenv;
-	u_int32_t timeout;
-	int *eidp;
-	u_int32_t flags;
-{
-	DB_REP *db_rep;
-	REP *rep;
-	int done, echg;
-	u_int32_t egen, sleeptime;
-
-	done = echg = 0;
-	db_rep = dbenv->rep_handle;
-	rep = db_rep->region;
-	egen = rep->egen;
-
-	/*
-	 * The user specifies an overall timeout function, but checking
-	 * is cheap and the timeout may be a generous upper bound.
-	 * Sleep repeatedly for the smaller of .5s and timeout/10.
-	 */
-	sleeptime = (timeout > 5000000) ? 500000 : timeout / 10;
-	if (sleeptime == 0)
-		sleeptime++;
-	while (timeout > 0) {
-		__os_sleep(dbenv, 0, sleeptime);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		echg = egen != rep->egen;
-		done = !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID;
-
-		*eidp = rep->master_id;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-
-		if (done)
-			return (0);
-
-		if (echg)
-			return (DB_REP_EGENCHG);
-
-		if (timeout > sleeptime)
-			timeout -= sleeptime;
-		else
-			timeout = 0;
-	}
-	return (DB_TIMEOUT);
-}
-
 /*
  * __rep_flush --
  *	Re-push the last log record to all clients, in case they've lost
- * messages and don't know it.
+ *	messages and don't know it.
+ *
+ * PUBLIC: int __rep_flush __P((DB_ENV *));
  */
-static int
+int
 __rep_flush(dbenv)
 	DB_ENV *dbenv;
 {
@@ -1407,9 +1035,86 @@ __rep_flush(dbenv)
 		goto err;
 
 	(void)__rep_send_message(dbenv,
-	    DB_EID_BROADCAST, REP_LOG, &lsn, &rec, 0);
+	    DB_EID_BROADCAST, REP_LOG, &lsn, &rec, 0, 0);
 
 err:	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
 		ret = t_ret;
 	return (ret);
 }
+
+/*
+ * __rep_sync --
+ *	Force a synchronization to occur between this client and the master.
+ *	This is the other half of configuring DELAYCLIENT.
+ *
+ * PUBLIC: int __rep_sync __P((DB_ENV *, u_int32_t));
+ */
+int
+__rep_sync(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_LOG *dblp;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	LOG *lp;
+	REP *rep;
+	int master;
+	u_int32_t type;
+
+	COMPQUIET(flags, 0);
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv, dbenv->rep_handle,
+	    "rep_sync", DB_INIT_REP);
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	/*
+	 * Simple cases.  If we're not in the DELAY state we have nothing
+	 * to do.  If we don't know who the master is, send a MASTER_REQ.
+	 */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	lsn = lp->verify_lsn;
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	REP_SYSTEM_LOCK(dbenv);
+	master = rep->master_id;
+	if (master == DB_EID_INVALID) {
+		REP_SYSTEM_UNLOCK(dbenv);
+		(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
+		    REP_MASTER_REQ, NULL, NULL, 0, 0);
+		return (0);
+	}
+	/*
+	 * We want to hold the rep mutex to test and then clear the
+	 * DELAY flag.  Racing threads in here could otherwise result
+	 * in dual data streams.
+	 */
+	if (!F_ISSET(rep, REP_F_DELAY)) {
+		REP_SYSTEM_UNLOCK(dbenv);
+		return (0);
+	}
+
+	/*
+	 * If we get here, we clear the delay flag and kick off a
+	 * synchronization.  From this point forward, we will
+	 * synchronize until the next time the master changes.
+	 */
+	F_CLR(rep, REP_F_DELAY);
+	REP_SYSTEM_UNLOCK(dbenv);
+	/*
+	 * When we set REP_F_DELAY, we set verify_lsn to the real verify
+	 * lsn if we need to verify, or we zeroed it out if this is a client
+	 * that needs to sync up from the beginning.  So, send the type
+	 * of message now that __rep_new_master delayed sending.
+	 */
+	if (IS_ZERO_LSN(lsn))
+		type = REP_ALL_REQ;
+	else
+		type = REP_VERIFY_REQ;
+	(void)__rep_send_message(dbenv, master, type, &lsn, NULL, 0,
+	    DB_REP_ANYWHERE);
+	return (0);
+}
diff --git a/storage/bdb/rep/rep_record.c b/storage/bdb/rep/rep_record.c
index 2421c5af294..9944ba3eced 100644
--- a/storage/bdb/rep/rep_record.c
+++ b/storage/bdb/rep/rep_record.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_record.c,v 1.255 2004/11/04 18:35:29 sue Exp $
+ * $Id: rep_record.c,v 12.25 2005/10/20 18:57:13 bostic Exp $
  */
 
 #include "db_config.h"
@@ -34,10 +34,8 @@
 #include "dbinc/mp.h"
 #include "dbinc/txn.h"
 
-static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *, int *));
 static int __rep_collect_txn __P((DB_ENV *, DB_LSN *, LSN_COLLECTION *));
 static int __rep_do_ckp __P((DB_ENV *, DBT *, REP_CONTROL *));
-static int __rep_dorecovery __P((DB_ENV *, DB_LSN *, DB_LSN *));
 static int __rep_getnext __P((DB_ENV *));
 static int __rep_lsn_cmp __P((const void *, const void *));
 static int __rep_newfile __P((DB_ENV *, REP_CONTROL *, DB_LSN *));
@@ -45,7 +43,7 @@ static int __rep_process_rec __P((DB_ENV *,
     REP_CONTROL *, DBT *, u_int32_t *, DB_LSN *));
 static int __rep_remfirst __P((DB_ENV *, DBT *, DBT *));
 static int __rep_resend_req __P((DB_ENV *, int));
-static int __rep_verify_match __P((DB_ENV *, DB_LSN *, time_t));
+static int __rep_skip_msg __P((DB_ENV *, REP *, int, u_int32_t));
 
 /* Used to consistently designate which messages ought to be received where. */
 
@@ -67,35 +65,60 @@ static int __rep_verify_match __P((DB_ENV *, DB_LSN *, time_t));
 		REP_PRINT_MESSAGE(dbenv,				\
 		    *eidp, rp, "rep_process_message");			\
 		(void)__rep_send_message(dbenv,				\
-		    DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0);	\
+		    DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0, 0);	\
 		ret = DB_REP_DUPMASTER;					\
 		goto errlock;						\
 	}								\
 } while (0)
 
-#define	MASTER_CHECK(dbenv, eid, rep) do {				\
-	if (rep->master_id == DB_EID_INVALID) {				\
-		RPRINT(dbenv, rep, (dbenv, &mb,				\
-		    "Received record from %d, master is INVALID", eid));\
-		ret = 0;						\
-		(void)__rep_send_message(dbenv,				\
-		    DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0);	\
-		goto errlock;						\
-	}								\
-	if (eid != rep->master_id) {					\
-		__db_err(dbenv,						\
-		   "Received master record from %d, master is %d",	\
-		   eid, rep->master_id);				\
-		ret = EINVAL;						\
-		goto errlock;						\
+/*
+ * If a client is attempting to service a request it does not have,
+ * call rep_skip_msg to skip this message and force a rerequest to the
+ * sender.  We don't hold the mutex for the stats and may miscount.
+ */
+#define	CLIENT_REREQ do {						\
+	if (F_ISSET(rep, REP_F_CLIENT)) {				\
+		rep->stat.st_client_svc_req++;				\
+		if (ret == DB_NOTFOUND) {				\
+			rep->stat.st_client_svc_miss++;			\
+			ret = __rep_skip_msg(dbenv, rep, *eidp, rp->rectype);\
+		}							\
 	}								\
 } while (0)
 
 #define	MASTER_UPDATE(dbenv, renv) do {					\
-	MUTEX_LOCK((dbenv), &(renv)->mutex);				\
+	REP_SYSTEM_LOCK(dbenv);						\
 	F_SET((renv), DB_REGENV_REPLOCKED);				\
 	(void)time(&(renv)->op_timestamp);				\
-	MUTEX_UNLOCK((dbenv), &(renv)->mutex);				\
+	REP_SYSTEM_UNLOCK(dbenv);					\
+} while (0)
+
+#define	RECOVERING_SKIP do {						\
+	if (recovering) {						\
+		/* Not holding region mutex, may miscount */		\
+		rep->stat.st_msgs_recover++;				\
+		ret = __rep_skip_msg(dbenv, rep, *eidp, rp->rectype);	\
+		goto errlock;						\
+	}								\
+} while (0)
+
+/*
+ * If we're recovering the log we only want log records that are in the
+ * range we need to recover.  Otherwise we can end up storing a huge
+ * number of "new" records, only to truncate the temp database later after
+ * we run recovery.  If we are actively delaying a sync-up, we also skip
+ * all incoming log records until the application requests sync-up.
+ */
+#define	RECOVERING_LOG_SKIP do {					\
+	if (F_ISSET(rep, REP_F_DELAY) ||				\
+	    (recovering &&						\
+	    (!F_ISSET(rep, REP_F_RECOVER_LOG) ||			\
+	     log_compare(&rp->lsn, &rep->last_lsn) > 0))) {		\
+		/* Not holding region mutex, may miscount */		\
+		rep->stat.st_msgs_recover++;				\
+		ret = __rep_skip_msg(dbenv, rep, *eidp, rp->rectype);	\
+		goto errlock;						\
+	}								\
 } while (0)
 
 #define	ANYSITE(rep)
@@ -125,19 +148,16 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 	DB_LSN *ret_lsnp;
 {
 	DB_LOG *dblp;
-	DB_LOGC *logc;
-	DB_LSN endlsn, lsn, oldfilelsn;
+	DB_LSN lsn;
 	DB_REP *db_rep;
-	DBT *d, data_dbt, mylog;
+	DBT data_dbt;
 	LOG *lp;
 	REGENV *renv;
 	REGINFO *infop;
 	REP *rep;
 	REP_CONTROL *rp;
-	REP_VOTE_INFO *vi;
-	u_int32_t bytes, egen, flags, gen, gbytes, rectype, type;
-	int check_limit, cmp, done, do_req, is_dup;
-	int master, match, old, recovering, ret, t_ret;
+	u_int32_t egen, gen;
+	int cmp, recovering, ret;
 	time_t savetime;
 #ifdef DIAGNOSTIC
 	DB_MSGBUF mb;
@@ -174,7 +194,7 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 	/*
 	 * Acquire the replication lock.
 	 */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (rep->start_th != 0) {
 		/*
 		 * If we're racing with a thread in rep_start, then
@@ -182,7 +202,9 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 		 */
 		RPRINT(dbenv, rep, (dbenv, &mb,
 		    "Racing rep_start, ignore message."));
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		if (F_ISSET(rp, DB_LOG_PERM))
+			ret = DB_REP_IGNORE;
+		REP_SYSTEM_UNLOCK(dbenv);
 		goto out;
 	}
 	rep->msg_th++;
@@ -191,7 +213,7 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 	savetime = renv->rep_timestamp;
 
 	rep->stat.st_msgs_processed++;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
 	REP_PRINT_MESSAGE(dbenv, *eidp, rp, "rep_process_message");
 
@@ -223,6 +245,8 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 		 * We don't hold the rep mutex, and could miscount if we race.
 		 */
 		rep->stat.st_msgs_badgen++;
+		if (F_ISSET(rp, DB_LOG_PERM))
+			ret = DB_REP_IGNORE;
 		goto errlock;
 	}
 
@@ -237,7 +261,7 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 			if (rp->rectype != REP_DUPMASTER)
 				(void)__rep_send_message(dbenv,
 				    DB_EID_BROADCAST, REP_DUPMASTER,
-				    NULL, NULL, 0);
+				    NULL, NULL, 0, 0);
 			goto errlock;
 		}
 
@@ -250,7 +274,7 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 		 */
 		if (rp->rectype == REP_ALIVE ||
 		    rp->rectype == REP_VOTE1 || rp->rectype == REP_VOTE2) {
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_LOCK(dbenv);
 			RPRINT(dbenv, rep, (dbenv, &mb,
 			    "Updating gen from %lu to %lu",
 			    (u_long)gen, (u_long)rp->gen));
@@ -260,23 +284,26 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 			 * Updating of egen will happen when we process the
 			 * message below for each message type.
 			 */
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 			if (rp->rectype == REP_ALIVE)
 				(void)__rep_send_message(dbenv,
 				    DB_EID_BROADCAST, REP_MASTER_REQ, NULL,
-				    NULL, 0);
+				    NULL, 0, 0);
 		} else if (rp->rectype != REP_NEWMASTER) {
-			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0);
+			/*
+			 * Ignore this message, retransmit if needed.
+			 */
+			if (__rep_check_doreq(dbenv, rep))
+				(void)__rep_send_message(dbenv,
+				    DB_EID_BROADCAST, REP_MASTER_REQ,
+				    NULL, NULL, 0, 0);
 			goto errlock;
 		}
-
 		/*
 		 * If you get here, then you're a client and either you're
 		 * in an election or you have a NEWMASTER or an ALIVE message
 		 * whose processing will do the right thing below.
 		 */
-
 	}
 
 	/*
@@ -286,79 +313,14 @@ __rep_process_message(dbenv, control, rec, eidp, ret_lsnp)
 	 * PAGE* and FILE*.  We need to also accept LOG messages
 	 * if we're copying the log for recovery/backup.
 	 */
-	if (recovering) {
-		switch (rp->rectype) {
-		case REP_VERIFY:
-			MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-			cmp = log_compare(&lp->verify_lsn, &rp->lsn);
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-			if (cmp != 0)
-				goto skip;
-			break;
-		case REP_NEWFILE:
-		case REP_LOG:
-		case REP_LOG_MORE:
-			if (!F_ISSET(rep, REP_F_RECOVER_LOG))
-				goto skip;
-			/*
-			 * If we're recovering the log we only want
-			 * log records that are in the range we need
-			 * to recover.  Otherwise we can end up storing
-			 * a huge number of "new" records, only to
-			 * truncate the temp database later after we
-			 * run recovery.
-			 */
-			if (log_compare(&rp->lsn, &rep->last_lsn) > 0)
-				goto skip;
-			break;
-		case REP_ALIVE:
-		case REP_ALIVE_REQ:
-		case REP_DUPMASTER:
-		case REP_FILE_FAIL:
-		case REP_NEWCLIENT:
-		case REP_NEWMASTER:
-		case REP_NEWSITE:
-		case REP_PAGE:
-		case REP_PAGE_FAIL:
-		case REP_PAGE_MORE:
-		case REP_PAGE_REQ:
-		case REP_UPDATE:
-		case REP_UPDATE_REQ:
-		case REP_VERIFY_FAIL:
-		case REP_VOTE1:
-		case REP_VOTE2:
-			break;
-		default:
-skip:
-			/* Check for need to retransmit. */
-			/* Not holding rep_mutex, may miscount */
-			rep->stat.st_msgs_recover++;
-			MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-			do_req = __rep_check_doreq(dbenv, rep);
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-			if (do_req) {
-				/*
-				 * Don't respond to a MASTER_REQ with
-				 * a MASTER_REQ.
-				 */
-				if (rep->master_id == DB_EID_INVALID &&
-				    rp->rectype != REP_MASTER_REQ)
-					(void)__rep_send_message(dbenv,
-					    DB_EID_BROADCAST,
-					    REP_MASTER_REQ,
-					    NULL, NULL, 0);
-				else if (*eidp == rep->master_id)
-					ret = __rep_resend_req(dbenv, *eidp);
-			}
-			goto errlock;
-		}
-	}
-
 	switch (rp->rectype) {
 	case REP_ALIVE:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		ANYSITE(rep);
 		egen = *(u_int32_t *)rec->data;
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_LOCK(dbenv);
 		RPRINT(dbenv, rep, (dbenv, &mb,
 		    "Received ALIVE egen of %lu, mine %lu",
 		    (u_long)egen, (u_long)rep->egen));
@@ -370,348 +332,81 @@ skip:
 			__rep_elect_done(dbenv, rep);
 			rep->egen = egen;
 		}
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		break;
 	case REP_ALIVE_REQ:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		ANYSITE(rep);
 		dblp = dbenv->lg_handle;
-		R_LOCK(dbenv, &dblp->reginfo);
+		LOG_SYSTEM_LOCK(dbenv);
 		lsn = ((LOG *)dblp->reginfo.primary)->lsn;
-		R_UNLOCK(dbenv, &dblp->reginfo);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		LOG_SYSTEM_UNLOCK(dbenv);
+		REP_SYSTEM_LOCK(dbenv);
 		egen = rep->egen;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		data_dbt.data = &egen;
 		data_dbt.size = sizeof(egen);
 		(void)__rep_send_message(dbenv,
-		    *eidp, REP_ALIVE, &lsn, &data_dbt, 0);
-		goto errlock;
+		    *eidp, REP_ALIVE, &lsn, &data_dbt, 0, 0);
+		break;
+	case REP_ALL_REQ:
+		RECOVERING_SKIP;
+		ret = __rep_allreq(dbenv, rp, *eidp);
+		CLIENT_REREQ;
+		break;
+	case REP_BULK_LOG:
+		RECOVERING_LOG_SKIP;
+		CLIENT_ONLY(rep, rp);
+		ret = __rep_bulk_log(dbenv, rp, rec, savetime, ret_lsnp);
+		break;
+	case REP_BULK_PAGE:
+		/*
+		 * Handle even if we're recovering.
+		 */
+		CLIENT_ONLY(rep, rp);
+		ret = __rep_bulk_page(dbenv, *eidp, rp, rec);
+		break;
 	case REP_DUPMASTER:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		if (F_ISSET(rep, REP_F_MASTER))
 			ret = DB_REP_DUPMASTER;
-		goto errlock;
-	case REP_ALL_REQ:
-		MASTER_ONLY(rep, rp);
-		gbytes  = bytes = 0;
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		gbytes = rep->gbytes;
-		bytes = rep->bytes;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		check_limit = gbytes != 0 || bytes != 0;
-		if ((ret = __log_cursor(dbenv, &logc)) != 0)
-			goto errlock;
-		memset(&data_dbt, 0, sizeof(data_dbt));
-		oldfilelsn = lsn = rp->lsn;
-		type = REP_LOG;
-		flags = IS_ZERO_LSN(rp->lsn) ||
-		    IS_INIT_LSN(rp->lsn) ?  DB_FIRST : DB_SET;
-		for (ret = __log_c_get(logc, &lsn, &data_dbt, flags);
-		    ret == 0 && type == REP_LOG;
-		    ret = __log_c_get(logc, &lsn, &data_dbt, DB_NEXT)) {
-			/*
-			 * When a log file changes, we'll have a real log
-			 * record with some lsn [n][m], and we'll also want
-			 * to send a NEWFILE message with lsn [n-1][MAX].
-			 */
-			if (lsn.file != oldfilelsn.file)
-				(void)__rep_send_message(dbenv,
-				    *eidp, REP_NEWFILE, &oldfilelsn, NULL, 0);
-			if (check_limit) {
-				/*
-				 * data_dbt.size is only the size of the log
-				 * record;  it doesn't count the size of the
-				 * control structure. Factor that in as well
-				 * so we're not off by a lot if our log records
-				 * are small.
-				 */
-				while (bytes <
-				    data_dbt.size + sizeof(REP_CONTROL)) {
-					if (gbytes > 0) {
-						bytes += GIGABYTE;
-						--gbytes;
-						continue;
-					}
-					/*
-					 * We don't hold the rep mutex,
-					 * and may miscount.
-					 */
-					rep->stat.st_nthrottles++;
-					type = REP_LOG_MORE;
-					goto send;
-				}
-				bytes -= (data_dbt.size + sizeof(REP_CONTROL));
-			}
-
-send:			if (__rep_send_message(dbenv, *eidp, type,
-			    &lsn, &data_dbt, DB_LOG_RESEND) != 0)
-				break;
-
-			/*
-			 * If we are about to change files, then we'll need the
-			 * last LSN in the previous file.  Save it here.
-			 */
-			oldfilelsn = lsn;
-			oldfilelsn.offset += logc->c_len;
-		}
-
-		if (ret == DB_NOTFOUND)
-			ret = 0;
-		if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-			ret = t_ret;
-		goto errlock;
+		break;
 #ifdef NOTYET
 	case REP_FILE: /* TODO */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
 		break;
 	case REP_FILE_REQ:
-		MASTER_ONLY(rep, rp);
 		ret = __rep_send_file(dbenv, rec, *eidp);
-		goto errlock;
+		break;
 #endif
 	case REP_FILE_FAIL:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
 		/*
 		 * XXX
 		 */
 		break;
 	case REP_LOG:
 	case REP_LOG_MORE:
+		RECOVERING_LOG_SKIP;
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
-		is_dup = 0;
-		ret = __rep_apply(dbenv, rp, rec, ret_lsnp, &is_dup);
-		switch (ret) {
-		/*
-		 * We're in an internal backup and we've gotten 
-		 * all the log we need to run recovery.  Do so now.
-		 */
-		case DB_REP_LOGREADY:
-			if ((ret = __log_flush(dbenv, NULL)) != 0)
-				goto errlock;
-			if ((ret = __rep_verify_match(dbenv, &rep->last_lsn,
-			    savetime)) == 0) {
-				MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-				ZERO_LSN(rep->first_lsn);
-				ZERO_LSN(rep->last_lsn);
-				F_CLR(rep, REP_F_RECOVER_LOG);
-				MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			}
-			break;
-		/*
-		 * If we get any of the "normal" returns, we only process
-		 * LOG_MORE if this is not a duplicate record.  If the 
-		 * record is a duplicate we don't want to handle LOG_MORE
-		 * and request a multiple data stream (or trigger internal
-		 * initialization) since this could be a very old record
-		 * that no longer exists on the master.
-		 */
-		case DB_REP_ISPERM:
-		case DB_REP_NOTPERM:
-		case 0:
-			if (is_dup)
-				goto errlock;
-			else
-				break;
-		/*
-		 * Any other return (errors), we're done.
-		 */
-		default:
-			goto errlock;
-		}
-		if (rp->rectype == REP_LOG_MORE) {
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-			master = rep->master_id;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			R_LOCK(dbenv, &dblp->reginfo);
-			lsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
-			/*
-			 * If the master_id is invalid, this means that since
-			 * the last record was sent, somebody declared an
-			 * election and we may not have a master to request
-			 * things of.
-			 *
-			 * This is not an error;  when we find a new master,
-			 * we'll re-negotiate where the end of the log is and
-			 * try to bring ourselves up to date again anyway.
-			 */
-			MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-			if (master == DB_EID_INVALID)
-				ret = 0;
-			/*
-			 * If we've asked for a bunch of records, it could
-			 * either be from a LOG_REQ or ALL_REQ.  If we're
-			 * waiting for a gap to be filled, call loggap_req,
-			 * otherwise use ALL_REQ again.
-			 */
-			else if (IS_ZERO_LSN(lp->waiting_lsn)) {
-				MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-				if (__rep_send_message(dbenv,
-				    master, REP_ALL_REQ, &lsn, NULL, 0) != 0)
-					break;
-			} else {
-				__rep_loggap_req(dbenv, rep, &lsn, 1);
-				MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-			}
-		}
-		goto errlock;
+		ret = __rep_log(dbenv, rp, rec, savetime, ret_lsnp);
+		break;
 	case REP_LOG_REQ:
-		MASTER_ONLY(rep, rp);
-		if (rec != NULL && rec->size != 0) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "[%lu][%lu]: LOG_REQ max lsn: [%lu][%lu]",
-			    (u_long) rp->lsn.file, (u_long)rp->lsn.offset,
-			    (u_long)((DB_LSN *)rec->data)->file,
-			    (u_long)((DB_LSN *)rec->data)->offset));
-		}
-		/*
-		 * There are three different cases here.
-		 * 1. We asked for a particular LSN and got it.
-		 * 2. We asked for an LSN and it's not found because it is
-		 *	beyond the end of a log file and we need a NEWFILE msg.
-		 *	and then the record that was requested.
-		 * 3. We asked for an LSN and it simply doesn't exist, but
-		 *    doesn't meet any of those other criteria, in which case
-		 *    it's an error (that should never happen).
-		 * If we have a valid LSN and the request has a data_dbt with
-		 * it, then we need to send all records up to the LSN in the
-		 * data dbt.
-		 */
-		oldfilelsn = lsn = rp->lsn;
-		if ((ret = __log_cursor(dbenv, &logc)) != 0)
-			goto errlock;
-		memset(&data_dbt, 0, sizeof(data_dbt));
-		ret = __log_c_get(logc, &lsn, &data_dbt, DB_SET);
-
-		if (ret == 0) /* Case 1 */
-			(void)__rep_send_message(dbenv,
-			   *eidp, REP_LOG, &lsn, &data_dbt, DB_LOG_RESEND);
-		else if (ret == DB_NOTFOUND) {
-			R_LOCK(dbenv, &dblp->reginfo);
-			endlsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
-			if (endlsn.file > lsn.file) {
-				/*
-				 * Case 2:
-				 * Need to find the LSN of the last record in
-				 * file lsn.file so that we can send it with
-				 * the NEWFILE call.  In order to do that, we
-				 * need to try to get {lsn.file + 1, 0} and
-				 * then backup.
-				 */
-				endlsn.file = lsn.file + 1;
-				endlsn.offset = 0;
-				if ((ret = __log_c_get(logc,
-				    &endlsn, &data_dbt, DB_SET)) != 0 ||
-				    (ret = __log_c_get(logc,
-					&endlsn, &data_dbt, DB_PREV)) != 0) {
-					RPRINT(dbenv, rep, (dbenv, &mb,
-					    "Unable to get prev of [%lu][%lu]",
-					    (u_long)lsn.file,
-					    (u_long)lsn.offset));
-					/*
-					 * We want to push the error back
-					 * to the client so that the client
-					 * does an internal backup.  The
-					 * client asked for a log record
-					 * we no longer have and it is
-					 * outdated.
-					 * XXX - This could be optimized by
-					 * having the master perform and
-					 * send a REP_UPDATE message.  We
-					 * currently want the client to set
-					 * up its 'update' state prior to
-					 * requesting REP_UPDATE_REQ.
-					 */
-					ret = 0;
-					(void)__rep_send_message(dbenv, *eidp,
-					    REP_VERIFY_FAIL, &rp->lsn, NULL, 0);
-				} else {
-					endlsn.offset += logc->c_len;
-					(void)__rep_send_message(dbenv, *eidp,
-					    REP_NEWFILE, &endlsn, NULL, 0);
-				}
-			} else {
-				/* Case 3 */
-				__db_err(dbenv,
-				    "Request for LSN [%lu][%lu] fails",
-				    (u_long)lsn.file, (u_long)lsn.offset);
-				DB_ASSERT(0);
-				ret = EINVAL;
-			}
-		}
-
-		/*
-		 * If the user requested a gap, send the whole thing,
-		 * while observing the limits from set_rep_limit.
-		 */
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		gbytes = rep->gbytes;
-		bytes = rep->bytes;
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		check_limit = gbytes != 0 || bytes != 0;
-		type = REP_LOG;
-		while (ret == 0 && rec != NULL && rec->size != 0 &&
-		    type == REP_LOG) {
-			if ((ret =
-			    __log_c_get(logc, &lsn, &data_dbt, DB_NEXT)) != 0) {
-				if (ret == DB_NOTFOUND)
-					ret = 0;
-				break;
-			}
-			if (log_compare(&lsn, (DB_LSN *)rec->data) >= 0)
-				break;
-			/*
-			 * When a log file changes, we'll have a real log
-			 * record with some lsn [n][m], and we'll also want
-			 * to send a NEWFILE message with lsn [n-1][MAX].
-			 */
-			if (lsn.file != oldfilelsn.file)
-				(void)__rep_send_message(dbenv,
-				    *eidp, REP_NEWFILE, &oldfilelsn, NULL, 0);
-			if (check_limit) {
-				/*
-				 * data_dbt.size is only the size of the log
-				 * record;  it doesn't count the size of the
-				 * control structure. Factor that in as well
-				 * so we're not off by a lot if our log records
-				 * are small.
-				 */
-				while (bytes <
-				    data_dbt.size + sizeof(REP_CONTROL)) {
-					if (gbytes > 0) {
-						bytes += GIGABYTE;
-						--gbytes;
-						continue;
-					}
-					/*
-					 * We don't hold the rep mutex,
-					 * and may miscount.
-					 */
-					rep->stat.st_nthrottles++;
-					type = REP_LOG_MORE;
-					goto send1;
-				}
-				bytes -= (data_dbt.size + sizeof(REP_CONTROL));
-			}
-
-send1:			 if (__rep_send_message(dbenv, *eidp, type,
-			    &lsn, &data_dbt, DB_LOG_RESEND) != 0)
-				break;
-			/*
-			 * If we are about to change files, then we'll need the
-			 * last LSN in the previous file.  Save it here.
-			 */
-			oldfilelsn = lsn;
-			oldfilelsn.offset += logc->c_len;
-		}
-
-		if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-			ret = t_ret;
-		goto errlock;
+		RECOVERING_SKIP;
+		ret = __rep_logreq(dbenv, rp, rec, *eidp);
+		CLIENT_REREQ;
+		break;
 	case REP_NEWSITE:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		/* We don't hold the rep mutex, and may miscount. */
 		rep->stat.st_newsites++;
 
@@ -719,15 +414,18 @@ send1:			 if (__rep_send_message(dbenv, *eidp, type,
 		if (F_ISSET(rep, REP_F_MASTER)) {
 			dblp = dbenv->lg_handle;
 			lp = dblp->reginfo.primary;
-			R_LOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_LOCK(dbenv);
 			lsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_UNLOCK(dbenv);
 			(void)__rep_send_message(dbenv,
-			    *eidp, REP_NEWMASTER, &lsn, NULL, 0);
+			    *eidp, REP_NEWMASTER, &lsn, NULL, 0, 0);
 		}
 		ret = DB_REP_NEWSITE;
-		goto errlock;
+		break;
 	case REP_NEWCLIENT:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		/*
 		 * This message was received and should have resulted in the
 		 * application entering the machine ID in its machine table.
@@ -738,30 +436,31 @@ send1:			 if (__rep_send_message(dbenv, *eidp, type,
 		 * all the clients.
 		 */
 		(void)__rep_send_message(dbenv,
-		    DB_EID_BROADCAST, REP_NEWSITE, &rp->lsn, rec, 0);
+		    DB_EID_BROADCAST, REP_NEWSITE, &rp->lsn, rec, 0, 0);
 
 		ret = DB_REP_NEWSITE;
 
 		if (F_ISSET(rep, REP_F_CLIENT)) {
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_LOCK(dbenv);
 			egen = rep->egen;
 			if (*eidp == rep->master_id)
 				rep->master_id = DB_EID_INVALID;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 			data_dbt.data = &egen;
 			data_dbt.size = sizeof(egen);
 			(void)__rep_send_message(dbenv, DB_EID_BROADCAST,
-			    REP_ALIVE, &rp->lsn, &data_dbt, 0);
-			goto errlock;
+			    REP_ALIVE, &rp->lsn, &data_dbt, 0, 0);
+			break;
 		}
 		/* FALLTHROUGH */
 	case REP_MASTER_REQ:
+		RECOVERING_SKIP;
 		if (F_ISSET(rep, REP_F_MASTER)) {
-			R_LOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_LOCK(dbenv);
 			lsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
+			LOG_SYSTEM_UNLOCK(dbenv);
 			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0);
+			    DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0, 0);
 		}
 		/*
 		 * If there is no master, then we could get into a state
@@ -770,24 +469,26 @@ send1:			 if (__rep_send_message(dbenv, *eidp, type,
 		 * never get to the current gen.
 		 */
 		if (F_ISSET(rep, REP_F_CLIENT) && rp->gen < gen) {
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_LOCK(dbenv);
 			egen = rep->egen;
 			if (*eidp == rep->master_id)
 				rep->master_id = DB_EID_INVALID;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 			data_dbt.data = &egen;
 			data_dbt.size = sizeof(egen);
 			(void)__rep_send_message(dbenv, *eidp,
-			    REP_ALIVE, &rp->lsn, &data_dbt, 0);
-			goto errlock;
+			    REP_ALIVE, &rp->lsn, &data_dbt, 0, 0);
 		}
-		goto errlock;
+		break;
 	case REP_NEWFILE:
+		RECOVERING_LOG_SKIP;
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
 		ret = __rep_apply(dbenv, rp, rec, ret_lsnp, NULL);
-		goto errlock;
+		break;
 	case REP_NEWMASTER:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		ANYSITE(rep);
 		if (F_ISSET(rep, REP_F_MASTER) &&
 		    *eidp != dbenv->rep_eid) {
@@ -795,34 +496,57 @@ send1:			 if (__rep_send_message(dbenv, *eidp, type,
 			rep->stat.st_dupmasters++;
 			ret = DB_REP_DUPMASTER;
 			(void)__rep_send_message(dbenv,
-			    DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0);
-			goto errlock;
+			    DB_EID_BROADCAST, REP_DUPMASTER, NULL, NULL, 0, 0);
+			break;
 		}
 		ret = __rep_new_master(dbenv, rp, *eidp);
-		goto errlock;
+		break;
 	case REP_PAGE:
 	case REP_PAGE_MORE:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
 		ret = __rep_page(dbenv, *eidp, rp, rec);
 		break;
 	case REP_PAGE_FAIL:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
 		ret = __rep_page_fail(dbenv, *eidp, rec);
 		break;
 	case REP_PAGE_REQ:
-		MASTER_ONLY(rep, rp);
+		/*
+		 * Handle even if we're recovering.
+		 */
 		MASTER_UPDATE(dbenv, renv);
 		ret = __rep_page_req(dbenv, *eidp, rec);
+		CLIENT_REREQ;
+		break;
+	case REP_REREQUEST:
+		/*
+		 * Handle even if we're recovering.  Don't do a master
+		 * check.
+		 */
+		CLIENT_ONLY(rep, rp);
+		/*
+		 * Don't hold any mutex, may miscount.
+		 */
+		rep->stat.st_client_rerequests++;
+		ret = __rep_resend_req(dbenv, 1);
 		break;
 	case REP_UPDATE:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
-
 		ret = __rep_update_setup(dbenv, *eidp, rp, rec);
 		break;
 	case REP_UPDATE_REQ:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		MASTER_ONLY(rep, rp);
 		infop = dbenv->reginfo;
 		renv = infop->primary;
@@ -830,386 +554,58 @@ send1:			 if (__rep_send_message(dbenv, *eidp, type,
 		ret = __rep_update_req(dbenv, *eidp);
 		break;
 	case REP_VERIFY:
-		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
-		if (IS_ZERO_LSN(lp->verify_lsn))
-			goto errlock;
-
-		if ((ret = __log_cursor(dbenv, &logc)) != 0)
-			goto errlock;
-		memset(&mylog, 0, sizeof(mylog));
-		if ((ret = __log_c_get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
-			goto rep_verify_err;
-		match = 0;
-		memcpy(&rectype, mylog.data, sizeof(rectype));
-		if (mylog.size == rec->size &&
-		    memcmp(mylog.data, rec->data, rec->size) == 0)
-			match = 1;
-		DB_ASSERT(rectype == DB___txn_ckp);
-		/*
-		 * If we don't have a match, backup to the previous
-		 * checkpoint and try again.
-		 */
-		if (match == 0) {
-			ZERO_LSN(lsn);
-			if ((ret = __log_backup(dbenv, logc, &rp->lsn, &lsn,
-			    LASTCKP_CMP)) == 0) {
-				MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-				lp->verify_lsn = lsn;
-				lp->rcvd_recs = 0;
-				lp->wait_recs = rep->request_gap;
-				MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-				(void)__rep_send_message(dbenv,
-				    *eidp, REP_VERIFY_REQ, &lsn, NULL, 0);
-			} else if (ret == DB_NOTFOUND) {
-				/*
-				 * We've either run out of records because
-				 * logs have been removed or we've rolled back
-				 * all the way to the beginning.  In the latter
-				 * we don't think these sites were ever part of
-				 * the same environment and we'll say so.
-				 * In the former, request internal backup.
-				 */
-				if (rp->lsn.file == 1) {
-					__db_err(dbenv,
-			"Client was never part of master's environment");
-					ret = EINVAL;
-				} else {
-					rep->stat.st_outdated++;
-
-					R_LOCK(dbenv, &dblp->reginfo);
-					lsn = lp->lsn;
-					R_UNLOCK(dbenv, &dblp->reginfo);
-					MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-					F_CLR(rep, REP_F_RECOVER_VERIFY);
-					F_SET(rep, REP_F_RECOVER_UPDATE);
-					ZERO_LSN(rep->first_lsn);
-					MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-					(void)__rep_send_message(dbenv,
-					    *eidp, REP_UPDATE_REQ, NULL,
-					    NULL, 0);
-				}
+		if (recovering) {
+			MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+			cmp = log_compare(&lp->verify_lsn, &rp->lsn);
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+			/*
+			 * If this is not the verify record I want, skip it.
+			 */
+			if (cmp != 0) {
+				ret = __rep_skip_msg(
+				    dbenv, rep, *eidp, rp->rectype);
+				break;
 			}
-		} else
-			ret = __rep_verify_match(dbenv, &rp->lsn, savetime);
-
-rep_verify_err:	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-			ret = t_ret;
-		goto errlock;
+		}
+		CLIENT_ONLY(rep, rp);
+		ret = __rep_verify(dbenv, rp, rec, *eidp, savetime);
+		break;
 	case REP_VERIFY_FAIL:
+		/*
+		 * Handle even if we're recovering.
+		 */
 		CLIENT_ONLY(rep, rp);
-		MASTER_CHECK(dbenv, *eidp, rep);
-		/*
-		 * If any recovery flags are set, but not VERIFY,
-		 * then we ignore this message.  We are already
-		 * in the middle of updating.
-		 */
-		if (F_ISSET(rep, REP_F_RECOVER_MASK) &&
-		    !F_ISSET(rep, REP_F_RECOVER_VERIFY))
-			goto errlock;
-		rep->stat.st_outdated++;
-
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		/*
-		 * We don't want an old or delayed VERIFY_FAIL
-		 * message to throw us into internal initialization
-		 * when we shouldn't be.  
-		 *
-		 * Only go into internal initialization if:
-		 * We are in RECOVER_VERIFY and this LSN == verify_lsn.
-		 * We are not in any RECOVERY and we are expecting
-		 *    an LSN that no longer exists on the master.
-		 * Otherwise, ignore this message.
-		 */
-		if (((F_ISSET(rep, REP_F_RECOVER_VERIFY)) &&
-		    log_compare(&rp->lsn, &lp->verify_lsn) == 0) ||
-		    (F_ISSET(rep, REP_F_RECOVER_MASK) == 0 &&
-		    log_compare(&rp->lsn, &lp->ready_lsn) >= 0)) {
-			F_CLR(rep, REP_F_RECOVER_VERIFY);
-			F_SET(rep, REP_F_RECOVER_UPDATE);
-			ZERO_LSN(rep->first_lsn);
-			lp->wait_recs = rep->request_gap;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-			(void)__rep_send_message(dbenv,
-			    *eidp, REP_UPDATE_REQ, NULL, NULL, 0);
-		} else {
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-		}
-		goto errlock;
+		ret = __rep_verify_fail(dbenv, rp, *eidp);
+		break;
 	case REP_VERIFY_REQ:
-		MASTER_ONLY(rep, rp);
-		type = REP_VERIFY;
-		if ((ret = __log_cursor(dbenv, &logc)) != 0)
-			goto errlock;
-		d = &data_dbt;
-		memset(d, 0, sizeof(data_dbt));
-		F_SET(logc, DB_LOG_SILENT_ERR);
-		ret = __log_c_get(logc, &rp->lsn, d, DB_SET);
-		/*
-		 * If the LSN was invalid, then we might get a not
-		 * found, we might get an EIO, we could get anything.
-		 * If we get a DB_NOTFOUND, then there is a chance that
-		 * the LSN comes before the first file present in which
-		 * case we need to return a fail so that the client can return
-		 * a DB_OUTDATED.
-		 */
-		if (ret == DB_NOTFOUND &&
-		    __log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
-		    old != 0)
-			type = REP_VERIFY_FAIL;
-
-		if (ret != 0)
-			d = NULL;
-
-		(void)__rep_send_message(dbenv, *eidp, type, &rp->lsn, d, 0);
-		ret = __log_c_close(logc);
-		goto errlock;
+		RECOVERING_SKIP;
+		ret = __rep_verify_req(dbenv, rp, *eidp);
+		CLIENT_REREQ;
+		break;
 	case REP_VOTE1:
-		if (F_ISSET(rep, REP_F_MASTER)) {
-			RPRINT(dbenv, rep,
-			    (dbenv, &mb, "Master received vote"));
-			R_LOCK(dbenv, &dblp->reginfo);
-			lsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
-			(void)__rep_send_message(dbenv,
-			    *eidp, REP_NEWMASTER, &lsn, NULL, 0);
-			goto errlock;
-		}
-
-		vi = (REP_VOTE_INFO *)rec->data;
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-
 		/*
-		 * If we get a vote from a later election gen, we
-		 * clear everything from the current one, and we'll
-		 * start over by tallying it.  If we get an old vote,
-		 * send an ALIVE to the old participant.
+		 * Handle even if we're recovering.
 		 */
-		RPRINT(dbenv, rep, (dbenv, &mb,
-		    "Received vote1 egen %lu, egen %lu",
-		    (u_long)vi->egen, (u_long)rep->egen));
-		if (vi->egen < rep->egen) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Received old vote %lu, egen %lu, ignoring vote1",
-			    (u_long)vi->egen, (u_long)rep->egen));
-			egen = rep->egen;
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-			data_dbt.data = &egen;
-			data_dbt.size = sizeof(egen);
-			(void)__rep_send_message(dbenv,
-			    *eidp, REP_ALIVE, &rp->lsn, &data_dbt, 0);
-			goto errlock;
-		}
-		if (vi->egen > rep->egen) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Received VOTE1 from egen %lu, my egen %lu; reset",
-			    (u_long)vi->egen, (u_long)rep->egen));
-			__rep_elect_done(dbenv, rep);
-			rep->egen = vi->egen;
-		}
-		if (!IN_ELECTION(rep))
-			F_SET(rep, REP_F_TALLY);
-
-		/* Check if this site knows about more sites than we do. */
-		if (vi->nsites > rep->nsites)
-			rep->nsites = vi->nsites;
-
-		/* Check if this site requires more votes than we do. */
-		if (vi->nvotes > rep->nvotes)
-			rep->nvotes = vi->nvotes;
-
-		/*
-		 * We are keeping the vote, let's see if that changes our
-		 * count of the number of sites.
-		 */
-		if (rep->sites + 1 > rep->nsites)
-			rep->nsites = rep->sites + 1;
-		if (rep->nsites > rep->asites &&
-		    (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Grow sites returned error %d", ret));
-			goto errunlock;
-		}
-
-		/*
-		 * Ignore vote1's if we're in phase 2.
-		 */
-		if (F_ISSET(rep, REP_F_EPHASE2)) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "In phase 2, ignoring vote1"));
-			goto errunlock;
-		}
-
-		/*
-		 * Record this vote.  If we get back non-zero, we
-		 * ignore the vote.
-		 */
-		if ((ret = __rep_tally(dbenv, rep, *eidp, &rep->sites,
-		    vi->egen, rep->tally_off)) != 0) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Tally returned %d, sites %d",
-			    ret, rep->sites));
-			ret = 0;
-			goto errunlock;
-		}
-		RPRINT(dbenv, rep, (dbenv, &mb,
-	    "Incoming vote: (eid)%d (pri)%d (gen)%lu (egen)%lu [%lu,%lu]",
-		    *eidp, vi->priority,
-		    (u_long)rp->gen, (u_long)vi->egen,
-		    (u_long)rp->lsn.file, (u_long)rp->lsn.offset));
-#ifdef DIAGNOSTIC
-		if (rep->sites > 1)
-			RPRINT(dbenv, rep, (dbenv, &mb,
-	    "Existing vote: (eid)%d (pri)%d (gen)%lu (sites)%d [%lu,%lu]",
-			    rep->winner, rep->w_priority,
-			    (u_long)rep->w_gen, rep->sites,
-			    (u_long)rep->w_lsn.file,
-			    (u_long)rep->w_lsn.offset));
-#endif
-		__rep_cmp_vote(dbenv, rep, eidp, &rp->lsn, vi->priority,
-		    rp->gen, vi->tiebreaker);
-		/*
-		 * If you get a vote and you're not in an election, we've
-		 * already recorded this vote.  But that is all we need
-		 * to do.
-		 */
-		if (!IN_ELECTION(rep)) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Not in election, but received vote1 0x%x",
-			    rep->flags));
-			ret = DB_REP_HOLDELECTION;
-			goto errunlock;
-		}
-
-		master = rep->winner;
-		lsn = rep->w_lsn;
-		/*
-		 * We need to check sites == nsites, not more than half
-		 * like we do in __rep_elect and the VOTE2 code below.  The
-		 * reason is that we want to process all the incoming votes
-		 * and not short-circuit once we reach more than half.  The
-		 * real winner's vote may be in the last half.
-		 */
-		done = rep->sites >= rep->nsites && rep->w_priority != 0;
-		if (done) {
-			RPRINT(dbenv, rep,
-			    (dbenv, &mb, "Phase1 election done"));
-			RPRINT(dbenv, rep, (dbenv, &mb, "Voting for %d%s",
-			    master, master == rep->eid ? "(self)" : ""));
-			egen = rep->egen;
-			F_SET(rep, REP_F_EPHASE2);
-			F_CLR(rep, REP_F_EPHASE1);
-			if (master == rep->eid) {
-				(void)__rep_tally(dbenv, rep, rep->eid,
-				    &rep->votes, egen, rep->v2tally_off);
-				goto errunlock;
-			}
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-
-			/* Vote for someone else. */
-			__rep_send_vote(dbenv, NULL, 0, 0, 0, 0, egen,
-			    master, REP_VOTE2);
-		} else
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-
-		/* Election is still going on. */
+		ret = __rep_vote1(dbenv, rp, rec, *eidp);
 		break;
 	case REP_VOTE2:
-		RPRINT(dbenv, rep, (dbenv, &mb, "We received a vote%s",
-		    F_ISSET(rep, REP_F_MASTER) ? " (master)" : ""));
-		if (F_ISSET(rep, REP_F_MASTER)) {
-			R_LOCK(dbenv, &dblp->reginfo);
-			lsn = lp->lsn;
-			R_UNLOCK(dbenv, &dblp->reginfo);
-			rep->stat.st_elections_won++;
-			(void)__rep_send_message(dbenv,
-			    *eidp, REP_NEWMASTER, &lsn, NULL, 0);
-			goto errlock;
-		}
-
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-
-		/* If we have priority 0, we should never get a vote. */
-		DB_ASSERT(rep->priority != 0);
-
 		/*
-		 * We might be the last to the party and we haven't had
-		 * time to tally all the vote1's, but others have and
-		 * decided we're the winner.  So, if we're in the process
-		 * of tallying sites, keep the vote so that when our
-		 * election thread catches up we'll have the votes we
-		 * already received.
+		 * Handle even if we're recovering.
 		 */
-		vi = (REP_VOTE_INFO *)rec->data;
-		if (!IN_ELECTION_TALLY(rep) && vi->egen >= rep->egen) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Not in election gen %lu, at %lu, got vote",
-			    (u_long)vi->egen, (u_long)rep->egen));
-			ret = DB_REP_HOLDELECTION;
-			goto errunlock;
-		}
-
-		/*
-		 * Record this vote.  In a VOTE2, the only valid entry
-		 * in the REP_VOTE_INFO is the election generation.
-		 *
-		 * There are several things which can go wrong that we
-		 * need to account for:
-		 * 1. If we receive a latent VOTE2 from an earlier election,
-		 * we want to ignore it.
-		 * 2. If we receive a VOTE2 from a site from which we never
-		 * received a VOTE1, we want to ignore it.
-		 * 3. If we have received a duplicate VOTE2 from this election
-		 * from the same site we want to ignore it.
-		 * 4. If this is from the current election and someone is
-		 * really voting for us, then we finally get to record it.
-		 */
-		/*
-		 * __rep_cmp_vote2 checks for cases 1 and 2.
-		 */
-		if ((ret = __rep_cmp_vote2(dbenv, rep, *eidp, vi->egen)) != 0) {
-			ret = 0;
-			goto errunlock;
-		}
-		/*
-		 * __rep_tally takes care of cases 3 and 4.
-		 */
-		if ((ret = __rep_tally(dbenv, rep, *eidp, &rep->votes,
-		    vi->egen, rep->v2tally_off)) != 0) {
-			ret = 0;
-			goto errunlock;
-		}
-		done = rep->votes >= rep->nvotes;
-		RPRINT(dbenv, rep, (dbenv, &mb, "Counted vote %d of %d",
-		    rep->votes, rep->nvotes));
-		if (done) {
-			__rep_elect_master(dbenv, rep, eidp);
-			ret = DB_REP_NEWMASTER;
-			goto errunlock;
-		} else
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		ret = __rep_vote2(dbenv, rec, eidp);
 		break;
 	default:
 		__db_err(dbenv,
 	"DB_ENV->rep_process_message: unknown replication message: type %lu",
 		   (u_long)rp->rectype);
 		ret = EINVAL;
-		goto errlock;
+		break;
 	}
 
-	/*
-	 * If we already hold rep_mutexp then we goto 'errunlock'
-	 * Otherwise we goto 'errlock' to acquire it before we
-	 * decrement our message thread count.
-	 */
 errlock:
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-errunlock:
+	REP_SYSTEM_LOCK(dbenv);
 	rep->msg_th--;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 out:
 	if (ret == 0 && F_ISSET(rp, DB_LOG_PERM)) {
 		if (ret_lsnp != NULL)
@@ -1228,8 +624,11 @@ out:
  * __rep_process_rec, when possible and enqueuing in the __db.rep.db
  * when necessary.  As gaps in the stream are filled in, this is where
  * we try to process as much as possible from __db.rep.db to catch up.
+ *
+ * PUBLIC: int __rep_apply __P((DB_ENV *, REP_CONTROL *,
+ * PUBLIC:     DBT *, DB_LSN *, int *));
  */
-static int
+int
 __rep_apply(dbenv, rp, rec, ret_lsnp, is_dupp)
 	DB_ENV *dbenv;
 	REP_CONTROL *rp;
@@ -1261,13 +660,13 @@ __rep_apply(dbenv, rp, rec, ret_lsnp, is_dupp)
 	ZERO_LSN(max_lsn);
 
 	dblp = dbenv->lg_handle;
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 	lp = dblp->reginfo.primary;
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (F_ISSET(rep, REP_F_RECOVER_LOG) &&
 	    log_compare(&lp->ready_lsn, &rep->first_lsn) < 0)
 		lp->ready_lsn = rep->first_lsn;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 	cmp = log_compare(&rp->lsn, &lp->ready_lsn);
 
 	if (cmp == 0) {
@@ -1280,6 +679,7 @@ __rep_apply(dbenv, rp, rec, ret_lsnp, is_dupp)
 		 * towards the request interval.
 		 */
 		lp->rcvd_recs = 0;
+		ZERO_LSN(lp->max_wait_lsn);
 
 		while (ret == 0 &&
 		    log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) {
@@ -1288,8 +688,6 @@ __rep_apply(dbenv, rp, rec, ret_lsnp, is_dupp)
 			 * Write subsequent records to the log.
 			 */
 gap_check:
-			lp->rcvd_recs = 0;
-			ZERO_LSN(lp->max_wait_lsn);
 			if ((ret =
 			    __rep_remfirst(dbenv, &control_dbt, &rec_dbt)) != 0)
 				goto err;
@@ -1305,7 +703,23 @@ gap_check:
 			 */
 			--rep->stat.st_log_queued;
 
+			/*
+			 * Since we just filled a gap in the log stream, and
+			 * we're writing subsequent records to the log, we want
+			 * to use rcvd_recs and wait_recs so that we will
+			 * request the next gap if we end up with a gap and
+			 * a lot of records still in the temp db, but not
+			 * request if it is near the end of the temp db and
+			 * likely to arrive on its own shortly.  We want to
+			 * avoid requesting the record in that case.  Also
+			 * reset max_wait_lsn because the next gap is a
+			 * fresh gap.
+			 */
+			lp->rcvd_recs = rep->stat.st_log_queued;
+			lp->wait_recs = rep->request_gap;
+
 			if ((ret = __rep_getnext(dbenv)) == DB_NOTFOUND) {
+				lp->rcvd_recs = 0;
 				ret = 0;
 				break;
 			} else if (ret != 0)
@@ -1320,10 +734,14 @@ gap_check:
 		    log_compare(&lp->ready_lsn, &lp->waiting_lsn) != 0) {
 			/*
 			 * We got a record and processed it, but we may
-			 * still be waiting for more records.
+			 * still be waiting for more records.  If we
+			 * filled a gap we keep a count of how many other
+			 * records are in the temp database and if we should
+			 * request the next gap at this time.
 			 */
-			if (__rep_check_doreq(dbenv, rep))
-				__rep_loggap_req(dbenv, rep, &rp->lsn, 0);
+			if (__rep_check_doreq(dbenv, rep) && (ret =
+			    __rep_loggap_req(dbenv, rep, &rp->lsn, 0)) != 0)
+				goto err;
 		} else {
 			lp->wait_recs = 0;
 			ZERO_LSN(lp->max_wait_lsn);
@@ -1351,8 +769,9 @@ gap_check:
 			lp->rcvd_recs = 0;
 			ZERO_LSN(lp->max_wait_lsn);
 		}
-		if (__rep_check_doreq(dbenv, rep))
-			__rep_loggap_req(dbenv, rep, &rp->lsn, 0);
+		if (__rep_check_doreq(dbenv, rep) &&
+		    (ret = __rep_loggap_req(dbenv, rep, &rp->lsn, 0) != 0))
+			goto err;
 
 		ret = __db_put(dbp, NULL, &key_dbt, rec, DB_NOOVERWRITE);
 		rep->stat.st_log_queued++;
@@ -1397,7 +816,7 @@ gap_check:
 
 done:
 err:	/* Check if we need to go back into the table. */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (ret == 0 &&
 	    F_ISSET(rep, REP_F_RECOVER_LOG) &&
 	    log_compare(&lp->ready_lsn, &rep->last_lsn) >= 0) {
@@ -1405,7 +824,7 @@ err:	/* Check if we need to go back into the table. */
 		ZERO_LSN(max_lsn);
 		ret = DB_REP_LOGREADY;
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
 	if (ret == 0 && !F_ISSET(rep, REP_F_RECOVER_LOG) &&
 	    !IS_ZERO_LSN(max_lsn)) {
@@ -1415,7 +834,7 @@ err:	/* Check if we need to go back into the table. */
 		DB_ASSERT(log_compare(&max_lsn, &lp->max_perm_lsn) >= 0);
 		lp->max_perm_lsn = max_lsn;
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 
 	/*
 	 * Startup is complete when we process our first live record.  However,
@@ -1480,6 +899,7 @@ __rep_process_txn(dbenv, rec)
 	DB_LOGC *logc;
 	DB_LSN prev_lsn, *lsnp;
 	DB_REP *db_rep;
+	DB_TXNHEAD *txninfo;
 	LSN_COLLECTION lc;
 	REP *rep;
 	__txn_regop_args *txn_args;
@@ -1487,7 +907,6 @@ __rep_process_txn(dbenv, rec)
 	u_int32_t lockid, rectype;
 	u_int i;
 	int ret, t_ret;
-	void *txninfo;
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
@@ -1536,7 +955,7 @@ __rep_process_txn(dbenv, rec)
 	}
 
 	/* Get locks. */
-	if ((ret = __lock_id(dbenv, &lockid)) != 0)
+	if ((ret = __lock_id(dbenv, &lockid, NULL)) != 0)
 		goto err1;
 
 	if ((ret =
@@ -1720,371 +1139,11 @@ __rep_newfile(dbenv, rc, lsnp)
 	}
 }
 
-/*
- * __rep_tally --
- * PUBLIC: int __rep_tally __P((DB_ENV *, REP *, int, int *,
- * PUBLIC:    u_int32_t, roff_t));
- *
- * Handle incoming vote1 message on a client.  Called with the db_rep
- * mutex held.  This function will return 0 if we successfully tally
- * the vote and non-zero if the vote is ignored.  This will record
- * both VOTE1 and VOTE2 records, depending on which region offset the
- * caller passed in.
- */
-int
-__rep_tally(dbenv, rep, eid, countp, egen, vtoff)
-	DB_ENV *dbenv;
-	REP *rep;
-	int eid, *countp;
-	u_int32_t egen;
-	roff_t vtoff;
-{
-	REP_VTALLY *tally, *vtp;
-	int i;
-#ifdef DIAGNOSTIC
-	DB_MSGBUF mb;
-#else
-	COMPQUIET(rep, NULL);
-#endif
-
-	tally = R_ADDR((REGINFO *)dbenv->reginfo, vtoff);
-	i = 0;
-	vtp = &tally[i];
-	while (i < *countp) {
-		/*
-		 * Ignore votes from earlier elections (i.e. we've heard
-		 * from this site in this election, but its vote from an
-		 * earlier election got delayed and we received it now).
-		 * However, if we happened to hear from an earlier vote
-		 * and we recorded it and we're now hearing from a later
-		 * election we want to keep the updated one.  Note that
-		 * updating the entry will not increase the count.
-		 * Also ignore votes that are duplicates.
-		 */
-		if (vtp->eid == eid) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Tally found[%d] (%d, %lu), this vote (%d, %lu)",
-				    i, vtp->eid, (u_long)vtp->egen,
-				    eid, (u_long)egen));
-			if (vtp->egen >= egen)
-				return (1);
-			else {
-				vtp->egen = egen;
-				return (0);
-			}
-		}
-		i++;
-		vtp = &tally[i];
-	}
-	/*
-	 * If we get here, we have a new voter we haven't
-	 * seen before.  Tally this vote.
-	 */
-#ifdef DIAGNOSTIC
-	if (vtoff == rep->tally_off)
-		RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE1[%d] (%d, %lu)",
-		    i, eid, (u_long)egen));
-	else
-		RPRINT(dbenv, rep, (dbenv, &mb, "Tallying VOTE2[%d] (%d, %lu)",
-		    i, eid, (u_long)egen));
-#endif
-	vtp->eid = eid;
-	vtp->egen = egen;
-	(*countp)++;
-	return (0);
-}
-
-/*
- * __rep_cmp_vote --
- * PUBLIC: void __rep_cmp_vote __P((DB_ENV *, REP *, int *, DB_LSN *,
- * PUBLIC:     int, u_int32_t, u_int32_t));
- *
- * Compare incoming vote1 message on a client.  Called with the db_rep
- * mutex held.
- */
-void
-__rep_cmp_vote(dbenv, rep, eidp, lsnp, priority, gen, tiebreaker)
-	DB_ENV *dbenv;
-	REP *rep;
-	int *eidp;
-	DB_LSN *lsnp;
-	int priority;
-	u_int32_t gen, tiebreaker;
-{
-	int cmp;
-
-#ifdef DIAGNOSTIC
-	DB_MSGBUF mb;
-#else
-	COMPQUIET(dbenv, NULL);
-#endif
-	cmp = log_compare(lsnp, &rep->w_lsn);
-	/*
-	 * If we've seen more than one, compare us to the best so far.
-	 * If we're the first, make ourselves the winner to start.
-	 */
-	if (rep->sites > 1 && priority != 0) {
-		/*
-		 * LSN is primary determinant. Then priority if LSNs
-		 * are equal, then tiebreaker if both are equal.
-		 */
-		if (cmp > 0 ||
-		    (cmp == 0 && (priority > rep->w_priority ||
-		    (priority == rep->w_priority &&
-		    (tiebreaker > rep->w_tiebreaker))))) {
-			RPRINT(dbenv, rep, (dbenv, &mb, "Accepting new vote"));
-			rep->winner = *eidp;
-			rep->w_priority = priority;
-			rep->w_lsn = *lsnp;
-			rep->w_gen = gen;
-			rep->w_tiebreaker = tiebreaker;
-		}
-	} else if (rep->sites == 1) {
-		if (priority != 0) {
-			/* Make ourselves the winner to start. */
-			rep->winner = *eidp;
-			rep->w_priority = priority;
-			rep->w_gen = gen;
-			rep->w_lsn = *lsnp;
-			rep->w_tiebreaker = tiebreaker;
-		} else {
-			rep->winner = DB_EID_INVALID;
-			rep->w_priority = 0;
-			rep->w_gen = 0;
-			ZERO_LSN(rep->w_lsn);
-			rep->w_tiebreaker = 0;
-		}
-	}
-	return;
-}
-
-/*
- * __rep_cmp_vote2 --
- * PUBLIC: int __rep_cmp_vote2 __P((DB_ENV *, REP *, int, u_int32_t));
- *
- * Compare incoming vote2 message with vote1's we've recorded.  Called
- * with the db_rep mutex held.  We return 0 if the VOTE2 is from a
- * site we've heard from and it is from this election.  Otherwise we return 1.
- */
-int
-__rep_cmp_vote2(dbenv, rep, eid, egen)
-	DB_ENV *dbenv;
-	REP *rep;
-	int eid;
-	u_int32_t egen;
-{
-	int i;
-	REP_VTALLY *tally, *vtp;
-#ifdef DIAGNOSTIC
-	DB_MSGBUF mb;
-#endif
-
-	tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
-	i = 0;
-	vtp = &tally[i];
-	for (i = 0; i < rep->sites; i++) {
-		vtp = &tally[i];
-		if (vtp->eid == eid && vtp->egen == egen) {
-			RPRINT(dbenv, rep, (dbenv, &mb,
-			    "Found matching vote1 (%d, %lu), at %d of %d",
-			    eid, (u_long)egen, i, rep->sites));
-			return (0);
-		}
-	}
-	RPRINT(dbenv, rep,
-	    (dbenv, &mb, "Didn't find vote1 for eid %d, egen %lu",
-	    eid, (u_long)egen));
-	return (1);
-}
-
-static int
-__rep_dorecovery(dbenv, lsnp, trunclsnp)
-	DB_ENV *dbenv;
-	DB_LSN *lsnp, *trunclsnp;
-{
-	DB_LSN lsn;
-	DB_REP *db_rep;
-	DBT mylog;
-	DB_LOGC *logc;
-	int ret, t_ret, update;
-	u_int32_t rectype;
-	__txn_regop_args *txnrec;
-
-	db_rep = dbenv->rep_handle;
-
-	/* Figure out if we are backing out any committed transactions. */
-	if ((ret = __log_cursor(dbenv, &logc)) != 0)
-		return (ret);
-
-	memset(&mylog, 0, sizeof(mylog));
-	update = 0;
-	while (update == 0 &&
-	    (ret = __log_c_get(logc, &lsn, &mylog, DB_PREV)) == 0 &&
-	    log_compare(&lsn, lsnp) > 0) {
-		memcpy(&rectype, mylog.data, sizeof(rectype));
-		if (rectype == DB___txn_regop) {
-			if ((ret =
-			    __txn_regop_read(dbenv, mylog.data, &txnrec)) != 0)
-				goto err;
-			if (txnrec->opcode != TXN_ABORT)
-				update = 1;
-			__os_free(dbenv, txnrec);
-		}
-	}
-
-	/*
-	 * If we successfully run recovery, we've opened all the necessary
-	 * files.  We are guaranteed to be single-threaded here, so no mutex
-	 * is necessary.
-	 */
-	if ((ret = __db_apprec(dbenv, lsnp, trunclsnp, update, 0)) == 0)
-		F_SET(db_rep, DBREP_OPENFILES);
-
-err:	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
-		ret = t_ret;
-
-	return (ret);
-}
-
-/*
- * __rep_verify_match --
- *	We have just received a matching log record during verification.
- * Figure out if we're going to need to run recovery. If so, wait until
- * everything else has exited the library.  If not, set up the world
- * correctly and move forward.
- */
-static int
-__rep_verify_match(dbenv, reclsnp, savetime)
-	DB_ENV *dbenv;
-	DB_LSN *reclsnp;
-	time_t savetime;
-{
-	DB_LOG *dblp;
-	DB_LSN trunclsn;
-	DB_REP *db_rep;
-	LOG *lp;
-	REGENV *renv;
-	REGINFO *infop;
-	REP *rep;
-	int done, master, ret;
-	u_int32_t unused;
-
-	dblp = dbenv->lg_handle;
-	db_rep = dbenv->rep_handle;
-	rep = db_rep->region;
-	lp = dblp->reginfo.primary;
-	ret = 0;
-	infop = dbenv->reginfo;
-	renv = infop->primary;
-
-	/*
-	 * Check if the savetime is different than our current time stamp.
-	 * If it is, then we're racing with another thread trying to recover
-	 * and we lost.  We must give up.
-	 */
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-	done = savetime != renv->rep_timestamp;
-	if (done) {
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-		return (0);
-	}
-	ZERO_LSN(lp->verify_lsn);
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-
-	/*
-	 * Make sure the world hasn't changed while we tried to get
-	 * the lock.  If it hasn't then it's time for us to kick all
-	 * operations out of DB and run recovery.
-	 */
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	if (!F_ISSET(rep, REP_F_RECOVER_LOG) &&
-	    (F_ISSET(rep, REP_F_READY) || rep->in_recovery != 0)) {
-		rep->stat.st_msgs_recover++;
-		goto errunlock;
-	}
-
-	__rep_lockout(dbenv, db_rep, rep, 1);
-
-	/* OK, everyone is out, we can now run recovery. */
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-
-	if ((ret = __rep_dorecovery(dbenv, reclsnp, &trunclsn)) != 0) {
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		rep->in_recovery = 0;
-		F_CLR(rep, REP_F_READY);
-		goto errunlock;
-	}
-
-	/*
-	 * The log has been truncated (either directly by us or by __db_apprec)
-	 * We want to make sure we're waiting for the LSN at the new end-of-log,
-	 * not some later point.
-	 */
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-	lp->ready_lsn = trunclsn;
-	ZERO_LSN(lp->waiting_lsn);
-	ZERO_LSN(lp->max_wait_lsn);
-	lp->max_perm_lsn = *reclsnp;
-	lp->wait_recs = 0;
-	lp->rcvd_recs = 0;
-	ZERO_LSN(lp->verify_lsn);
-
-	/*
-	 * Discard any log records we have queued;  we're about to re-request
-	 * them, and can't trust the ones in the queue.  We need to set the
-	 * DB_AM_RECOVER bit in this handle, so that the operation doesn't
-	 * deadlock.
-	 */
-	F_SET(db_rep->rep_db, DB_AM_RECOVER);
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-	ret = __db_truncate(db_rep->rep_db, NULL, &unused);
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-	F_CLR(db_rep->rep_db, DB_AM_RECOVER);
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	rep->stat.st_log_queued = 0;
-	rep->in_recovery = 0;
-	F_CLR(rep, REP_F_NOARCHIVE | REP_F_RECOVER_MASK);
-
-	if (ret != 0)
-		goto errunlock;
-
-	/*
-	 * If the master_id is invalid, this means that since
-	 * the last record was sent, somebody declared an
-	 * election and we may not have a master to request
-	 * things of.
-	 *
-	 * This is not an error;  when we find a new master,
-	 * we'll re-negotiate where the end of the log is and
-	 * try to bring ourselves up to date again anyway.
-	 *
-	 * !!!
-	 * We cannot assert the election flags though because
-	 * somebody may have declared an election and then
-	 * got an error, thus clearing the election flags
-	 * but we still have an invalid master_id.
-	 */
-	master = rep->master_id;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	if (master == DB_EID_INVALID)
-		ret = 0;
-	else
-		(void)__rep_send_message(dbenv,
-		    master, REP_ALL_REQ, reclsnp, NULL, 0);
-	if (0) {
-errunlock:
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	}
-	return (ret);
-}
-
 /*
  * __rep_do_ckp --
- * Perform the memp_sync necessary for this checkpoint without holding
- * the db_rep->db_mutexp.  All callers of this function must hold the
- * db_rep->db_mutexp and must not be holding the db_rep->rep_mutexp.
+ * Perform the memp_sync necessary for this checkpoint without holding the
+ * REP->mtx_clientdb.  Callers of this function must hold REP->mtx_clientdb
+ * and must not be holding the region mutex.
  */
 static int
 __rep_do_ckp(dbenv, rec, rp)
@@ -2098,9 +1157,9 @@ __rep_do_ckp(dbenv, rec, rp)
 
 	db_rep = dbenv->rep_handle;
 
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, db_rep->region->mtx_clientdb);
 
-	DB_TEST_CHECKPOINT(dbenv, dbenv->test_check);
+	DB_TEST_WAIT(dbenv, dbenv->test_check);
 
 	/* Sync the memory pool. */
 	memcpy(&ckp_lsn, (u_int8_t *)rec->data +
@@ -2109,13 +1168,13 @@ __rep_do_ckp(dbenv, rec, rp)
 
 	/* Update the last_ckp in the txn region. */
 	if (ret == 0)
-		__txn_updateckp(dbenv, &rp->lsn);
+		ret = __txn_updateckp(dbenv, &rp->lsn);
 	else {
 		__db_err(dbenv, "Error syncing ckp [%lu][%lu]",
 		    (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
 		ret = __db_panic(dbenv, ret);
 	}
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_LOCK(dbenv, db_rep->region->mtx_clientdb);
 
 	return (ret);
 }
@@ -2134,7 +1193,6 @@ __rep_remfirst(dbenv, cntrl, rec)
 	DBC *dbc;
 	DB_REP *db_rep;
 	int ret, t_ret;
-	u_int32_t rectype;
 
 	db_rep = dbenv->rep_handle;
 	dbp = db_rep->rep_db;
@@ -2145,10 +1203,8 @@ __rep_remfirst(dbenv, cntrl, rec)
 	/* The DBTs need to persist through another call. */
 	F_SET(cntrl, DB_DBT_REALLOC);
 	F_SET(rec, DB_DBT_REALLOC);
-	if ((ret = __db_c_get(dbc, cntrl, rec, DB_RMW | DB_FIRST)) == 0) {
-		memcpy(&rectype, rec->data, sizeof(rectype));
+	if ((ret = __db_c_get(dbc, cntrl, rec, DB_RMW | DB_FIRST)) == 0)
 		ret = __db_c_del(dbc, 0);
-	}
 	if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
 		ret = t_ret;
 
@@ -2342,15 +1398,13 @@ __rep_process_rec(dbenv, rp, rec, typep, ret_lsnp)
 		break;
 	case DB___txn_ckp:
 		/*
-		 * We do not want to hold the db_rep->db_mutexp
-		 * mutex while syncing the mpool, so if we get
-		 * a checkpoint record that we are supposed to
-		 * process, we add it to the __db.rep.db, do
-		 * the memp_sync and then go back and process
-		 * it later, when the sync has finished.  If
-		 * this record is already in the table, then
-		 * some other thread will process it, so simply
-		 * return REP_NOTPERM;
+		 * We do not want to hold the REP->mtx_clientdb mutex while
+		 * syncing the mpool, so if we get a checkpoint record we are
+		 * supposed to process, add it to the __db.rep.db, do the
+		 * memp_sync and then go back and process it later, when the
+		 * sync has finished.  If this record is already in the table,
+		 * then some other thread will process it, so simply return
+		 * REP_NOTPERM.
 		 */
 		memset(&key_dbt, 0, sizeof(key_dbt));
 		key_dbt.data = rp;
@@ -2406,9 +1460,9 @@ out:
  *	The caller holds no locks.
  */
 static int
-__rep_resend_req(dbenv, eid)
+__rep_resend_req(dbenv, rereq)
 	DB_ENV *dbenv;
-	int eid;
+	int rereq;
 {
 
 	DB_LOG *dblp;
@@ -2417,37 +1471,45 @@ __rep_resend_req(dbenv, eid)
 	LOG *lp;
 	REP *rep;
 	int ret;
-	u_int32_t repflags;
+	u_int32_t gapflags, repflags;
 
-	ret = 0;
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
+	ret = 0;
 
 	repflags = rep->flags;
+	/*
+	 * If we are delayed, we do not rerequest anything.
+	 */
+	if (FLD_ISSET(repflags, REP_F_DELAY))
+		return (ret);
+	gapflags = rereq ? REP_GAP_REREQUEST : 0;
+
 	if (FLD_ISSET(repflags, REP_F_RECOVER_VERIFY)) {
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 		lsn = lp->verify_lsn;
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 		if (!IS_ZERO_LSN(lsn))
-			(void)__rep_send_message(dbenv, eid,
-			    REP_VERIFY_REQ, &lsn, NULL, 0);
-		goto out;
+			(void)__rep_send_message(dbenv, rep->master_id,
+			    REP_VERIFY_REQ, &lsn, NULL, 0, DB_REP_REREQUEST);
 	} else if (FLD_ISSET(repflags, REP_F_RECOVER_UPDATE)) {
-		(void)__rep_send_message(dbenv, eid,
-		    REP_UPDATE_REQ, NULL, NULL, 0);
+		/*
+		 * UPDATE_REQ only goes to the master.
+		 */
+		(void)__rep_send_message(dbenv, rep->master_id,
+		    REP_UPDATE_REQ, NULL, NULL, 0, 0);
 	} else if (FLD_ISSET(repflags, REP_F_RECOVER_PAGE)) {
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		ret = __rep_pggap_req(dbenv, rep, NULL, 0);
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-	} else if (FLD_ISSET(repflags, REP_F_RECOVER_LOG)) {
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-		__rep_loggap_req(dbenv, rep, NULL, 0);
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+		REP_SYSTEM_LOCK(dbenv);
+		ret = __rep_pggap_req(dbenv, rep, NULL, gapflags);
+		REP_SYSTEM_UNLOCK(dbenv);
+	} else {
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+		ret = __rep_loggap_req(dbenv, rep, NULL, gapflags);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 	}
 
-out:
 	return (ret);
 }
 
@@ -2457,7 +1519,7 @@ out:
  *
  * Check if we need to send another request.  If so, compare with
  * the request limits the user might have set.  This assumes the
- * caller holds the db_rep->db_mutexp mutex.  Returns 1 if a request
+ * caller holds the REP->mtx_clientdb mutex.  Returns 1 if a request
  * needs to be made, and 0 if it does not.
  */
 int
@@ -2483,51 +1545,55 @@ __rep_check_doreq(dbenv, rep)
 }
 
 /*
- * __rep_lockout --
- * PUBLIC: void __rep_lockout __P((DB_ENV *, DB_REP *, REP *, u_int32_t));
+ * __rep_skip_msg -
  *
- * Coordinate with other threads in the library and active txns so
- * that we can run single-threaded, for recovery or internal backup.
- * Assumes the caller holds rep_mutexp.
+ *	If we're in recovery we want to skip/ignore the message, but
+ *	we also need to see if we need to re-request any retransmissions.
  */
-void
-__rep_lockout(dbenv, db_rep, rep, msg_th)
+static int
+__rep_skip_msg(dbenv, rep, eid, rectype)
 	DB_ENV *dbenv;
-	DB_REP *db_rep;
 	REP *rep;
-	u_int32_t msg_th;
+	int eid;
+	u_int32_t rectype;
 {
-	int wait_cnt;
-
-	/* Phase 1: set REP_F_READY and wait for op_cnt to go to 0. */
-	F_SET(rep, REP_F_READY);
-	for (wait_cnt = 0; rep->op_cnt != 0;) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		__os_sleep(dbenv, 1, 0);
-#ifdef DIAGNOSTIC
-		if (++wait_cnt % 60 == 0)
-			__db_err(dbenv,
-	"Waiting for txn_cnt to run replication recovery/backup for %d minutes",
-			wait_cnt / 60);
-#endif
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-	}
+	int do_req, ret;
 
+	ret = 0;
 	/*
-	 * Phase 2: set in_recovery and wait for handle count to go
-	 * to 0 and for the number of threads in __rep_process_message
-	 * to go to 1 (us).
+	 * If we have a request message from a client then immediately
+	 * send a REP_REREQUEST back to that client since we're skipping it.
 	 */
-	rep->in_recovery = 1;
-	for (wait_cnt = 0; rep->handle_cnt != 0 || rep->msg_th > msg_th;) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		__os_sleep(dbenv, 1, 0);
-#ifdef DIAGNOSTIC
-		if (++wait_cnt % 60 == 0)
-			__db_err(dbenv,
-"Waiting for handle count to run replication recovery/backup for %d minutes",
-			wait_cnt / 60);
-#endif
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	if (rep->master_id != DB_EID_INVALID && eid != rep->master_id)
+		do_req = 1;
+	else {
+		/* Check for need to retransmit. */
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+		do_req = __rep_check_doreq(dbenv, rep);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 	}
+	/*
+	 * Don't respond to a MASTER_REQ with
+	 * a MASTER_REQ or REREQUEST.
+	 */
+	if (do_req && rectype != REP_MASTER_REQ) {
+		/*
+		 * There are three cases:
+		 * 1.  If we don't know who the master is, then send MASTER_REQ.
+		 * 2.  If the message we're skipping came from the master,
+		 * then we need to rerequest.
+		 * 3.  If the message didn't come from a master (i.e. client
+		 * to client), then send a rerequest back to the sender so
+		 * the sender can rerequest it elsewhere.
+		 */
+		if (rep->master_id == DB_EID_INVALID)	/* Case 1. */
+			(void)__rep_send_message(dbenv,
+			    DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0, 0);
+		else if (eid == rep->master_id)		/* Case 2. */
+			ret = __rep_resend_req(dbenv, 0);
+		else					/* Case 3. */
+			(void)__rep_send_message(dbenv,
+			    eid, REP_REREQUEST, NULL, NULL, 0, 0);
+	}
+	return (ret);
 }
diff --git a/storage/bdb/rep/rep_region.c b/storage/bdb/rep/rep_region.c
index b163e3456e6..000cf19330e 100644
--- a/storage/bdb/rep/rep_region.c
+++ b/storage/bdb/rep/rep_region.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_region.c,v 1.53 2004/10/15 16:59:44 bostic Exp $
+ * $Id: rep_region.c,v 12.12 2005/10/19 19:10:40 sue Exp $
  */
 
 #include "db_config.h"
@@ -26,8 +26,8 @@
 
 #include "db_int.h"
 #include "dbinc/db_page.h"
-#include "dbinc/log.h"
 #include "dbinc/db_am.h"
+#include "dbinc/log.h"
 
 static int __rep_egen_init  __P((DB_ENV *, REP *));
 
@@ -43,7 +43,6 @@ __rep_region_init(dbenv)
 {
 	REGENV *renv;
 	REGINFO *infop;
-	DB_MUTEX *db_mutexp;
 	DB_REP *db_rep;
 	REP *rep;
 	int ret;
@@ -53,31 +52,18 @@ __rep_region_init(dbenv)
 	renv = infop->primary;
 	ret = 0;
 
-	MUTEX_LOCK(dbenv, &renv->mutex);
 	if (renv->rep_off == INVALID_ROFF) {
 		/* Must create the region. */
-		if ((ret =
-		    __db_shalloc(infop, sizeof(REP), MUTEX_ALIGN, &rep)) != 0)
-			goto err;
+		if ((ret = __db_shalloc(infop, sizeof(REP), 0, &rep)) != 0)
+			return (ret);
 		memset(rep, 0, sizeof(*rep));
 		rep->tally_off = INVALID_ROFF;
 		rep->v2tally_off = INVALID_ROFF;
 		renv->rep_off = R_OFFSET(infop, rep);
 
-		if ((ret = __db_mutex_setup(dbenv, infop, &rep->mutex,
-		    MUTEX_NO_RECORD)) != 0)
-			goto err;
-
-		/*
-		 * We must create a place for the db_mutex separately;
-		 * mutexes have to be aligned to MUTEX_ALIGN, and the only way
-		 * to guarantee that is to make sure they're at the beginning
-		 * of a shalloc'ed chunk.
-		 */
-		if ((ret = __db_shalloc(infop, sizeof(DB_MUTEX),
-		    MUTEX_ALIGN, &db_mutexp)) != 0)
-			goto err;
-		rep->db_mutex_off = R_OFFSET(infop, db_mutexp);
+		if ((ret = __mutex_alloc(
+		    dbenv, MTX_REP_REGION, 0, &rep->mtx_region)) != 0)
+			return (ret);
 
 		/*
 		 * Because we have no way to prevent deadlocks and cannot log
@@ -86,16 +72,16 @@ __rep_region_init(dbenv)
 		 * accessed when messages arrive out-of-order, so it should
 		 * stay small and not be used in a high-performance app.
 		 */
-		if ((ret = __db_mutex_setup(dbenv, infop, db_mutexp,
-		    MUTEX_NO_RECORD)) != 0)
-			goto err;
+		if ((ret = __mutex_alloc(
+		    dbenv, MTX_REP_DATABASE, 0, &rep->mtx_clientdb)) != 0)
+			return (ret);
 
 		/* We have the region; fill in the values. */
 		rep->eid = DB_EID_INVALID;
 		rep->master_id = DB_EID_INVALID;
 		rep->gen = 0;
 		if ((ret = __rep_egen_init(dbenv, rep)) != 0)
-			goto err;
+			return (ret);
 		/*
 		 * Set default values for the min and max log records that we
 		 * wait before requesting a missing log record.
@@ -108,16 +94,10 @@ __rep_region_init(dbenv)
 		F_CLR(renv, DB_REGENV_REPLOCKED);
 	} else
 		rep = R_ADDR(infop, renv->rep_off);
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
 
-	db_rep->rep_mutexp = &rep->mutex;
-	db_rep->db_mutexp = R_ADDR(infop, rep->db_mutex_off);
 	db_rep->region = rep;
 
 	return (0);
-
-err:	MUTEX_UNLOCK(dbenv, &renv->mutex);
-	return (ret);
 }
 
 /*
@@ -131,19 +111,29 @@ __rep_region_destroy(dbenv)
 	DB_ENV *dbenv;
 {
 	DB_REP *db_rep;
+	REGENV *renv;
+	REGINFO *infop;
 	int ret, t_ret;
 
-	ret = t_ret = 0;
-	db_rep = dbenv->rep_handle;
+	if (!REP_ON(dbenv))
+		return (0);
 
-	if (db_rep != NULL) {
-		if (db_rep->rep_mutexp != NULL)
-			ret = __db_mutex_destroy(db_rep->rep_mutexp);
-		if (db_rep->db_mutexp != NULL)
-			t_ret = __db_mutex_destroy(db_rep->db_mutexp);
+	ret = 0;
+
+	db_rep = dbenv->rep_handle;
+	if (db_rep->region != NULL) {
+		ret = __mutex_free(dbenv, &db_rep->region->mtx_region);
+		if ((t_ret = __mutex_free(
+		    dbenv, &db_rep->region->mtx_clientdb)) != 0 && ret == 0)
+			ret = t_ret;
 	}
 
-	return (ret == 0 ? t_ret : ret);
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+	if (renv->rep_off != INVALID_ROFF)
+		__db_shalloc_free(infop, R_ADDR(infop, renv->rep_off));
+
+	return (ret);
 }
 
 /*
@@ -182,44 +172,61 @@ __rep_dbenv_close(dbenv)
 
 /*
  * __rep_preclose --
- *	If we are a client, shut down our client database and, if we're
- * actually closing the environment, close all databases we've opened
- * while applying messages.
+ *	If we are a client, shut down our client database and close
+ * all databases we've opened while applying messages as a client.
  *
- * PUBLIC: int __rep_preclose __P((DB_ENV *, int));
+ * PUBLIC: int __rep_preclose __P((DB_ENV *));
  */
 int
-__rep_preclose(dbenv, do_closefiles)
+__rep_preclose(dbenv)
 	DB_ENV *dbenv;
-	int do_closefiles;
 {
+	DB_LOG *dblp;
 	DB_REP *db_rep;
+	LOG *lp;
+	REP_BULK bulk;
 	int ret, t_ret;
 
 	ret = 0;
 
 	db_rep = dbenv->rep_handle;
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	MUTEX_LOCK(dbenv, db_rep->region->mtx_clientdb);
 	if (db_rep->rep_db != NULL) {
 		ret = __db_close(db_rep->rep_db, NULL, DB_NOSYNC);
 		db_rep->rep_db = NULL;
 	}
 
-	if (do_closefiles) {
-		if ((t_ret = __dbreg_close_files(dbenv)) != 0 && ret == 0)
+	if ((t_ret = __dbreg_close_files(dbenv)) != 0 && ret == 0)
+		ret = t_ret;
+	F_CLR(db_rep, DBREP_OPENFILES);
+	/*
+	 * If we have something in the bulk buffer, send anything in it
+	 * if we are able to.
+	 */
+	if (lp->bulk_off != 0 && dbenv->rep_send != NULL) {
+		memset(&bulk, 0, sizeof(bulk));
+		bulk.addr = R_ADDR(&dblp->reginfo, lp->bulk_buf);
+		bulk.offp = &lp->bulk_off;
+		bulk.len = lp->bulk_len;
+		bulk.type = REP_BULK_LOG;
+		bulk.eid = DB_EID_BROADCAST;
+		bulk.flagsp = &lp->bulk_flags;
+		if ((t_ret = __rep_send_bulk(dbenv, &bulk, 0)) != 0 && ret == 0)
 			ret = t_ret;
-		F_CLR(db_rep, DBREP_OPENFILES);
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, db_rep->region->mtx_clientdb);
 	return (ret);
 }
 
 /*
  * __rep_egen_init --
- *	Initialize the value of egen in the region.  Called
- * only from __rep_region_init, which is guaranteed to be
- * single-threaded as we create the rep region.  We set the
- * rep->egen field which is normally protected by db_rep->rep_mutex.
+ *	Initialize the value of egen in the region.  Called only from
+ *	__rep_region_init, which is guaranteed to be single-threaded
+ *	as we create the rep region.  We set the rep->egen field which
+ *	is normally protected by db_rep->region->mutex.
  */
 static int
 __rep_egen_init(dbenv, rep)
@@ -249,7 +256,7 @@ __rep_egen_init(dbenv, rep)
 		 * File exists, open it and read in our egen.
 		 */
 		if ((ret = __os_open(dbenv, p, DB_OSO_RDONLY,
-		    __db_omode("rw----"), &fhp)) != 0)
+		    __db_omode(OWNER_RW), &fhp)) != 0)
 			goto err;
 		if ((ret = __os_read(dbenv, fhp, &rep->egen, sizeof(u_int32_t),
 		    &cnt)) < 0 || cnt == 0)
@@ -282,7 +289,7 @@ __rep_write_egen(dbenv, egen)
 	    __db_appname(dbenv, DB_APP_NONE, REP_EGENNAME, 0, NULL, &p)) != 0)
 		return (ret);
 	if ((ret = __os_open(dbenv, p, DB_OSO_CREATE | DB_OSO_TRUNC,
-	    __db_omode("rw----"), &fhp)) == 0) {
+	    __db_omode(OWNER_RW), &fhp)) == 0) {
 		if ((ret = __os_write(dbenv, fhp, &egen, sizeof(u_int32_t),
 		    &cnt)) != 0 || ((ret = __os_fsync(dbenv, fhp)) != 0))
 			__db_err(dbenv, "%s: %s", p, db_strerror(ret));
diff --git a/storage/bdb/rep/rep_stat.c b/storage/bdb/rep/rep_stat.c
index d740c2b40c4..984e63938fa 100644
--- a/storage/bdb/rep/rep_stat.c
+++ b/storage/bdb/rep/rep_stat.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2001-2004
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_stat.c,v 1.155 2004/09/29 15:36:38 bostic Exp $
+ * $Id: rep_stat.c,v 12.6 2005/10/24 18:37:10 alanb Exp $
  */
 
 #include "db_config.h"
@@ -48,6 +48,7 @@ __rep_stat_pp(dbenv, statp, flags)
 	DB_REP_STAT **statp;
 	u_int32_t flags;
 {
+	DB_THREAD_INFO *ip;
 	int ret;
 
 	PANIC_CHECK(dbenv);
@@ -58,7 +59,11 @@ __rep_stat_pp(dbenv, statp, flags)
 	    "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	return (__rep_stat(dbenv, statp, flags));
+	ENV_ENTER(dbenv, ip);
+	ret = __rep_stat(dbenv, statp, flags);
+	ENV_LEAVE(dbenv, ip);
+
+	return (ret);
 }
 
 /*
@@ -111,6 +116,7 @@ __rep_stat(dbenv, statp, flags)
 		stats->st_election_gen = rep->w_gen;
 		stats->st_election_lsn = rep->w_lsn;
 		stats->st_election_votes = rep->votes;
+		stats->st_election_nvotes = rep->nvotes;
 		stats->st_election_tiebreaker = rep->w_tiebreaker;
 	}
 
@@ -141,7 +147,7 @@ __rep_stat(dbenv, statp, flags)
 	 * protected by the log region lock.
 	 */
 	if (dolock)
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 	if (F_ISSET(rep, REP_F_CLIENT)) {
 		stats->st_next_lsn = lp->ready_lsn;
 		stats->st_waiting_lsn = lp->waiting_lsn;
@@ -155,7 +161,7 @@ __rep_stat(dbenv, statp, flags)
 		ZERO_LSN(stats->st_waiting_lsn);
 	}
 	if (dolock)
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 
 	*statp = stats;
 	return (0);
@@ -172,6 +178,7 @@ __rep_stat_print_pp(dbenv, flags)
 	DB_ENV *dbenv;
 	u_int32_t flags;
 {
+	DB_THREAD_INFO *ip;
 	int ret;
 
 	PANIC_CHECK(dbenv);
@@ -182,7 +189,11 @@ __rep_stat_print_pp(dbenv, flags)
 	    flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0)
 		return (ret);
 
-	return (__rep_stat_print(dbenv, flags));
+	ENV_ENTER(dbenv, ip);
+	ret = __rep_stat_print(dbenv, flags);
+	ENV_LEAVE(dbenv, ip);
+
+	return (ret);
 }
 
 /*
@@ -328,9 +339,14 @@ __rep_print_stats(dbenv, flags)
 	__db_dl(dbenv,
 	    "Number of elections won", (u_long)sp->st_elections_won);
 
-	if (sp->st_election_status == 0)
+	if (sp->st_election_status == 0) {
 		__db_msg(dbenv, "No election in progress");
-	else {
+		if (sp->st_election_sec > 0 || sp->st_election_usec > 0)
+			__db_msg(dbenv,
+			    "%lu.%.6lu\tDuration of last election (seconds)",
+			    (u_long)sp->st_election_sec,
+			    (u_long)sp->st_election_usec);
+	} else {
 		__db_dl(dbenv, "Current election phase",
 		    (u_long)sp->st_election_status);
 		__db_dl(dbenv, "Election winner",
@@ -352,6 +368,21 @@ __rep_print_stats(dbenv, flags)
 		__db_dl(dbenv, "Votes received this election round",
 		    (u_long)sp->st_election_votes);
 	}
+	__db_dl(dbenv, "Number of bulk buffer sends triggered by full buffer",
+	    (u_long)sp->st_bulk_fills);
+	__db_dl(dbenv, "Number of single records exceeding bulk buffer size",
+	    (u_long)sp->st_bulk_overflows);
+	__db_dl(dbenv, "Number of records added to a bulk buffer",
+	    (u_long)sp->st_bulk_records);
+	__db_dl(dbenv, "Number of bulk buffers sent",
+	    (u_long)sp->st_bulk_transfers);
+	__db_dl(dbenv, "Number of re-request messages received",
+	    (u_long)sp->st_client_rerequests);
+	__db_dl(dbenv,
+	    "Number of request messages this client failed to process",
+	    (u_long)sp->st_client_svc_miss);
+	__db_dl(dbenv, "Number of request messages received by this client",
+	    (u_long)sp->st_client_svc_req);
 
 	__os_ufree(dbenv, sp);
 
@@ -400,10 +431,6 @@ __rep_print_all(dbenv, flags)
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "DB_REP handle information:");
-	__db_print_mutex(dbenv, NULL,
-	    db_rep->rep_mutexp, "Replication region mutex", flags);
-	__db_print_mutex(dbenv, NULL,
-	    db_rep->db_mutexp, "Bookkeeping database mutex", flags);
 
 	if (db_rep->rep_db == NULL)
 		STAT_ISSET("Bookkeeping database", db_rep->rep_db);
@@ -414,7 +441,10 @@ __rep_print_all(dbenv, flags)
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "REP handle information:");
-	__db_print_mutex(dbenv, NULL, &rep->mutex, "REP mutex", flags);
+	__mutex_print_debug_single(dbenv,
+	    "Replication region mutex", rep->mtx_region, flags);
+	__mutex_print_debug_single(dbenv,
+	    "Bookkeeping database mutex", rep->mtx_clientdb, flags);
 
 	STAT_LONG("Environment ID", rep->eid);
 	STAT_LONG("Master environment ID", rep->master_id);
@@ -451,7 +481,7 @@ __rep_print_all(dbenv, flags)
 
 	__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
 	__db_msg(dbenv, "LOG replication information:");
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 	dblp = dbenv->lg_handle;
 	lp = (LOG *)dblp->reginfo.primary;
 	STAT_LSN("First log record after a gap", &lp->waiting_lsn);
@@ -460,7 +490,7 @@ __rep_print_all(dbenv, flags)
 	STAT_ULONG("Records to wait before requesting", lp->wait_recs);
 	STAT_ULONG("Records received while waiting", lp->rcvd_recs);
 	STAT_LSN("Next LSN expected", &lp->ready_lsn);
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 
 	return (0);
 }
diff --git a/storage/bdb/rep/rep_stub.c b/storage/bdb/rep/rep_stub.c
index c2851915d1e..2c0b76fc83b 100644
--- a/storage/bdb/rep/rep_stub.c
+++ b/storage/bdb/rep/rep_stub.c
@@ -1,10 +1,10 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996-2004
+ * Copyright (c) 1996-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_stub.c,v 1.22 2004/09/29 15:36:04 bostic Exp $
+ * $Id: rep_stub.c,v 12.11 2005/10/09 16:12:07 bostic Exp $
  */
 
 #include "db_config.h"
@@ -22,17 +22,7 @@
  * If the library wasn't compiled with replication support, various routines
  * aren't available.  Stub them here, returning an appropriate error.
  */
-
 static int __db_norep __P((DB_ENV *));
-static int __rep_elect
-    __P((DB_ENV *, int, int, int, u_int32_t, int *, u_int32_t));
-static int __rep_flush __P((DB_ENV *));
-static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
-static int __rep_get_limit __P((DB_ENV *, u_int32_t *, u_int32_t *));
-static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
-static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
-static int __rep_set_rep_transport __P((DB_ENV *, int, int (*)
-    (DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t)));
 
 /*
  * __db_norep --
@@ -58,36 +48,51 @@ __db_rep_enter(dbp, checkgen, checklock, return_now)
 	return (__db_norep(dbp->dbenv));
 }
 
-void
-__env_rep_enter(dbenv)
+int
+__env_rep_enter(dbenv, checklock)
 	DB_ENV *dbenv;
+	int checklock;
 {
-	COMPQUIET(dbenv, NULL);
-	return;
+	COMPQUIET(checklock, 0);
+	return (__db_norep(dbenv));
 }
 
-void
+int
 __env_db_rep_exit(dbenv)
 	DB_ENV *dbenv;
 {
-	COMPQUIET(dbenv, NULL);
-	return;
+	return (__db_norep(dbenv));
 }
 
-void
+int
 __op_rep_enter(dbenv)
 	DB_ENV *dbenv;
 {
-	COMPQUIET(dbenv, NULL);
-	return;
+	return (__db_norep(dbenv));
 }
 
-void
+int
 __op_rep_exit(dbenv)
 	DB_ENV *dbenv;
 {
-	COMPQUIET(dbenv, NULL);
-	return;
+	return (__db_norep(dbenv));
+}
+
+int
+__rep_bulk_message(dbenv, bulkp, repth, lsnp, dbt, flags)
+	DB_ENV *dbenv;
+	REP_BULK *bulkp;
+	REP_THROTTLE *repth;
+	DB_LSN *lsnp;
+	const DBT *dbt;
+	u_int32_t flags;
+{
+	COMPQUIET(bulkp, NULL);
+	COMPQUIET(repth, NULL);
+	COMPQUIET(lsnp, NULL);
+	COMPQUIET(dbt, NULL);
+	COMPQUIET(flags, 0);
+	return (__db_norep(dbenv));
 }
 
 int
@@ -98,22 +103,6 @@ __rep_dbenv_close(dbenv)
 	return (0);
 }
 
-void
-__rep_dbenv_create(dbenv)
-	DB_ENV *dbenv;
-{
-	dbenv->rep_elect = __rep_elect;
-	dbenv->rep_flush = __rep_flush;
-	dbenv->rep_process_message = __rep_process_message;
-	dbenv->rep_start = __rep_start;
-	dbenv->rep_stat = __rep_stat_pp;
-	dbenv->rep_stat_print = __rep_stat_print_pp;
-	dbenv->get_rep_limit = __rep_get_limit;
-	dbenv->set_rep_limit = __rep_set_limit;
-	dbenv->set_rep_request = __rep_set_request;
-	dbenv->set_rep_transport = __rep_set_rep_transport;
-}
-
 void
 __rep_dbenv_refresh(dbenv)
 	DB_ENV *dbenv;
@@ -122,7 +111,7 @@ __rep_dbenv_refresh(dbenv)
 	return;
 }
 
-static int
+int
 __rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags)
 	DB_ENV *dbenv;
 	int nsites, nvotes, priority;
@@ -138,14 +127,36 @@ __rep_elect(dbenv, nsites, nvotes, priority, timeout, eidp, flags)
 	return (__db_norep(dbenv));
 }
 
-static int
+int
 __rep_flush(dbenv)
 	DB_ENV *dbenv;
 {
 	return (__db_norep(dbenv));
 }
 
-static int
+int
+__rep_get_config(dbenv, which, onp)
+	DB_ENV *dbenv;
+	u_int32_t which;
+	int *onp;
+{
+	COMPQUIET(which, 0);
+	COMPQUIET(onp, NULL);
+	return (__db_norep(dbenv));
+}
+
+int
+__rep_set_config(dbenv, which, on)
+	DB_ENV *dbenv;
+	u_int32_t which;
+	int on;
+{
+	COMPQUIET(which, 0);
+	COMPQUIET(on, 0);
+	return (__db_norep(dbenv));
+}
+
+int
 __rep_get_limit(dbenv, gbytesp, bytesp)
 	DB_ENV *dbenv;
 	u_int32_t *gbytesp, *bytesp;
@@ -155,14 +166,13 @@ __rep_get_limit(dbenv, gbytesp, bytesp)
 	return (__db_norep(dbenv));
 }
 
-void
+int
 __rep_get_gen(dbenv, genp)
 	DB_ENV *dbenv;
 	u_int32_t *genp;
 {
-	COMPQUIET(dbenv, NULL);
 	COMPQUIET(genp, NULL);
-	return;
+	return (__db_norep(dbenv));
 }
 
 int
@@ -190,11 +200,9 @@ __rep_open(dbenv)
 }
 
 int
-__rep_preclose(dbenv, do_closefiles)
+__rep_preclose(dbenv)
 	DB_ENV *dbenv;
-	int do_closefiles;
 {
-	COMPQUIET(do_closefiles, 0);
 	return (__db_norep(dbenv));
 }
 
@@ -229,23 +237,24 @@ __rep_region_init(dbenv)
 }
 
 int
-__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, logflags, repflags)
 	DB_ENV *dbenv;
 	int eid;
 	u_int32_t rtype;
 	DB_LSN *lsnp;
 	const DBT *dbtp;
-	u_int32_t flags;
+	u_int32_t logflags, repflags;
 {
 	COMPQUIET(eid, 0);
 	COMPQUIET(rtype, 0);
 	COMPQUIET(lsnp, NULL);
 	COMPQUIET(dbtp, NULL);
-	COMPQUIET(flags, 0);
+	COMPQUIET(logflags, 0);
+	COMPQUIET(repflags, 0);
 	return (__db_norep(dbenv));
 }
 
-static int
+int
 __rep_set_limit(dbenv, gbytes, bytes)
 	DB_ENV *dbenv;
 	u_int32_t gbytes, bytes;
@@ -255,7 +264,7 @@ __rep_set_limit(dbenv, gbytes, bytes)
 	return (__db_norep(dbenv));
 }
 
-static int
+int
 __rep_set_rep_transport(dbenv, eid, f_send)
 	DB_ENV *dbenv;
 	int eid;
@@ -267,7 +276,7 @@ __rep_set_rep_transport(dbenv, eid, f_send)
 	return (__db_norep(dbenv));
 }
 
-static int
+int
 __rep_set_request(dbenv, min, max)
 	DB_ENV *dbenv;
 	u_int32_t min, max;
@@ -277,7 +286,7 @@ __rep_set_request(dbenv, min, max)
 	return (__db_norep(dbenv));
 }
 
-static int
+int
 __rep_start(dbenv, dbt, flags)
 	DB_ENV *dbenv;
 	DBT *dbt;
@@ -316,4 +325,13 @@ __rep_stat_print(dbenv, flags)
 	COMPQUIET(flags, 0);
 	return (__db_norep(dbenv));
 }
+
+int
+__rep_sync(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	COMPQUIET(flags, 0);
+	return (__db_norep(dbenv));
+}
 #endif /* !HAVE_REPLICATION */
diff --git a/storage/bdb/rep/rep_util.c b/storage/bdb/rep/rep_util.c
index b6e72a6b47c..90c5df4cc99 100644
--- a/storage/bdb/rep/rep_util.c
+++ b/storage/bdb/rep/rep_util.c
@@ -1,9 +1,10 @@
 /*-
- * See the file LICENSE for redistribution information.  *
- * Copyright (c) 2001-2004
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2005
  *	Sleepycat Software.  All rights reserved.
  *
- * $Id: rep_util.c,v 1.135 2004/10/15 16:59:44 bostic Exp $
+ * $Id: rep_util.c,v 12.42 2005/10/27 01:26:02 mjc Exp $
  */
 
 #include "db_config.h"
@@ -27,6 +28,13 @@
 #include "db_int.h"
 #include "dbinc/log.h"
 #include "dbinc/txn.h"
+#ifdef REP_DIAGNOSTIC
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+#endif
 
 /*
  * rep_util.c:
@@ -34,37 +42,303 @@
  *	those called by other subsystems.
  */
 
-#define	TIMESTAMP_CHECK(dbenv, ts, renv)			\
-do {								\
-	if (renv->op_timestamp != 0 &&				\
+#define	TIMESTAMP_CHECK(dbenv, ts, renv) do {				\
+	if (renv->op_timestamp != 0 &&					\
 	    renv->op_timestamp + DB_REGENV_TIMEOUT < ts) {		\
-		MUTEX_LOCK(dbenv, &renv->mutex);		\
-		F_CLR(renv, DB_REGENV_REPLOCKED);		\
-		renv->op_timestamp = 0;				\
-		MUTEX_UNLOCK(dbenv, &renv->mutex);		\
-	}							\
+		REP_SYSTEM_LOCK(dbenv);					\
+		F_CLR(renv, DB_REGENV_REPLOCKED);			\
+		renv->op_timestamp = 0;					\
+		REP_SYSTEM_UNLOCK(dbenv);				\
+	}								\
 } while (0)
 
 #ifdef REP_DIAGNOSTIC
 static void __rep_print_logmsg __P((DB_ENV *, const DBT *, DB_LSN *));
 #endif
 
+/*
+ * __rep_bulk_message --
+ *	This is a wrapper for putting a record into a bulk buffer.  Since
+ * we have different bulk buffers, the caller must hand us the information
+ * we need to put the record into the correct buffer.  All bulk buffers
+ * are protected by the REP->mtx_clientdb.
+ *
+ * PUBLIC: int __rep_bulk_message __P((DB_ENV *, REP_BULK *, REP_THROTTLE *,
+ * PUBLIC:     DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__rep_bulk_message(dbenv, bulk, repth, lsn, dbt, flags)
+	DB_ENV *dbenv;
+	REP_BULK *bulk;
+	REP_THROTTLE *repth;
+	DB_LSN *lsn;
+	const DBT *dbt;
+	u_int32_t flags;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	int ret;
+	u_int32_t recsize, typemore;
+	u_int8_t *p;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	ret = 0;
+
+	/*
+	 * Figure out the total number of bytes needed for this record.
+	 */
+	recsize = dbt->size + sizeof(DB_LSN) + sizeof(dbt->size);
+
+	/*
+	 * If *this* buffer is actively being transmitted, wait until
+	 * we can use it.
+	 */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	while (FLD_ISSET(*(bulk->flagsp), BULK_XMIT)) {
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		__os_sleep(dbenv, 1, 0);
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	}
+
+	/*
+	 * If the record is bigger than the buffer entirely, send the
+	 * current buffer and then return DB_REP_BULKOVF so that this
+	 * record is sent as a singleton.  Do we have enough info to
+	 * do that here?  XXX
+	 */
+	if (recsize > bulk->len) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "bulk_msg: Record %d (0x%x) larger than entire buffer 0x%x",
+		    recsize, recsize, bulk->len));
+		rep->stat.st_bulk_overflows++;
+		(void)__rep_send_bulk(dbenv, bulk, flags);
+		/*
+		 * XXX __rep_send_message...
+		 */
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		return (DB_REP_BULKOVF);
+	}
+	/*
+	 * If this record doesn't fit, send the current buffer.
+	 * Sending the buffer will reset the offset, but we will
+	 * drop the mutex while sending so we need to keep checking
+	 * if we're racing.
+	 */
+	while (recsize + *(bulk->offp) > bulk->len) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+	    "bulk_msg: Record %lu (%#lx) doesn't fit.  Send %lu (%#lx) now.",
+		    (u_long)recsize, (u_long)recsize,
+		    (u_long)bulk->len, (u_long)bulk->len));
+		rep->stat.st_bulk_fills++;
+		if ((ret = __rep_send_bulk(dbenv, bulk, flags)) != 0)
+			break;
+	}
+
+	/*
+	 * If we're using throttling, see if we are at the throttling
+	 * limit before we do any more work here, by checking if the
+	 * call to rep_send_throttle changed the repth->type to the
+	 * *_MORE message type.  If the throttling code hits the limit
+	 * then we're done here.
+	 */
+	if (bulk->type == REP_BULK_LOG)
+		typemore = REP_LOG_MORE;
+	else
+		typemore = REP_PAGE_MORE;
+	if (repth != NULL &&
+	    (ret = __rep_send_throttle(dbenv, bulk->eid, repth,
+	    REP_THROTTLE_ONLY)) == 0 && repth->type == typemore) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "bulk_msg: Record %d (0x%x) hit throttle limit.",
+		    recsize, recsize));
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		return (ret);
+	}
+
+	/*
+	 * Now we own the buffer, and we know our record fits into it.
+	 * The buffer is structured with the len, LSN and then the record.
+	 * Copy the record into the buffer.  Then if we need to,
+	 * send the buffer.
+	 */
+	/*
+	 * First thing is the length of the dbt record.
+	 */
+	p = bulk->addr + *(bulk->offp);
+	memcpy(p, &dbt->size, sizeof(dbt->size));
+	p += sizeof(dbt->size);
+	/*
+	 * The next thing is the LSN.  We need LSNs for both pages and
+	 * log records.  For log records, this is obviously, the LSN of
+	 * this record.  For pages, the LSN is used by the internal init code.
+	 */
+	memcpy(p, lsn, sizeof(DB_LSN));
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "bulk_msg: Copying LSN [%lu][%lu] of %lu bytes to %#lx",
+	    (u_long)lsn->file, (u_long)lsn->offset, (u_long)dbt->size,
+	    P_TO_ULONG(p)));
+	p += sizeof(DB_LSN);
+	/*
+	 * If we're the first record, we need to save the first
+	 * LSN in the bulk structure.
+	 */
+	if (*(bulk->offp) == 0)
+		bulk->lsn = *lsn;
+	/*
+	 * Now copy the record and finally adjust the offset.
+	 */
+	memcpy(p, dbt->data, dbt->size);
+	p += dbt->size;
+	*(bulk->offp) = (uintptr_t)p - (uintptr_t)bulk->addr;
+	rep->stat.st_bulk_records++;
+	/*
+	 * Send the buffer if it is a perm record or a force.
+	 */
+	if (LF_ISSET(DB_LOG_PERM) || FLD_ISSET(*(bulk->flagsp), BULK_FORCE)) {
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "bulk_msg: Send buffer after copy due to %s",
+		    LF_ISSET(DB_LOG_PERM) ? "PERM" : "FORCE"));
+		ret = __rep_send_bulk(dbenv, bulk, flags);
+	}
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	return (ret);
+
+}
+
+/*
+ * __rep_send_bulk --
+ *	This function transmits the bulk buffer given.  It assumes the
+ * caller holds the REP->mtx_clientdb.  We may release it and reacquire
+ * it during this call.  We will return with it held.
+ *
+ * PUBLIC: int __rep_send_bulk __P((DB_ENV *, REP_BULK *, u_int32_t));
+ */
+int
+__rep_send_bulk(dbenv, bulkp, flags)
+	DB_ENV *dbenv;
+	REP_BULK *bulkp;
+	u_int32_t flags;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	DBT dbt;
+	int ret;
+#ifdef DIAGNOSTIC
+	DB_MSGBUF mb;
+#endif
+
+	/*
+	 * If the offset is 0, we're done.  There is nothing to send.
+	 */
+	if (*(bulkp->offp) == 0)
+		return (0);
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	memset(&dbt, 0, sizeof(dbt));
+	/*
+	 * Set that this buffer is being actively transmitted.
+	 */
+	FLD_SET(*(bulkp->flagsp), BULK_XMIT);
+	dbt.data = bulkp->addr;
+	dbt.size = (u_int32_t)*(bulkp->offp);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	RPRINT(dbenv, rep, (dbenv, &mb,
+	    "send_bulk: Send %d (0x%x) bulk buffer bytes", dbt.size, dbt.size));
+	/*
+	 * The mutex is unlocked; now send the message.
+	 */
+	rep->stat.st_bulk_transfers++;
+	ret = __rep_send_message(dbenv, bulkp->eid, bulkp->type, &bulkp->lsn,
+	    &dbt, flags, 0);
+
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	/*
+	 * If we're successful, reset the offset pointer to 0.
+	 * Clear the transmit flag regardless.
+	 */
+	if (ret == 0)
+		*(bulkp->offp) = 0;
+	FLD_CLR(*(bulkp->flagsp), BULK_XMIT);
+	return (ret);
+}
+
+/*
+ * __rep_bulk_alloc --
+ *	This function allocates and initializes an internal bulk buffer.
+ * This is used by the master when fulfilling a request for a chunk of
+ * log records or a bunch of pages.
+ *
+ * PUBLIC: int __rep_bulk_alloc __P((DB_ENV *, REP_BULK *, int, uintptr_t *,
+ * PUBLIC:    u_int32_t *, u_int32_t));
+ */
+int
+__rep_bulk_alloc(dbenv, bulkp, eid, offp, flagsp, type)
+	DB_ENV *dbenv;
+	REP_BULK *bulkp;
+	int eid;
+	uintptr_t *offp;
+	u_int32_t *flagsp, type;
+{
+	int ret;
+
+	memset(bulkp, 0, sizeof(REP_BULK));
+	*offp = *flagsp = 0;
+	bulkp->len = MEGABYTE;
+	if ((ret = __os_malloc(dbenv, bulkp->len, &bulkp->addr)) != 0)
+		return (ret);
+	bulkp->offp = offp;
+	bulkp->type = type;
+	bulkp->eid = eid;
+	bulkp->flagsp = flagsp;
+	return (ret);
+}
+
+/*
+ * __rep_bulk_free --
+ *	This function sends the remainder of the bulk buffer and frees it.
+ *
+ * PUBLIC: int __rep_bulk_free __P((DB_ENV *, REP_BULK *, u_int32_t));
+ */
+int
+__rep_bulk_free(dbenv, bulkp, flags)
+	DB_ENV *dbenv;
+	REP_BULK *bulkp;
+	u_int32_t flags;
+{
+	DB_REP *db_rep;
+	int ret;
+
+	db_rep = dbenv->rep_handle;
+
+	MUTEX_LOCK(dbenv, db_rep->region->mtx_clientdb);
+	ret = __rep_send_bulk(dbenv, bulkp, flags);
+	MUTEX_UNLOCK(dbenv, db_rep->region->mtx_clientdb);
+	__os_free(dbenv, bulkp->addr);
+	return (ret);
+}
+
 /*
  * __rep_send_message --
  *	This is a wrapper for sending a message.  It takes care of constructing
  * the REP_CONTROL structure and calling the user's specified send function.
  *
  * PUBLIC: int __rep_send_message __P((DB_ENV *, int,
- * PUBLIC:     u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+ * PUBLIC:     u_int32_t, DB_LSN *, const DBT *, u_int32_t, u_int32_t));
  */
 int
-__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+__rep_send_message(dbenv, eid, rtype, lsnp, dbt, logflags, repflags)
 	DB_ENV *dbenv;
 	int eid;
 	u_int32_t rtype;
 	DB_LSN *lsnp;
-	const DBT *dbtp;
-	u_int32_t flags;
+	const DBT *dbt;
+	u_int32_t logflags, repflags;
 {
 	DB_REP *db_rep;
 	REP *rep;
@@ -86,7 +360,7 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
 	else
 		cntrl.lsn = *lsnp;
 	cntrl.rectype = rtype;
-	cntrl.flags = flags;
+	cntrl.flags = logflags;
 	cntrl.rep_version = DB_REPVERSION;
 	cntrl.log_version = DB_LOGVERSION;
 	cntrl.gen = rep->gen;
@@ -96,35 +370,38 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
 	cdbt.size = sizeof(cntrl);
 
 	/* Don't assume the send function will be tolerant of NULL records. */
-	if (dbtp == NULL) {
+	if (dbt == NULL) {
 		memset(&scrap_dbt, 0, sizeof(DBT));
-		dbtp = &scrap_dbt;
+		dbt = &scrap_dbt;
 	}
 
 	REP_PRINT_MESSAGE(dbenv, eid, &cntrl, "rep_send_message");
 #ifdef REP_DIAGNOSTIC
-	if (rtype == REP_LOG)
-		__rep_print_logmsg(dbenv, dbtp, lsnp);
+	if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) && rtype == REP_LOG)
+		__rep_print_logmsg(dbenv, dbt, lsnp);
 #endif
 	/*
-	 * There are three types of records: commit and checkpoint records
+	 * There are several types of records: commit and checkpoint records
 	 * that affect database durability, regular log records that might
 	 * be buffered on the master before being transmitted, and control
 	 * messages which don't require the guarantees of permanency, but
 	 * should not be buffered.
+	 *
+	 * There are request records that can be sent anywhere, and there
+	 * are rerequest records that the app might want to send to the master.
 	 */
-	myflags = 0;
-	if (LF_ISSET(DB_LOG_PERM))
-		myflags = DB_REP_PERMANENT;
-	else if (rtype != REP_LOG || LF_ISSET(DB_LOG_RESEND))
-		myflags = DB_REP_NOBUFFER;
-	if (rtype == REP_LOG && !LF_ISSET(DB_LOG_PERM)) {
+	myflags = repflags;
+	if (FLD_ISSET(logflags, DB_LOG_PERM))
+		myflags |= DB_REP_PERMANENT;
+	else if (rtype != REP_LOG || FLD_ISSET(logflags, DB_LOG_RESEND))
+		myflags |= DB_REP_NOBUFFER;
+	if (rtype == REP_LOG && !FLD_ISSET(logflags, DB_LOG_PERM)) {
 		/*
 		 * Check if this is a log record we just read that
 		 * may need a DB_LOG_PERM.  This is of type REP_LOG,
-		 * so we know that dbtp is a log record.
+		 * so we know that dbt is a log record.
 		 */
-		memcpy(&rectype, dbtp->data, sizeof(rectype));
+		memcpy(&rectype, dbt->data, sizeof(rectype));
 		if (rectype == DB___txn_regop || rectype == DB___txn_ckp)
 			F_SET(&cntrl, DB_LOG_PERM);
 	 }
@@ -134,7 +411,7 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
 	 * actual LSN so that they can coordinate with permanent records from
 	 * the client if they want to.
 	 */
-	ret = dbenv->rep_send(dbenv, &cdbt, dbtp, &cntrl.lsn, eid, myflags);
+	ret = dbenv->rep_send(dbenv, &cdbt, dbt, &cntrl.lsn, eid, myflags);
 
 	/*
 	 * We don't hold the rep lock, so this could miscount if we race.
@@ -152,13 +429,11 @@ __rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
 }
 
 #ifdef REP_DIAGNOSTIC
-
 /*
  * __rep_print_logmsg --
  *	This is a debugging routine for printing out log records that
  * we are about to transmit to a client.
  */
-
 static void
 __rep_print_logmsg(dbenv, logdbt, lsnp)
 	DB_ENV *dbenv;
@@ -185,8 +460,8 @@ __rep_print_logmsg(dbenv, logdbt, lsnp)
 	(void)__db_dispatch(dbenv,
 	    ptab, ptabsize, (DBT *)logdbt, lsnp, DB_TXN_PRINT, NULL);
 }
-
 #endif
+
 /*
  * __rep_new_master --
  *	Called after a master election to sync back up with a new master.
@@ -206,25 +481,24 @@ __rep_new_master(dbenv, cntrl, eid)
 	int eid;
 {
 	DB_LOG *dblp;
-	DB_LSN ckp_lsn, lsn;
+	DB_LOGC *logc;
+	DB_LSN first_lsn, lsn;
 	DB_REP *db_rep;
-	DB_TXNMGR *mgr;
-	DB_TXNREGION *region;
+	DBT dbt;
 	LOG *lp;
 	REGENV *renv;
 	REGINFO *infop;
 	REP *rep;
-	int change, do_req, ret;
+	int change, do_req, ret, t_ret;
 #ifdef DIAGNOSTIC
 	DB_MSGBUF mb;
 #endif
 
 	db_rep = dbenv->rep_handle;
-	mgr = dbenv->tx_handle;
-	region = mgr->reginfo.primary;
 	rep = db_rep->region;
 	ret = 0;
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	logc = NULL;
+	REP_SYSTEM_LOCK(dbenv);
 	__rep_elect_done(dbenv, rep);
 	change = rep->gen != cntrl->gen || rep->master_id != eid;
 	if (change) {
@@ -239,18 +513,40 @@ __rep_new_master(dbenv, cntrl, eid)
 		rep->master_id = eid;
 		rep->stat.st_master_changes++;
 		rep->stat.st_startup_complete = 0;
+		/*
+		 * If we're delaying client sync-up, we know we have a
+		 * new/changed master now, set flag indicating we are
+		 * actively delaying.
+		 */
+		if (FLD_ISSET(rep->config, REP_C_DELAYCLIENT))
+			F_SET(rep, REP_F_DELAY);
+		/*
+		 * If we are already locking out others, we're either
+		 * in the middle of sync-up recovery or internal init
+		 * when this newmaster comes in (we also lockout in
+		 * rep_start, but we cannot be racing that because we
+		 * don't allow rep_proc_msg when rep_start is going on).
+		 *
+		 * If we were in the middle of an internal initialization
+		 * and we've discovered a new master instead, clean up
+		 * our old internal init information.  We need to clean
+		 * up any flags and unlock our lockout.
+		 */
+		if (rep->in_recovery || F_ISSET(rep, REP_F_READY)) {
+			(void)__rep_init_cleanup(dbenv, rep, DB_FORCE);
+			F_CLR(rep, REP_F_RECOVER_MASK);
+			rep->in_recovery = 0;
+			F_CLR(rep, REP_F_READY);
+		}
 		F_SET(rep, REP_F_NOARCHIVE | REP_F_RECOVER_VERIFY);
 	}
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
 	dblp = dbenv->lg_handle;
 	lp = dblp->reginfo.primary;
-	R_LOCK(dbenv, &dblp->reginfo);
+	LOG_SYSTEM_LOCK(dbenv);
 	lsn = lp->lsn;
-	R_UNLOCK(dbenv, &dblp->reginfo);
-	R_LOCK(dbenv, &mgr->reginfo);
-	ckp_lsn = region->last_ckp;
-	R_UNLOCK(dbenv, &mgr->reginfo);
+	LOG_SYSTEM_UNLOCK(dbenv);
 
 	if (!change) {
 		/*
@@ -258,22 +554,25 @@ __rep_new_master(dbenv, cntrl, eid)
 		 * catching up or verification to do.
 		 */
 		ret = 0;
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 		do_req = __rep_check_doreq(dbenv, rep);
 		if (F_ISSET(rep, REP_F_RECOVER_VERIFY)) {
 			lsn = lp->verify_lsn;
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-			if (!IS_ZERO_LSN(lsn) && do_req)
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+			if (!F_ISSET(rep, REP_F_DELAY) &&
+			    !IS_ZERO_LSN(lsn) && do_req)
 				(void)__rep_send_message(dbenv, eid,
-				    REP_VERIFY_REQ, &lsn, NULL, 0);
+				    REP_VERIFY_REQ, &lsn, NULL, 0,
+				    DB_REP_ANYWHERE);
 		} else {
-			MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 			if (log_compare(&lsn, &cntrl->lsn) < 0 && do_req)
-				(void)__rep_send_message(dbenv,
-				    eid, REP_ALL_REQ, &lsn, NULL, 0);
-			MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+				(void)__rep_send_message(dbenv, eid,
+				    REP_ALL_REQ, &lsn, NULL,
+				    0, DB_REP_ANYWHERE);
+			REP_SYSTEM_LOCK(dbenv);
 			F_CLR(rep, REP_F_NOARCHIVE);
-			MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+			REP_SYSTEM_UNLOCK(dbenv);
 		}
 		return (ret);
 	}
@@ -286,45 +585,119 @@ __rep_new_master(dbenv, cntrl, eid)
 	 * the master is not, then we just need to request all the log
 	 * records from the master.
 	 */
-	if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn) || IS_ZERO_LSN(ckp_lsn)) {
-		/*
-		 * If we don't have a checkpoint, we still might have
-		 * some log records but we're discarding them to sync
-		 * up with the master from the start.  Therefore,
-		 * truncate our log.
-		 */
-		if (IS_ZERO_LSN(ckp_lsn)) {
-			INIT_LSN(lsn);
-			(void)__log_vtruncate(dbenv, &lsn, &ckp_lsn, NULL);
-			infop = dbenv->reginfo;
-			renv = infop->primary;
-			(void)time(&renv->rep_timestamp);
-		}
-
+	if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn)) {
 		/*
 		 * If we have no log, then we have no files to open
 		 * in recovery, but we've opened what we can, which
 		 * is none.  Mark DBREP_OPENFILES here.
 		 */
-		MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+empty:		MUTEX_LOCK(dbenv, rep->mtx_clientdb);
 		F_SET(db_rep, DBREP_OPENFILES);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		ZERO_LSN(lp->verify_lsn);
+		REP_SYSTEM_LOCK(dbenv);
 		F_CLR(rep, REP_F_NOARCHIVE | REP_F_RECOVER_MASK);
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
-		MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 
-		if (!IS_INIT_LSN(cntrl->lsn))
-			(void)__rep_send_message(dbenv, rep->master_id,
-			    REP_ALL_REQ, &lsn, NULL, 0);
+		if (!IS_INIT_LSN(cntrl->lsn)) {
+			/*
+			 * We're making an ALL_REQ.  But now that we've
+			 * cleared the flags, we're likely receiving new
+			 * log records from the master, resulting in a gap
+			 * immediately.  So to avoid multiple data streams,
+			 * set the wait_recs value high now to give the master
+			 * a chance to start sending us these records before
+			 * the gap code re-requests the same gap.  Wait_recs
+			 * will get reset once we start receiving these
+			 * records.
+			 */
+			lp->wait_recs = rep->max_gap;
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+			/*
+			 * Don't send the ALL_REQ if we're delayed.  But we
+			 * check here, after lp->wait_recs is set up so that
+			 * when the app calls rep_sync, everything is ready
+			 * to go.
+			 */
+			if (!F_ISSET(rep, REP_F_DELAY))
+				(void)__rep_send_message(dbenv, eid,
+				    REP_ALL_REQ, &lsn, NULL,
+				    0, DB_REP_ANYWHERE);
+		} else
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
 
 		return (DB_REP_NEWMASTER);
 	}
 
-	MUTEX_LOCK(dbenv, db_rep->db_mutexp);
-	lp->verify_lsn = ckp_lsn;
-	MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
-	(void)__rep_send_message(dbenv,
-	    eid, REP_VERIFY_REQ, &ckp_lsn, NULL, 0);
+	memset(&dbt, 0, sizeof(dbt));
+	/*
+	 * If this client is farther ahead on the log file than the master, see
+	 * if there is any overlap in the logs.  If not, the client is too
+	 * far ahead of the master and we cannot determine they're part of
+	 * the same replication group.
+	 */
+	if (cntrl->lsn.file < lsn.file) {
+		if ((ret = __log_cursor(dbenv, &logc)) != 0)
+			goto err;
+		if ((ret = __log_c_get(logc, &first_lsn, &dbt, DB_FIRST)) != 0)
+			goto err;
+		if (cntrl->lsn.file < first_lsn.file) {
+			__db_err(dbenv,
+    "Client too far ahead of master; unable to join replication group");
+			ret = DB_REP_JOIN_FAILURE;
+			goto err;
+		}
+		ret = __log_c_close(logc);
+		logc = NULL;
+		if (ret != 0)
+			goto err;
+	}
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		goto err;
+	ret = __rep_log_backup(logc, &lsn);
+err:	if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+	if (ret == DB_NOTFOUND) {
+		/*
+		 * If we don't have an identification record, we still might
+		 * have some log records but we're discarding them to sync
+		 * up with the master from the start.  Therefore,
+		 * truncate our log and go to the no log case.
+		 */
+		INIT_LSN(lsn);
+		RPRINT(dbenv, rep, (dbenv, &mb,
+		    "No commit or ckp found.  Truncate log."));
+		(void)__log_vtruncate(dbenv, &lsn, &lsn, NULL);
+		infop = dbenv->reginfo;
+		renv = infop->primary;
+		REP_SYSTEM_LOCK(dbenv);
+		(void)time(&renv->rep_timestamp);
+		REP_SYSTEM_UNLOCK(dbenv);
+		goto empty;
+	}
+
+	/*
+	 * If we failed here, we need to clear the flags we may
+	 * have set above because we're not going to be setting
+	 * the verify_lsn.
+	 */
+	if (ret != 0) {
+		REP_SYSTEM_LOCK(dbenv);
+		F_CLR(rep, REP_F_RECOVER_MASK | REP_F_DELAY);
+		REP_SYSTEM_UNLOCK(dbenv);
+		return (ret);
+	}
+
+	/*
+	 * Finally, we have a record to ask for.
+	 */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	lp->verify_lsn = lsn;
+	lp->rcvd_recs = 0;
+	lp->wait_recs = rep->request_gap;
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	if (!F_ISSET(rep, REP_F_DELAY))
+		(void)__rep_send_message(dbenv,
+		    eid, REP_VERIFY_REQ, &lsn, NULL, 0, DB_REP_ANYWHERE);
 
 	return (DB_REP_NEWMASTER);
 }
@@ -373,15 +746,14 @@ __rep_noarchive(dbenv)
 	REP *rep;
 	time_t timestamp;
 
-	if (!REP_ON(dbenv))
-		return (0);
-	db_rep = dbenv->rep_handle;
-	rep = db_rep->region;
 	infop = dbenv->reginfo;
 	renv = infop->primary;
 
-	if (F_ISSET(rep, REP_F_NOARCHIVE))
-		return (1);
+	/*
+	 * This is tested before REP_ON below because we always need
+	 * to obey if any replication process has disabled archiving.
+	 * Everything is in the environment region that we need here.
+	 */
 	if (F_ISSET(renv, DB_REGENV_REPLOCKED)) {
 		(void)time(×tamp);
 		TIMESTAMP_CHECK(dbenv, timestamp, renv);
@@ -392,6 +764,13 @@ __rep_noarchive(dbenv)
 		if (F_ISSET(renv, DB_REGENV_REPLOCKED))
 			return (EINVAL);
 	}
+
+	if (!REP_ON(dbenv))
+		return (0);
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	if (F_ISSET(rep, REP_F_NOARCHIVE))
+		return (1);
 	return (0);
 }
 
@@ -424,13 +803,13 @@ __rep_send_vote(dbenv, lsnp, nsites, nvotes, pri, tie, egen, eid, vtype)
 	vote_dbt.data = &vi;
 	vote_dbt.size = sizeof(vi);
 
-	(void)__rep_send_message(dbenv, eid, vtype, lsnp, &vote_dbt, 0);
+	(void)__rep_send_message(dbenv, eid, vtype, lsnp, &vote_dbt, 0, 0);
 }
 
 /*
  * __rep_elect_done
  *	Clear all election information for this site.  Assumes the
- *	caller hold rep_mutex.
+ *	caller hold the region mutex.
  *
  * PUBLIC: void __rep_elect_done __P((DB_ENV *, REP *));
  */
@@ -440,6 +819,7 @@ __rep_elect_done(dbenv, rep)
 	REP *rep;
 {
 	int inelect;
+	u_int32_t endsec, endusec;
 #ifdef DIAGNOSTIC
 	DB_MSGBUF mb;
 #else
@@ -449,8 +829,21 @@ __rep_elect_done(dbenv, rep)
 	F_CLR(rep, REP_F_EPHASE1 | REP_F_EPHASE2 | REP_F_TALLY);
 	rep->sites = 0;
 	rep->votes = 0;
-	if (inelect)
+	if (inelect) {
+		if (rep->esec != 0) {
+			__os_clock(dbenv, &endsec, &endusec);
+			__db_difftime(rep->esec, endsec, rep->eusec, endusec,
+			    &rep->stat.st_election_sec,
+			    &rep->stat.st_election_usec);
+			RPRINT(dbenv, rep, (dbenv, &mb,
+			    "Election finished in %u.%06u sec",
+			    rep->stat.st_election_sec,
+			    rep->stat.st_election_usec));
+			rep->esec = 0;
+			rep->eusec = 0;
+		}
 		rep->egen++;
+	}
 	RPRINT(dbenv, rep, (dbenv, &mb,
 	    "Election done; egen %lu", (u_long)rep->egen));
 }
@@ -480,14 +873,14 @@ __rep_grow_sites(dbenv, nsites)
 	 * Allocate either twice the current allocation or nsites,
 	 * whichever is more.
 	 */
-
 	nalloc = 2 * rep->asites;
 	if (nalloc < nsites)
 		nalloc = nsites;
 
 	infop = dbenv->reginfo;
 	renv = infop->primary;
-	MUTEX_LOCK(dbenv, &renv->mutex);
+	MUTEX_LOCK(dbenv, renv->mtx_regenv);
+
 	/*
 	 * We allocate 2 tally regions, one for tallying VOTE1's and
 	 * one for VOTE2's.  Always grow them in tandem, because if we
@@ -528,47 +921,71 @@ __rep_grow_sites(dbenv, nsites)
 			rep->nsites = 0;
 		}
 	}
-	MUTEX_UNLOCK(dbenv, &renv->mutex);
+	MUTEX_UNLOCK(dbenv, renv->mtx_regenv);
 	return (ret);
 }
 
 /*
  * __env_rep_enter --
  *
- *	Check if we are in the middle of replication initialization and/or
+ * Check if we are in the middle of replication initialization and/or
  * recovery, and if so, disallow operations.  If operations are allowed,
  * increment handle-counts, so that we do not start recovery while we
  * are operating in the library.
  *
- * PUBLIC: void __env_rep_enter __P((DB_ENV *));
+ * PUBLIC: int __env_rep_enter __P((DB_ENV *, int));
  */
-void
-__env_rep_enter(dbenv)
+int
+__env_rep_enter(dbenv, checklock)
 	DB_ENV *dbenv;
+	int checklock;
 {
 	DB_REP *db_rep;
+	REGENV *renv;
+	REGINFO *infop;
 	REP *rep;
 	int cnt;
+	time_t	timestamp;
 
-        /* Check if locks have been globally turned off. */
+	/* Check if locks have been globally turned off. */
 	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
-		return;
+		return (0);
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+	if (checklock && F_ISSET(renv, DB_REGENV_REPLOCKED)) {
+		(void)time(×tamp);
+		TIMESTAMP_CHECK(dbenv, timestamp, renv);
+		/*
+		 * Check if we're still locked out after checking
+		 * the timestamp.
+		 */
+		if (F_ISSET(renv, DB_REGENV_REPLOCKED))
+			return (EINVAL);
+	}
+
+	REP_SYSTEM_LOCK(dbenv);
 	for (cnt = 0; rep->in_recovery;) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
+		if (FLD_ISSET(rep->config, REP_C_NOWAIT)) {
+			__db_err(dbenv,
+    "Operation locked out.  Waiting for replication recovery to complete");
+			return (DB_REP_LOCKOUT);
+		}
 		__os_sleep(dbenv, 1, 0);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_LOCK(dbenv);
 		if (++cnt % 60 == 0)
 			__db_err(dbenv,
     "DB_ENV handle waiting %d minutes for replication recovery to complete",
 			    cnt / 60);
 	}
 	rep->handle_cnt++;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -576,25 +993,27 @@ __env_rep_enter(dbenv)
  *
  *	Decrement handle count upon routine exit.
  *
- * PUBLIC: void __env_db_rep_exit __P((DB_ENV *));
+ * PUBLIC: int __env_db_rep_exit __P((DB_ENV *));
  */
-void
+int
 __env_db_rep_exit(dbenv)
 	DB_ENV *dbenv;
 {
 	DB_REP *db_rep;
 	REP *rep;
 
-        /* Check if locks have been globally turned off. */
+	/* Check if locks have been globally turned off. */
 	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
-		return;
+		return (0);
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	rep->handle_cnt--;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -602,8 +1021,12 @@ __env_db_rep_exit(dbenv)
  *	Called in replicated environments to keep track of in-use handles
  * and prevent any concurrent operation during recovery.  If checkgen is
  * non-zero, then we verify that the dbp has the same handle as the env.
+ *
  * If return_now is non-zero, we'll return DB_DEADLOCK immediately, else we'll
- * sleep before returning DB_DEADLOCK.
+ * sleep before returning DB_DEADLOCK.  Without the sleep, it is likely
+ * the application will immediately try again and could reach a retry
+ * limit before replication has a chance to finish.  The sleep increases
+ * the probability that an application retry will succeed.
  *
  * PUBLIC: int __db_rep_enter __P((DB *, int, int, int));
  */
@@ -639,23 +1062,23 @@ __db_rep_enter(dbp, checkgen, checklock, return_now)
 		if (F_ISSET(renv, DB_REGENV_REPLOCKED))
 			return (EINVAL);
 	}
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (F_ISSET(rep, REP_F_READY)) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		if (!return_now)
 			__os_sleep(dbenv, 5, 0);
 		return (DB_LOCK_DEADLOCK);
 	}
 
 	if (checkgen && dbp->timestamp != renv->rep_timestamp) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
 		__db_err(dbenv, "%s %s",
 		    "replication recovery unrolled committed transactions;",
 		    "open DB and DBcursor handles must be closed");
 		return (DB_REP_HANDLE_DEAD);
 	}
 	rep->handle_cnt++;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
 
 	return (0);
 }
@@ -669,9 +1092,9 @@ __db_rep_enter(dbp, checkgen, checklock, return_now)
  * increment the op_cnt, so that we do not start recovery while we have
  * active operations.
  *
- * PUBLIC: void __op_rep_enter __P((DB_ENV *));
+ * PUBLIC: int __op_rep_enter __P((DB_ENV *));
  */
-void
+int
 __op_rep_enter(dbenv)
 	DB_ENV *dbenv;
 {
@@ -679,25 +1102,33 @@ __op_rep_enter(dbenv)
 	REP *rep;
 	int cnt;
 
-        /* Check if locks have been globally turned off. */
+	/* Check if locks have been globally turned off. */
 	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
-		return;
+		return (0);
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	for (cnt = 0; F_ISSET(rep, REP_F_READY);) {
-		MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+		REP_SYSTEM_UNLOCK(dbenv);
+		if (FLD_ISSET(rep->config, REP_C_NOWAIT)) {
+			__db_err(dbenv,
+    "Operation locked out.  Waiting for replication recovery to complete");
+			return (DB_REP_LOCKOUT);
+		}
 		__os_sleep(dbenv, 5, 0);
-		MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
-		if (++cnt % 60 == 0)
+		cnt += 5;
+		REP_SYSTEM_LOCK(dbenv);
+		if (cnt % 60 == 0)
 			__db_err(dbenv,
 	"__op_rep_enter waiting %d minutes for op count to drain",
 			    cnt / 60);
 	}
 	rep->op_cnt++;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -706,26 +1137,28 @@ __op_rep_enter(dbenv)
  *	Decrement op count upon transaction commit/abort/discard or
  *	memp_fput.
  *
- * PUBLIC: void __op_rep_exit __P((DB_ENV *));
+ * PUBLIC: int __op_rep_exit __P((DB_ENV *));
  */
-void
+int
 __op_rep_exit(dbenv)
 	DB_ENV *dbenv;
 {
 	DB_REP *db_rep;
 	REP *rep;
 
-        /* Check if locks have been globally turned off. */
+	/* Check if locks have been globally turned off. */
 	if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
-		return;
+		return (0);
 
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	DB_ASSERT(rep->op_cnt > 0);
 	rep->op_cnt--;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
 }
 
 /*
@@ -733,9 +1166,9 @@ __op_rep_exit(dbenv)
  *
  *	Get the generation number from a replicated environment.
  *
- * PUBLIC: void __rep_get_gen __P((DB_ENV *, u_int32_t *));
+ * PUBLIC: int __rep_get_gen __P((DB_ENV *, u_int32_t *));
  */
-void
+int
 __rep_get_gen(dbenv, genp)
 	DB_ENV *dbenv;
 	u_int32_t *genp;
@@ -746,12 +1179,145 @@ __rep_get_gen(dbenv, genp)
 	db_rep = dbenv->rep_handle;
 	rep = db_rep->region;
 
-	MUTEX_LOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_LOCK(dbenv);
 	if (rep->recover_gen > rep->gen)
 		*genp = rep->recover_gen;
 	else
 		*genp = rep->gen;
-	MUTEX_UNLOCK(dbenv, db_rep->rep_mutexp);
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	return (0);
+}
+
+/*
+ * __rep_lockout --
+ *	Coordinate with other threads in the library and active txns so
+ *	that we can run single-threaded, for recovery or internal backup.
+ *	Assumes the caller holds the region mutex.
+ *
+ * PUBLIC: int __rep_lockout __P((DB_ENV *, REP *, u_int32_t));
+ */
+int
+__rep_lockout(dbenv, rep, msg_th)
+	DB_ENV *dbenv;
+	REP *rep;
+	u_int32_t msg_th;
+{
+	int wait_cnt;
+
+	/* Phase 1: set REP_F_READY and wait for op_cnt to go to 0. */
+	F_SET(rep, REP_F_READY);
+	for (wait_cnt = 0; rep->op_cnt != 0;) {
+		/*
+		 * Drop the region mutex while sleeping so in-flight
+		 * operations can finish and decrement op_cnt.
+		 */
+		REP_SYSTEM_UNLOCK(dbenv);
+		__os_sleep(dbenv, 1, 0);
+#if defined(DIAGNOSTIC) || defined(CONFIG_TEST)
+		if (++wait_cnt % 60 == 0)
+			__db_err(dbenv,
+	"Waiting for txn_cnt to run replication recovery/backup for %d minutes",
+			wait_cnt / 60);
+#endif
+		REP_SYSTEM_LOCK(dbenv);
+	}
+
+	/*
+	 * Phase 2: set in_recovery and wait for handle count to go
+	 * to 0 and for the number of threads in __rep_process_message
+	 * to go to 1 (us).
+	 */
+	/*
+	 * NOTE(review): msg_th is presumably the residual count of
+	 * message-processing threads allowed to remain (the caller itself,
+	 * when called from __rep_process_message) -- confirm with callers.
+	 */
+	rep->in_recovery = 1;
+	for (wait_cnt = 0; rep->handle_cnt != 0 || rep->msg_th > msg_th;) {
+		REP_SYSTEM_UNLOCK(dbenv);
+		__os_sleep(dbenv, 1, 0);
+#ifdef DIAGNOSTIC
+		if (++wait_cnt % 60 == 0)
+			__db_err(dbenv,
+"Waiting for handle count to run replication recovery/backup for %d minutes",
+			wait_cnt / 60);
+#endif
+		REP_SYSTEM_LOCK(dbenv);
+	}
+
+	/* Returns holding the region mutex, as it was entered. */
+	return (0);
+}
+
+/*
+ * __rep_send_throttle -
+ *	Send a record, throttling if necessary.  Callers of this function
+ * will throttle - breaking out of their loop, if the repth->type field
+ * changes from the normal message type to the *_MORE message type.
+ * This function will send the normal type unless throttling gets invoked.
+ * Then it sets the type field and sends the _MORE message.
+ *
+ * PUBLIC: int __rep_send_throttle __P((DB_ENV *, int, REP_THROTTLE *,
+ * PUBLIC:    u_int32_t));
+ */
+int
+__rep_send_throttle(dbenv, eid, repth, flags)
+	DB_ENV *dbenv;
+	int eid;
+	REP_THROTTLE *repth;
+	u_int32_t flags;
+{
+	DB_REP *db_rep;
+	REP *rep;
+	u_int32_t size, typemore;
+	int check_limit;
+
+	/* Throttling is enabled iff the caller configured a byte budget. */
+	check_limit = repth->gbytes != 0 || repth->bytes != 0;
+	/*
+	 * If we only want to do throttle processing and we don't have it
+	 * turned on, return immediately.
+	 */
+	if (!check_limit && LF_ISSET(REP_THROTTLE_ONLY))
+		return (0);
+
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	/* Map the normal message type to its throttled *_MORE variant. */
+	typemore = 0;
+	if (repth->type == REP_LOG)
+		typemore = REP_LOG_MORE;
+	if (repth->type == REP_PAGE)
+		typemore = REP_PAGE_MORE;
+	DB_ASSERT(typemore != 0);
+
+	/*
+	 * data_dbt.size is only the size of the log
+	 * record;  it doesn't count the size of the
+	 * control structure. Factor that in as well
+	 * so we're not off by a lot if our log records
+	 * are small.
+	 */
+	size = repth->data_dbt->size + sizeof(REP_CONTROL);
+	if (check_limit) {
+		while (repth->bytes <= size) {
+			/*
+			 * Spill a gigabyte from the gbytes budget into the
+			 * bytes budget before deciding to throttle.
+			 */
+			if (repth->gbytes > 0) {
+				repth->bytes += GIGABYTE;
+				--(repth->gbytes);
+				continue;
+			}
+			/*
+			 * We don't hold the rep mutex,
+			 * and may miscount.
+			 */
+			rep->stat.st_nthrottles++;
+			repth->type = typemore;
+			goto send;
+		}
+		repth->bytes -= size;
+	}
+	/*
+	 * Always send if it is typemore, otherwise send only if
+	 * REP_THROTTLE_ONLY is not set.
+	 */
+send:	if ((repth->type == typemore || !LF_ISSET(REP_THROTTLE_ONLY)) &&
+	    (__rep_send_message(dbenv, eid, repth->type,
+	    &repth->lsn, repth->data_dbt, DB_LOG_RESEND, 0) != 0))
+		return (1);
+	return (0);
+}
 
 #ifdef DIAGNOSTIC
@@ -778,6 +1344,12 @@ __rep_print_message(dbenv, eid, rp, str)
 	case REP_ALL_REQ:
 		type = "all_req";
 		break;
+	case REP_BULK_LOG:
+		type = "bulk_log";
+		break;
+	case REP_BULK_PAGE:
+		type = "bulk_page";
+		break;
 	case REP_DUPMASTER:
 		type = "dupmaster";
 		break;
@@ -826,6 +1398,9 @@ __rep_print_message(dbenv, eid, rp, str)
 	case REP_PAGE_REQ:
 		type = "page_req";
 		break;
+	case REP_REREQUEST:
+		type = "rerequest";
+		break;
 	case REP_UPDATE:
 		type = "update";
 		break;
diff --git a/storage/bdb/rep/rep_verify.c b/storage/bdb/rep/rep_verify.c
new file mode 100644
index 00000000000..76fa8ae10c4
--- /dev/null
+++ b/storage/bdb/rep/rep_verify.c
@@ -0,0 +1,499 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2004-2005
+ *	Sleepycat Software.  All rights reserved.
+ *
+ * $Id: rep_verify.c,v 12.21 2005/10/19 19:06:37 sue Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __rep_dorecovery __P((DB_ENV *, DB_LSN *, DB_LSN *));
+
+/*
+ * __rep_verify --
+ *	Handle a REP_VERIFY message.
+ *
+ * PUBLIC: int __rep_verify __P((DB_ENV *, REP_CONTROL *, DBT *, int, time_t));
+ */
+int
+__rep_verify(dbenv, rp, rec, eid, savetime)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	DBT *rec;
+	int eid;
+	time_t savetime;
+{
+	DB_LOG *dblp;
+	DB_LOGC *logc;
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	DBT mylog;
+	LOG *lp;
+	REP *rep;
+	int match, ret, t_ret;
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	/* If we are not waiting for a verify response, ignore the message. */
+	if (IS_ZERO_LSN(lp->verify_lsn))
+		return (ret);
+
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		return (ret);
+	memset(&mylog, 0, sizeof(mylog));
+	if ((ret = __log_c_get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
+		goto err;
+	/*
+	 * We match only if our record at this LSN is byte-identical
+	 * to the master's.
+	 */
+	match = 0;
+	if (mylog.size == rec->size &&
+	    memcmp(mylog.data, rec->data, rec->size) == 0)
+		match = 1;
+	/*
+	 * If we don't have a match, backup to the previous
+	 * identification record and try again.
+	 */
+	if (match == 0) {
+		ZERO_LSN(lsn);
+		if ((ret = __rep_log_backup(logc, &lsn)) == 0) {
+			/* Reset gap-detection state for the new request. */
+			MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+			lp->verify_lsn = lsn;
+			lp->rcvd_recs = 0;
+			lp->wait_recs = rep->request_gap;
+			MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+			(void)__rep_send_message(dbenv, eid, REP_VERIFY_REQ,
+			    &lsn, NULL, 0, DB_REP_ANYWHERE);
+		} else if (ret == DB_NOTFOUND) {
+			/*
+			 * We've either run out of records because
+			 * logs have been removed or we've rolled back
+			 * all the way to the beginning.  In the latter
+			 * we don't think these sites were ever part of
+			 * the same environment and we'll say so.
+			 * In the former, request internal backup.
+			 */
+			if (rp->lsn.file == 1) {
+				__db_err(dbenv,
+		"Client was never part of master's environment");
+				ret = DB_REP_JOIN_FAILURE;
+			} else {
+				rep->stat.st_outdated++;
+
+				LOG_SYSTEM_LOCK(dbenv);
+				lsn = lp->lsn;
+				LOG_SYSTEM_UNLOCK(dbenv);
+				REP_SYSTEM_LOCK(dbenv);
+				F_CLR(rep, REP_F_RECOVER_VERIFY);
+				/*
+				 * Internal init is a policy choice; if the
+				 * application disabled it, fail the join.
+				 */
+				if (FLD_ISSET(rep->config, REP_C_NOAUTOINIT))
+					ret = DB_REP_JOIN_FAILURE;
+				else {
+					F_SET(rep, REP_F_RECOVER_UPDATE);
+					ZERO_LSN(rep->first_lsn);
+				}
+				REP_SYSTEM_UNLOCK(dbenv);
+				if (ret == 0)
+					(void)__rep_send_message(dbenv,
+					    eid, REP_UPDATE_REQ, NULL,
+					    NULL, 0, DB_REP_ANYWHERE);
+			}
+		}
+	} else
+		ret = __rep_verify_match(dbenv, &rp->lsn, savetime);
+
+err:	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+
+/*
+ * __rep_verify_fail --
+ *	Handle a REP_VERIFY_FAIL message.
+ *
+ * PUBLIC: int __rep_verify_fail __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_verify_fail(dbenv, rp, eid)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	int eid;
+{
+	DB_LOG *dblp;
+	DB_REP *db_rep;
+	LOG *lp;
+	REP *rep;
+	int ret;
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	/*
+	 * If any recovery flags are set, but not VERIFY,
+	 * then we ignore this message.  We are already
+	 * in the middle of updating.
+	 */
+	if (F_ISSET(rep, REP_F_RECOVER_MASK) &&
+	    !F_ISSET(rep, REP_F_RECOVER_VERIFY))
+		return (0);
+	rep->stat.st_outdated++;
+
+	/* Lock order: client DB mutex first, then the region mutex. */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	REP_SYSTEM_LOCK(dbenv);
+	/*
+	 * We don't want an old or delayed VERIFY_FAIL
+	 * message to throw us into internal initialization
+	 * when we shouldn't be.
+	 *
+	 * Only go into internal initialization if:
+	 * We are set for AUTOINIT mode.
+	 * We are in RECOVER_VERIFY and this LSN == verify_lsn.
+	 * We are not in any RECOVERY and we are expecting
+	 *    an LSN that no longer exists on the master.
+	 * Otherwise, ignore this message.
+	 */
+	/*
+	 * With NOAUTOINIT configured, a VERIFY_FAIL that would otherwise
+	 * start internal init is instead a join failure.
+	 */
+	if (FLD_ISSET(rep->config, REP_C_NOAUTOINIT) &&
+	    ((F_ISSET(rep, REP_F_RECOVER_VERIFY) &&
+	    log_compare(&rp->lsn, &lp->verify_lsn) == 0) ||
+	    (F_ISSET(rep, REP_F_RECOVER_MASK) == 0 &&
+	    log_compare(&rp->lsn, &lp->ready_lsn) >= 0))) {
+		ret = DB_REP_JOIN_FAILURE;
+		goto unlock;
+	}
+	/* Same freshness test as above, with internal init allowed. */
+	if (((F_ISSET(rep, REP_F_RECOVER_VERIFY)) &&
+	    log_compare(&rp->lsn, &lp->verify_lsn) == 0) ||
+	    (F_ISSET(rep, REP_F_RECOVER_MASK) == 0 &&
+	    log_compare(&rp->lsn, &lp->ready_lsn) >= 0)) {
+		/* Switch from verify mode to internal-update mode. */
+		F_CLR(rep, REP_F_RECOVER_VERIFY);
+		F_SET(rep, REP_F_RECOVER_UPDATE);
+		ZERO_LSN(rep->first_lsn);
+		lp->wait_recs = rep->request_gap;
+		/* Release in reverse acquisition order before sending. */
+		REP_SYSTEM_UNLOCK(dbenv);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		(void)__rep_send_message(dbenv,
+		    eid, REP_UPDATE_REQ, NULL, NULL, 0, 0);
+	} else {
+unlock:		REP_SYSTEM_UNLOCK(dbenv);
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	}
+	return (ret);
+}
+
+/*
+ * __rep_verify_req --
+ *	Handle a REP_VERIFY_REQ message.
+ *
+ * PUBLIC: int __rep_verify_req __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_verify_req(dbenv, rp, eid)
+	DB_ENV *dbenv;
+	REP_CONTROL *rp;
+	int eid;
+{
+	DB_LOGC *logc;
+	DB_REP *db_rep;
+	DBT *d, data_dbt;
+	REP *rep;
+	u_int32_t type;
+	int old, ret;
+
+	ret = 0;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+
+	type = REP_VERIFY;
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		return (ret);
+	d = &data_dbt;
+	memset(d, 0, sizeof(data_dbt));
+	/* A missing LSN is expected here; don't spam the error stream. */
+	F_SET(logc, DB_LOG_SILENT_ERR);
+	ret = __log_c_get(logc, &rp->lsn, d, DB_SET);
+	/*
+	 * If the LSN was invalid, then we might get a not
+	 * found, we might get an EIO, we could get anything.
+	 * If we get a DB_NOTFOUND, then there is a chance that
+	 * the LSN comes before the first file present in which
+	 * case we need to return a fail so that the client can return
+	 * a DB_OUTDATED.
+	 *
+	 * If we're a client servicing this request and we get a
+	 * NOTFOUND, return it so the caller can rerequest from
+	 * a better source.
+	 */
+	if (ret == DB_NOTFOUND) {
+		if (F_ISSET(rep, REP_F_CLIENT)) {
+			/*
+			 * Close the cursor WITHOUT clobbering ret: the
+			 * caller relies on seeing DB_NOTFOUND to rerequest
+			 * from a better source.
+			 */
+			(void)__log_c_close(logc);
+			return (ret);
+		} else if (__log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
+		    old != 0)
+			type = REP_VERIFY_FAIL;
+	}
+
+	/* On any read error, send the (possibly FAIL) reply with no data. */
+	if (ret != 0)
+		d = NULL;
+
+	(void)__rep_send_message(dbenv, eid, type, &rp->lsn, d, 0, 0);
+	ret = __log_c_close(logc);
+	return (ret);
+}
+
+static int
+__rep_dorecovery(dbenv, lsnp, trunclsnp)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp, *trunclsnp;
+{
+	DB_LSN lsn;
+	DB_REP *db_rep;
+	DBT mylog;
+	DB_LOGC *logc;
+	int ret, t_ret, update;
+	u_int32_t rectype;
+	__txn_regop_args *txnrec;
+
+	db_rep = dbenv->rep_handle;
+
+	/* Figure out if we are backing out any committed transactions. */
+	if ((ret = __log_cursor(dbenv, &logc)) != 0)
+		return (ret);
+
+	memset(&mylog, 0, sizeof(mylog));
+	update = 0;
+	while (update == 0 &&
+	    (ret = __log_c_get(logc, &lsn, &mylog, DB_PREV)) == 0 &&
+	    log_compare(&lsn, lsnp) > 0) {
+		memcpy(&rectype, mylog.data, sizeof(rectype));
+		if (rectype == DB___txn_regop) {
+			if ((ret =
+			    __txn_regop_read(dbenv, mylog.data, &txnrec)) != 0)
+				goto err;
+			if (txnrec->opcode != TXN_ABORT)
+				update = 1;
+			__os_free(dbenv, txnrec);
+		}
+	}
+
+	/*
+	 * If we successfully run recovery, we've opened all the necessary
+	 * files.  We are guaranteed to be single-threaded here, so no mutex
+	 * is necessary.
+	 */
+	if ((ret = __db_apprec(dbenv, lsnp, trunclsnp, update, 0)) == 0)
+		F_SET(db_rep, DBREP_OPENFILES);
+
+err:	if ((t_ret = __log_c_close(logc)) != 0 && ret == 0)
+		ret = t_ret;
+
+	return (ret);
+}
+
+/*
+ * __rep_verify_match --
+ *	We have just received a matching log record during verification.
+ * Figure out if we're going to need to run recovery. If so, wait until
+ * everything else has exited the library.  If not, set up the world
+ * correctly and move forward.
+ *
+ * PUBLIC: int __rep_verify_match __P((DB_ENV *, DB_LSN *, time_t));
+ */
+int
+__rep_verify_match(dbenv, reclsnp, savetime)
+	DB_ENV *dbenv;
+	DB_LSN *reclsnp;
+	time_t savetime;
+{
+	DB_LOG *dblp;
+	DB_LSN trunclsn;
+	DB_REP *db_rep;
+	LOG *lp;
+	REGENV *renv;
+	REGINFO *infop;
+	REP *rep;
+	int done, master, ret;
+	u_int32_t unused;
+
+	dblp = dbenv->lg_handle;
+	db_rep = dbenv->rep_handle;
+	rep = db_rep->region;
+	lp = dblp->reginfo.primary;
+	ret = 0;
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+
+	/*
+	 * Check if the savetime is different than our current time stamp.
+	 * If it is, then we're racing with another thread trying to recover
+	 * and we lost.  We must give up.
+	 */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	done = savetime != renv->rep_timestamp;
+	if (done) {
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		return (0);
+	}
+	ZERO_LSN(lp->verify_lsn);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+
+	/*
+	 * Make sure the world hasn't changed while we tried to get
+	 * the lock.  If it hasn't then it's time for us to kick all
+	 * operations out of DB and run recovery.
+	 */
+	REP_SYSTEM_LOCK(dbenv);
+	if (!F_ISSET(rep, REP_F_RECOVER_LOG) &&
+	    (F_ISSET(rep, REP_F_READY) || rep->in_recovery != 0)) {
+		rep->stat.st_msgs_recover++;
+		goto errunlock;
+	}
+
+	if ((ret = __rep_lockout(dbenv, rep, 1)) != 0)
+		goto errunlock;
+
+	/* OK, everyone is out, we can now run recovery. */
+	REP_SYSTEM_UNLOCK(dbenv);
+
+	if ((ret = __rep_dorecovery(dbenv, reclsnp, &trunclsn)) != 0) {
+		REP_SYSTEM_LOCK(dbenv);
+		rep->in_recovery = 0;
+		F_CLR(rep, REP_F_READY);
+		goto errunlock;
+	}
+
+	/*
+	 * The log has been truncated (either directly by us or by __db_apprec)
+	 * We want to make sure we're waiting for the LSN at the new end-of-log,
+	 * not some later point.
+	 */
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	lp->ready_lsn = trunclsn;
+	ZERO_LSN(lp->waiting_lsn);
+	ZERO_LSN(lp->max_wait_lsn);
+	lp->max_perm_lsn = *reclsnp;
+	lp->wait_recs = 0;
+	lp->rcvd_recs = 0;
+	ZERO_LSN(lp->verify_lsn);
+
+	/*
+	 * Discard any log records we have queued;  we're about to re-request
+	 * them, and can't trust the ones in the queue.  We need to set the
+	 * DB_AM_RECOVER bit in this handle, so that the operation doesn't
+	 * deadlock.
+	 */
+	F_SET(db_rep->rep_db, DB_AM_RECOVER);
+	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+	ret = __db_truncate(db_rep->rep_db, NULL, &unused);
+	MUTEX_LOCK(dbenv, rep->mtx_clientdb);
+	F_CLR(db_rep->rep_db, DB_AM_RECOVER);
+
+	REP_SYSTEM_LOCK(dbenv);
+	rep->stat.st_log_queued = 0;
+	rep->in_recovery = 0;
+	F_CLR(rep, REP_F_NOARCHIVE | REP_F_RECOVER_MASK);
+
+	if (ret != 0)
+		goto errunlock2;
+
+	/*
+	 * If the master_id is invalid, this means that since
+	 * the last record was sent, somebody declared an
+	 * election and we may not have a master to request
+	 * things of.
+	 *
+	 * This is not an error;  when we find a new master,
+	 * we'll re-negotiate where the end of the log is and
+	 * try to bring ourselves up to date again anyway.
+	 *
+	 * !!!
+	 * We cannot assert the election flags though because
+	 * somebody may have declared an election and then
+	 * got an error, thus clearing the election flags
+	 * but we still have an invalid master_id.
+	 */
+	master = rep->master_id;
+	REP_SYSTEM_UNLOCK(dbenv);
+	if (master == DB_EID_INVALID) {
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		ret = 0;
+	} else {
+		/*
+		 * We're making an ALL_REQ.  But now that we've
+		 * cleared the flags, we're likely receiving new
+		 * log records from the master, resulting in a gap
+		 * immediately.  So to avoid multiple data streams,
+		 * set the wait_recs value high now to give the master
+		 * a chance to start sending us these records before
+		 * the gap code re-requests the same gap.  Wait_recs
+		 * will get reset once we start receiving these
+		 * records.
+		 */
+		lp->wait_recs = rep->max_gap;
+		MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+		(void)__rep_send_message(dbenv,
+		    master, REP_ALL_REQ, reclsnp, NULL, 0, DB_REP_ANYWHERE);
+	}
+	if (0) {
+errunlock2:	MUTEX_UNLOCK(dbenv, rep->mtx_clientdb);
+errunlock:	REP_SYSTEM_UNLOCK(dbenv);
+	}
+	return (ret);
+}
+
+/*
+ * __rep_log_backup --
+ *
+ * In the verify handshake, we walk backward looking for
+ * identification records.  Those are the only record types
+ * we verify and match on.
+ *
+ * PUBLIC: int __rep_log_backup __P((DB_LOGC *, DB_LSN *));
+ */
+int
+__rep_log_backup(logc, lsn)
+	DB_LOGC *logc;
+	DB_LSN *lsn;
+{
+	DBT mylog;
+	u_int32_t rectype;
+	int ret;
+
+	ret = 0;
+	memset(&mylog, 0, sizeof(mylog));
+	while ((ret = __log_c_get(logc, lsn, &mylog, DB_PREV)) == 0) {
+		/*
+		 * Look at the record type.  Only txn_regop and txn_ckp
+		 * are interesting to us.
+		 */
+		memcpy(&rectype, mylog.data, sizeof(rectype));
+		if (rectype == DB___txn_ckp || rectype == DB___txn_regop)
+			break;
+	}
+	return (ret);
+}
diff --git a/storage/bdb/rpc_client/client.c b/storage/bdb/rpc_client/client.c
deleted file mode 100644
index d96721ece60..00000000000
--- a/storage/bdb/rpc_client/client.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996-2004
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: client.c,v 1.60 2004/09/21 16:09:54 sue Exp $
- */
-
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
-#include 
-
-#ifdef HAVE_VXWORKS
-#include 
-#endif
-#include 
-
-#include 
-#include 
-#include 
-#endif
-
-#include "db_server.h"
-
-#include "db_int.h"
-#include "dbinc/db_page.h"
-#include "dbinc/db_am.h"
-#include "dbinc/txn.h"
-#include "dbinc_auto/rpc_client_ext.h"
-
-static int __dbcl_c_destroy __P((DBC *));
-static int __dbcl_txn_close __P((DB_ENV *));
-
-/*
- * __dbcl_envrpcserver --
- *	Initialize an environment's server.
- *
- * PUBLIC: int __dbcl_envrpcserver
- * PUBLIC:     __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
- */
-int
-__dbcl_envrpcserver(dbenv, clnt, host, tsec, ssec, flags)
-	DB_ENV *dbenv;
-	void *clnt;
-	const char *host;
-	long tsec, ssec;
-	u_int32_t flags;
-{
-	CLIENT *cl;
-	struct timeval tp;
-
-	COMPQUIET(flags, 0);
-
-#ifdef HAVE_VXWORKS
-	if (rpcTaskInit() != 0) {
-		__db_err(dbenv, "Could not initialize VxWorks RPC");
-		return (ERROR);
-	}
-#endif
-	if (RPC_ON(dbenv)) {
-		__db_err(dbenv, "Already set an RPC handle");
-		return (EINVAL);
-	}
-	/*
-	 * Only create the client and set its timeout if the user
-	 * did not pass us a client structure to begin with.
-	 */
-	if (clnt == NULL) {
-		if ((cl = clnt_create((char *)host, DB_RPC_SERVERPROG,
-		    DB_RPC_SERVERVERS, "tcp")) == NULL) {
-			__db_err(dbenv, clnt_spcreateerror((char *)host));
-			return (DB_NOSERVER);
-		}
-		if (tsec != 0) {
-			tp.tv_sec = tsec;
-			tp.tv_usec = 0;
-			(void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
-		}
-	} else {
-		cl = (CLIENT *)clnt;
-		F_SET(dbenv, DB_ENV_RPCCLIENT_GIVEN);
-	}
-	dbenv->cl_handle = cl;
-
-	return (__dbcl_env_create(dbenv, ssec));
-}
-
-/*
- * __dbcl_env_close_wrap --
- *	Wrapper function for DB_ENV->close function for clients.
- *	We need a wrapper function to deal with the case where we
- *	either don't call dbenv->open or close gets an error.
- *	We need to release the handle no matter what.
- *
- * PUBLIC: int __dbcl_env_close_wrap
- * PUBLIC:     __P((DB_ENV *, u_int32_t));
- */
-int
-__dbcl_env_close_wrap(dbenv, flags)
-	DB_ENV * dbenv;
-	u_int32_t flags;
-{
-	int ret, t_ret;
-
-	ret = __dbcl_env_close(dbenv, flags);
-	t_ret = __dbcl_refresh(dbenv);
-	if (ret == 0 && t_ret != 0)
-		ret = t_ret;
-	return (ret);
-}
-
-/*
- * __dbcl_env_open_wrap --
- *	Wrapper function for DB_ENV->open function for clients.
- *	We need a wrapper function to deal with DB_USE_ENVIRON* flags
- *	and we don't want to complicate the generated code for env_open.
- *
- * PUBLIC: int __dbcl_env_open_wrap
- * PUBLIC:     __P((DB_ENV *, const char *, u_int32_t, int));
- */
-int
-__dbcl_env_open_wrap(dbenv, home, flags, mode)
-	DB_ENV * dbenv;
-	const char * home;
-	u_int32_t flags;
-	int mode;
-{
-	int ret;
-
-	if (LF_ISSET(DB_THREAD)) {
-		__db_err(dbenv, "DB_THREAD not allowed on RPC clients");
-		return (EINVAL);
-	}
-	if ((ret = __db_home(dbenv, home, flags)) != 0)
-		return (ret);
-	return (__dbcl_env_open(dbenv, dbenv->db_home, flags, mode));
-}
-
-/*
- * __dbcl_db_open_wrap --
- *	Wrapper function for DB->open function for clients.
- *	We need a wrapper function to error on DB_THREAD flag.
- *	and we don't want to complicate the generated code.
- *
- * PUBLIC: int __dbcl_db_open_wrap
- * PUBLIC:     __P((DB *, DB_TXN *, const char *, const char *,
- * PUBLIC:     DBTYPE, u_int32_t, int));
- */
-int
-__dbcl_db_open_wrap(dbp, txnp, name, subdb, type, flags, mode)
-	DB * dbp;
-	DB_TXN * txnp;
-	const char * name;
-	const char * subdb;
-	DBTYPE type;
-	u_int32_t flags;
-	int mode;
-{
-	if (LF_ISSET(DB_THREAD)) {
-		__db_err(dbp->dbenv, "DB_THREAD not allowed on RPC clients");
-		return (EINVAL);
-	}
-	return (__dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode));
-}
-
-/*
- * __dbcl_refresh --
- *	Clean up an environment.
- *
- * PUBLIC: int __dbcl_refresh __P((DB_ENV *));
- */
-int
-__dbcl_refresh(dbenv)
-	DB_ENV *dbenv;
-{
-	CLIENT *cl;
-	int ret;
-
-	cl = (CLIENT *)dbenv->cl_handle;
-
-	ret = 0;
-	if (dbenv->tx_handle != NULL) {
-		/*
-		 * We only need to free up our stuff, the caller
-		 * of this function will call the server who will
-		 * do all the real work.
-		 */
-		ret = __dbcl_txn_close(dbenv);
-		dbenv->tx_handle = NULL;
-	}
-	if (!F_ISSET(dbenv, DB_ENV_RPCCLIENT_GIVEN) && cl != NULL)
-		clnt_destroy(cl);
-	dbenv->cl_handle = NULL;
-	if (dbenv->db_home != NULL) {
-		__os_free(dbenv, dbenv->db_home);
-		dbenv->db_home = NULL;
-	}
-	return (ret);
-}
-
-/*
- * __dbcl_retcopy --
- *	Copy the returned data into the user's DBT, handling allocation flags,
- *	but not DB_DBT_PARTIAL.
- *
- * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *,
- * PUBLIC:    void *, u_int32_t, void **, u_int32_t *));
- */
-int
-__dbcl_retcopy(dbenv, dbt, data, len, memp, memsize)
-	DB_ENV *dbenv;
-	DBT *dbt;
-	void *data;
-	u_int32_t len;
-	void **memp;
-	u_int32_t *memsize;
-{
-	int ret;
-	u_int32_t orig_flags;
-
-	/*
-	 * The RPC server handles DB_DBT_PARTIAL, so we mask it out here to
-	 * avoid the handling of partials in __db_retcopy.  Check first whether
-	 * the data has actually changed, so we don't try to copy over
-	 * read-only keys, which the RPC server always returns regardless.
-	 */
-	orig_flags = dbt->flags;
-	F_CLR(dbt, DB_DBT_PARTIAL);
-	if (dbt->data != NULL && dbt->size == len &&
-	    memcmp(dbt->data, data, len) == 0)
-		ret = 0;
-	else
-		ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize);
-	dbt->flags = orig_flags;
-	return (ret);
-}
-
-/*
- * __dbcl_txn_close --
- *	Clean up an environment's transactions.
- */
-static int
-__dbcl_txn_close(dbenv)
-	DB_ENV *dbenv;
-{
-	DB_TXN *txnp;
-	DB_TXNMGR *tmgrp;
-	int ret;
-
-	ret = 0;
-	tmgrp = dbenv->tx_handle;
-
-	/*
-	 * This function can only be called once per process (i.e., not
-	 * once per thread), so no synchronization is required.
-	 * Also this function is called *after* the server has been called,
-	 * so the server has already closed/aborted any transactions that
-	 * were open on its side.  We only need to do local cleanup.
-	 */
-	while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL)
-		__dbcl_txn_end(txnp);
-
-	__os_free(dbenv, tmgrp);
-	return (ret);
-
-}
-
-/*
- * __dbcl_txn_end --
- *	Clean up an transaction.
- * RECURSIVE FUNCTION:  Clean up nested transactions.
- *
- * PUBLIC: void __dbcl_txn_end __P((DB_TXN *));
- */
-void
-__dbcl_txn_end(txnp)
-	DB_TXN *txnp;
-{
-	DB_ENV *dbenv;
-	DB_TXN *kids;
-	DB_TXNMGR *mgr;
-
-	mgr = txnp->mgrp;
-	dbenv = mgr->dbenv;
-
-	/*
-	 * First take care of any kids we have
-	 */
-	for (kids = TAILQ_FIRST(&txnp->kids);
-	    kids != NULL;
-	    kids = TAILQ_FIRST(&txnp->kids))
-		__dbcl_txn_end(kids);
-
-	/*
-	 * We are ending this transaction no matter what the parent
-	 * may eventually do, if we have a parent.  All those details
-	 * are taken care of by the server.  We only need to make sure
-	 * that we properly release resources.
-	 */
-	if (txnp->parent != NULL)
-		TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
-	TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
-	__os_free(dbenv, txnp);
-}
-
-/*
- * __dbcl_txn_setup --
- *	Setup a client transaction structure.
- *
- * PUBLIC: void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
- */
-void
-__dbcl_txn_setup(dbenv, txn, parent, id)
-	DB_ENV *dbenv;
-	DB_TXN *txn;
-	DB_TXN *parent;
-	u_int32_t id;
-{
-	txn->mgrp = dbenv->tx_handle;
-	txn->parent = parent;
-	txn->txnid = id;
-
-	/*
-	 * XXX
-	 * In DB library the txn_chain is protected by the mgrp->mutexp.
-	 * However, that mutex is implemented in the environments shared
-	 * memory region.  The client library does not support all of the
-	 * region - that just get forwarded to the server.  Therefore,
-	 * the chain is unprotected here, but properly protected on the
-	 * server.
-	 */
-	TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
-
-	TAILQ_INIT(&txn->kids);
-
-	if (parent != NULL)
-		TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
-
-	txn->abort = __dbcl_txn_abort;
-	txn->commit = __dbcl_txn_commit;
-	txn->discard = __dbcl_txn_discard;
-	txn->id = __txn_id;
-	txn->prepare = __dbcl_txn_prepare;
-	txn->set_timeout = __dbcl_txn_timeout;
-
-	txn->flags = TXN_MALLOC;
-}
-
-/*
- * __dbcl_c_destroy --
- *	Destroy a cursor.
- */
-static int
-__dbcl_c_destroy(dbc)
-	DBC *dbc;
-{
-	DB *dbp;
-
-	dbp = dbc->dbp;
-
-	TAILQ_REMOVE(&dbp->free_queue, dbc, links);
-	/* Discard any memory used to store returned data. */
-	if (dbc->my_rskey.data != NULL)
-		__os_free(dbc->dbp->dbenv, dbc->my_rskey.data);
-	if (dbc->my_rkey.data != NULL)
-		__os_free(dbc->dbp->dbenv, dbc->my_rkey.data);
-	if (dbc->my_rdata.data != NULL)
-		__os_free(dbc->dbp->dbenv, dbc->my_rdata.data);
-	__os_free(NULL, dbc);
-
-	return (0);
-}
-
-/*
- * __dbcl_c_refresh --
- *	Refresh a cursor.  Move it from the active queue to the free queue.
- *
- * PUBLIC: void __dbcl_c_refresh __P((DBC *));
- */
-void
-__dbcl_c_refresh(dbc)
-	DBC *dbc;
-{
-	DB *dbp;
-
-	dbp = dbc->dbp;
-	dbc->flags = 0;
-	dbc->cl_id = 0;
-
-	/*
-	 * If dbp->cursor fails locally, we use a local dbc so that
-	 * we can close it.  In that case, dbp will be NULL.
-	 */
-	if (dbp != NULL) {
-		TAILQ_REMOVE(&dbp->active_queue, dbc, links);
-		TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
-	}
-}
-
-/*
- * __dbcl_c_setup --
- *	Allocate a cursor.
- *
- * PUBLIC: int __dbcl_c_setup __P((u_int, DB *, DBC **));
- */
-int
-__dbcl_c_setup(cl_id, dbp, dbcp)
-	u_int cl_id;
-	DB *dbp;
-	DBC **dbcp;
-{
-	DBC *dbc, tmpdbc;
-	int ret;
-
-	if ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
-		TAILQ_REMOVE(&dbp->free_queue, dbc, links);
-	else {
-		if ((ret =
-		    __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0) {
-			/*
-			 * If we die here, set up a tmp dbc to call the
-			 * server to shut down that cursor.
-			 */
-			tmpdbc.dbp = NULL;
-			tmpdbc.cl_id = cl_id;
-			(void)__dbcl_dbc_close(&tmpdbc);
-			return (ret);
-		}
-		dbc->c_close = __dbcl_dbc_close;
-		dbc->c_count = __dbcl_dbc_count;
-		dbc->c_del = __dbcl_dbc_del;
-		dbc->c_dup = __dbcl_dbc_dup;
-		dbc->c_get = __dbcl_dbc_get;
-		dbc->c_pget = __dbcl_dbc_pget;
-		dbc->c_put = __dbcl_dbc_put;
-		dbc->c_am_destroy = __dbcl_c_destroy;
-	}
-	dbc->cl_id = cl_id;
-	dbc->dbp = dbp;
-	TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
-	*dbcp = dbc;
-	return (0);
-}
-
-/*
- * __dbcl_dbclose_common --
- *	Common code for closing/cleaning a dbp.
- *
- * PUBLIC: int __dbcl_dbclose_common __P((DB *));
- */
-int
-__dbcl_dbclose_common(dbp)
-	DB *dbp;
-{
-	int ret, t_ret;
-	DBC *dbc;
-
-	/*
-	 * Go through the active cursors and call the cursor recycle routine,
-	 * which resolves pending operations and moves the cursors onto the
-	 * free list.  Then, walk the free list and call the cursor destroy
-	 * routine.
-	 *
-	 * NOTE:  We do not need to use the join_queue for join cursors.
-	 * See comment in __dbcl_dbjoin_ret.
-	 */
-	ret = 0;
-	while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
-		__dbcl_c_refresh(dbc);
-	while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
-		if ((t_ret = __dbcl_c_destroy(dbc)) != 0 && ret == 0)
-			ret = t_ret;
-
-	TAILQ_INIT(&dbp->free_queue);
-	TAILQ_INIT(&dbp->active_queue);
-	/* Discard any memory used to store returned data. */
-	if (dbp->my_rskey.data != NULL)
-		__os_free(dbp->dbenv, dbp->my_rskey.data);
-	if (dbp->my_rkey.data != NULL)
-		__os_free(dbp->dbenv, dbp->my_rkey.data);
-	if (dbp->my_rdata.data != NULL)
-		__os_free(dbp->dbenv, dbp->my_rdata.data);
-
-	memset(dbp, CLEAR_BYTE, sizeof(*dbp));
-	__os_free(NULL, dbp);
-	return (ret);
-}
diff --git a/storage/bdb/rpc_client/gen_client_ret.c b/storage/bdb/rpc_client/gen_client_ret.c
deleted file mode 100644
index 7285afc2562..00000000000
--- a/storage/bdb/rpc_client/gen_client_ret.c
+++ /dev/null
@@ -1,801 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- *	Sleepycat Software.  All rights reserved.
- *
- * $Id: gen_client_ret.c,v 1.69 2004/09/22 16:29:51 bostic Exp $
- */
-
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
-#include 
-
-#include 
-
-#include 
-#endif
-
-#include "db_server.h"
-
-#include "db_int.h"
-#include "dbinc/db_page.h"
-#include "dbinc/db_am.h"
-#include "dbinc/txn.h"
-#include "dbinc_auto/rpc_client_ext.h"
-
-#define	FREE_IF_CHANGED(dbtp, orig)	do {				\
-	if ((dbtp)->data != NULL && (dbtp)->data != orig) {		\
-		__os_free(dbenv, (dbtp)->data);				\
-		(dbtp)->data = NULL;					\
-	}								\
-} while (0)
-
-/*
- * PUBLIC: int __dbcl_env_create_ret
- * PUBLIC:     __P((DB_ENV *, long, __env_create_reply *));
- */
-int
-__dbcl_env_create_ret(dbenv, timeout, replyp)
-	DB_ENV * dbenv;
-	long timeout;
-	__env_create_reply *replyp;
-{
-
-	COMPQUIET(timeout, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	dbenv->cl_id = replyp->envcl_id;
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_env_open_ret __P((DB_ENV *,
- * PUBLIC:     const char *, u_int32_t, int, __env_open_reply *));
- */
-int
-__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
-	DB_ENV *dbenv;
-	const char *home;
-	u_int32_t flags;
-	int mode;
-	__env_open_reply *replyp;
-{
-	DB_TXNMGR *tmgrp;
-	int ret;
-
-	COMPQUIET(home, NULL);
-	COMPQUIET(mode, 0);
-
-	/*
-	 * If error, return it.
-	 */
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	dbenv->cl_id = replyp->envcl_id;
-	/*
-	 * If the user requested transactions, then we have some
-	 * local client-side setup to do also.
-	 */
-	if (LF_ISSET(DB_INIT_TXN)) {
-		if ((ret = __os_calloc(dbenv,
-		    1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
-			return (ret);
-		TAILQ_INIT(&tmgrp->txn_chain);
-		tmgrp->dbenv = dbenv;
-		dbenv->tx_handle = tmgrp;
-	}
-
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_env_remove_ret
- * PUBLIC:     __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
- */
-int
-__dbcl_env_remove_ret(dbenv, home, flags, replyp)
-	DB_ENV *dbenv;
-	const char *home;
-	u_int32_t flags;
-	__env_remove_reply *replyp;
-{
-	int ret;
-
-	COMPQUIET(home, NULL);
-	COMPQUIET(flags, 0);
-
-	ret = __dbcl_refresh(dbenv);
-	__os_free(NULL, dbenv);
-	if (replyp->status == 0 && ret != 0)
-		return (ret);
-	else
-		return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
- */
-int
-__dbcl_txn_abort_ret(txnp, replyp)
-	DB_TXN *txnp;
-	__txn_abort_reply *replyp;
-{
-	__dbcl_txn_end(txnp);
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_txn_begin_ret __P((DB_ENV *,
- * PUBLIC:     DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
- */
-int
-__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
-	DB_ENV *envp;
-	DB_TXN *parent, **txnpp;
-	u_int32_t flags;
-	__txn_begin_reply *replyp;
-{
-	DB_TXN *txn;
-	int ret;
-
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0)
-		return (ret);
-	/*
-	 * !!!
-	 * Cast the txnidcl_id to 32-bits.  We don't want to change the
-	 * size of the txn structure.  But if we're running on 64-bit
-	 * machines, we could overflow.  Ignore for now.
-	 */
-	__dbcl_txn_setup(envp, txn, parent, (u_int32_t)replyp->txnidcl_id);
-	*txnpp = txn;
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_txn_commit_ret
- * PUBLIC:     __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
- */
-int
-__dbcl_txn_commit_ret(txnp, flags, replyp)
-	DB_TXN *txnp;
-	u_int32_t flags;
-	__txn_commit_reply *replyp;
-{
-	COMPQUIET(flags, 0);
-
-	__dbcl_txn_end(txnp);
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t,
- * PUBLIC:      __txn_discard_reply *));
- */
-int
-__dbcl_txn_discard_ret(txnp, flags, replyp)
-	DB_TXN * txnp;
-	u_int32_t flags;
-	__txn_discard_reply *replyp;
-{
-	COMPQUIET(flags, 0);
-
-	__dbcl_txn_end(txnp);
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long,
- * PUBLIC:      long *, u_int32_t, __txn_recover_reply *));
- */
-int
-__dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp)
-	DB_ENV * dbenv;
-	DB_PREPLIST * preplist;
-	long count;
-	long * retp;
-	u_int32_t flags;
-	__txn_recover_reply *replyp;
-{
-	DB_PREPLIST *prep;
-	DB_TXN *txnarray, *txn;
-	u_int32_t i, *txnid;
-	int ret;
-	u_int8_t *gid;
-
-	COMPQUIET(flags, 0);
-	COMPQUIET(count, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	*retp = (long) replyp->retcount;
-
-	if (replyp->retcount == 0)
-		return (replyp->status);
-
-	if ((ret = __os_calloc(dbenv, replyp->retcount, sizeof(DB_TXN),
-	    &txnarray)) != 0)
-		return (ret);
-	/*
-	 * We have a bunch of arrays that need to iterate in
-	 * lockstep with each other.
-	 */
-	i = 0;
-	txn = txnarray;
-	txnid = (u_int32_t *)replyp->txn.txn_val;
-	gid = (u_int8_t *)replyp->gid.gid_val;
-	prep = preplist;
-	while (i++ < replyp->retcount) {
-		__dbcl_txn_setup(dbenv, txn, NULL, *txnid);
-		prep->txn = txn;
-		memcpy(prep->gid, gid, DB_XIDDATASIZE);
-		/*
-		 * Now increment all our array pointers.
-		 */
-		txn++;
-		gid += DB_XIDDATASIZE;
-		txnid++;
-		prep++;
-	}
-
-	return (0);
-}
-
-/*
- * PUBLIC: int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
- */
-int
-__dbcl_db_close_ret(dbp, flags, replyp)
-	DB *dbp;
-	u_int32_t flags;
-	__db_close_reply *replyp;
-{
-	int ret;
-
-	COMPQUIET(flags, 0);
-
-	ret = __dbcl_dbclose_common(dbp);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	else
-		return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_create_ret
- * PUBLIC:     __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
- */
-int
-__dbcl_db_create_ret(dbp, dbenv, flags, replyp)
-	DB * dbp;
-	DB_ENV * dbenv;
-	u_int32_t flags;
-	__db_create_reply *replyp;
-{
-	COMPQUIET(dbenv, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	dbp->cl_id = replyp->dbcl_id;
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_db_get_ret
- * PUBLIC:     __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
- */
-int
-__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	DBT *key, *data;
-	u_int32_t flags;
-	__db_get_reply *replyp;
-{
-	DB_ENV *dbenv;
-	int ret;
-	void *oldkey;
-
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(flags, 0);
-
-	ret = 0;
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	dbenv = dbp->dbenv;
-
-	oldkey = key->data;
-	ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
-	    replyp->keydata.keydata_len, &dbp->my_rkey.data,
-	    &dbp->my_rkey.ulen);
-	if (ret)
-		return (ret);
-	ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
-	    replyp->datadata.datadata_len, &dbp->my_rdata.data,
-	    &dbp->my_rdata.ulen);
-	/*
-	 * If an error on copying 'data' and we allocated for 'key'
-	 * free it before returning the error.
-	 */
-	if (ret)
-		FREE_IF_CHANGED(key, oldkey);
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_key_range_ret __P((DB *, DB_TXN *,
- * PUBLIC:     DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
- */
-int
-__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	DBT *key;
-	DB_KEY_RANGE *range;
-	u_int32_t flags;
-	__db_key_range_reply *replyp;
-{
-	COMPQUIET(dbp, NULL);
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(key, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	range->less = replyp->less;
-	range->equal = replyp->equal;
-	range->greater = replyp->greater;
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *,
- * PUBLIC:     const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
- */
-int
-__dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp)
-	DB *dbp;
-	DB_TXN *txn;
-	const char *name, *subdb;
-	DBTYPE type;
-	u_int32_t flags;
-	int mode;
-	__db_open_reply *replyp;
-{
-	COMPQUIET(txn, NULL);
-	COMPQUIET(name, NULL);
-	COMPQUIET(subdb, NULL);
-	COMPQUIET(type, DB_UNKNOWN);
-	COMPQUIET(flags, 0);
-	COMPQUIET(mode, 0);
-
-	if (replyp->status == 0) {
-		dbp->cl_id = replyp->dbcl_id;
-		dbp->type = (DBTYPE)replyp->type;
-		/*
-		 * We get back the database's byteorder on the server.
-		 * Determine if our byteorder is the same or not by
-		 * calling __db_set_lorder.
-		 *
-		 * XXX
-		 * This MUST come before we set the flags because
-		 * __db_set_lorder checks that it is called before
-		 * the open flag is set.
-		 */
-		(void)__db_set_lorder(dbp, replyp->lorder);
-
-		/*
-		 * Explicitly set DB_AM_OPEN_CALLED since open is now
-		 * successfully completed.
-		 */
-		F_SET(dbp, DB_AM_OPEN_CALLED);
-	}
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
- * PUBLIC:      u_int32_t, __db_pget_reply *));
- */
-int
-__dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp)
-	DB * dbp;
-	DB_TXN * txnp;
-	DBT * skey;
-	DBT * pkey;
-	DBT * data;
-	u_int32_t flags;
-	__db_pget_reply *replyp;
-{
-	DB_ENV *dbenv;
-	int ret;
-	void *oldskey, *oldpkey;
-
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(flags, 0);
-
-	ret = 0;
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	dbenv = dbp->dbenv;
-
-	oldskey = skey->data;
-	ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
-	    replyp->skeydata.skeydata_len, &dbp->my_rskey.data,
-	    &dbp->my_rskey.ulen);
-	if (ret)
-		return (ret);
-
-	oldpkey = pkey->data;
-	if ((ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
-	    replyp->pkeydata.pkeydata_len, &dbp->my_rkey.data,
-	    &dbp->my_rkey.ulen)) != 0)
-		goto err;
-	ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
-	    replyp->datadata.datadata_len, &dbp->my_rdata.data,
-	    &dbp->my_rdata.ulen);
-
-	if (ret) {
-err:		FREE_IF_CHANGED(skey, oldskey);
-		FREE_IF_CHANGED(pkey, oldpkey);
-	}
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_put_ret
- * PUBLIC:     __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
- */
-int
-__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	DBT *key, *data;
-	u_int32_t flags;
-	__db_put_reply *replyp;
-{
-	int ret;
-
-	COMPQUIET(dbp, NULL);
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(data, NULL);
-
-	ret = replyp->status;
-	if (replyp->status == 0 && (flags == DB_APPEND))
-		*(db_recno_t *)key->data =
-		    *(db_recno_t *)replyp->keydata.keydata_val;
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_remove_ret __P((DB *,
- * PUBLIC:     const char *, const char *, u_int32_t, __db_remove_reply *));
- */
-int
-__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
-	DB *dbp;
-	const char *name, *subdb;
-	u_int32_t flags;
-	__db_remove_reply *replyp;
-{
-	int ret;
-
-	COMPQUIET(name, 0);
-	COMPQUIET(subdb, 0);
-	COMPQUIET(flags, 0);
-
-	ret = __dbcl_dbclose_common(dbp);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	else
-		return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_rename_ret __P((DB *, const char *,
- * PUBLIC:     const char *, const char *, u_int32_t, __db_rename_reply *));
- */
-int
-__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
-	DB *dbp;
-	const char *name, *subdb, *newname;
-	u_int32_t flags;
-	__db_rename_reply *replyp;
-{
-	int ret;
-
-	COMPQUIET(name, 0);
-	COMPQUIET(subdb, 0);
-	COMPQUIET(newname, 0);
-	COMPQUIET(flags, 0);
-
-	ret = __dbcl_dbclose_common(dbp);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	else
-		return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_db_stat_ret
- * PUBLIC:     __P((DB *, DB_TXN *, void *, u_int32_t, __db_stat_reply *));
- */
-int
-__dbcl_db_stat_ret(dbp, txnp, sp, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	void *sp;
-	u_int32_t flags;
-	__db_stat_reply *replyp;
-{
-	size_t len;
-	u_int32_t i, *q, *p, *retsp;
-	int ret;
-
-	COMPQUIET(flags, 0);
-	COMPQUIET(txnp, NULL);
-
-	if (replyp->status != 0 || sp == NULL)
-		return (replyp->status);
-
-	len = replyp->stats.stats_len * sizeof(u_int32_t);
-	if ((ret = __os_umalloc(dbp->dbenv, len, &retsp)) != 0)
-		return (ret);
-	for (i = 0, q = retsp, p = (u_int32_t *)replyp->stats.stats_val;
-	    i < replyp->stats.stats_len; i++, q++, p++)
-		*q = *p;
-	*(u_int32_t **)sp = retsp;
-	return (0);
-}
-
-/*
- * PUBLIC: int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t  *,
- * PUBLIC:      u_int32_t, __db_truncate_reply *));
- */
-int
-__dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	u_int32_t *countp, flags;
-	__db_truncate_reply *replyp;
-{
-	COMPQUIET(dbp, NULL);
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	*countp = replyp->count;
-
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_db_cursor_ret
- * PUBLIC:     __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
- */
-int
-__dbcl_db_cursor_ret(dbp, txnp, dbcp, flags, replyp)
-	DB *dbp;
-	DB_TXN *txnp;
-	DBC **dbcp;
-	u_int32_t flags;
-	__db_cursor_reply *replyp;
-{
-	COMPQUIET(txnp, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
-}
-
-/*
- * PUBLIC: int __dbcl_db_join_ret
- * PUBLIC:     __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
- */
-int
-__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
-	DB *dbp;
-	DBC **curs, **dbcp;
-	u_int32_t flags;
-	__db_join_reply *replyp;
-{
-	COMPQUIET(curs, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	/*
-	 * We set this up as a normal cursor.  We do not need
-	 * to treat a join cursor any differently than a normal
-	 * cursor, even though DB itself must.  We only need the
-	 * client-side cursor/db relationship to know what cursors
-	 * are open in the db, and to store their ID.  Nothing else.
-	 */
-	return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
- */
-int
-__dbcl_dbc_close_ret(dbc, replyp)
-	DBC *dbc;
-	__dbc_close_reply *replyp;
-{
-	__dbcl_c_refresh(dbc);
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_count_ret
- * PUBLIC:     __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
- */
-int
-__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
-	DBC *dbc;
-	db_recno_t *countp;
-	u_int32_t flags;
-	__dbc_count_reply *replyp;
-{
-	COMPQUIET(dbc, NULL);
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-	*countp = replyp->dupcount;
-
-	return (replyp->status);
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_dup_ret
- * PUBLIC:     __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
- */
-int
-__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
-	DBC *dbc, **dbcp;
-	u_int32_t flags;
-	__dbc_dup_reply *replyp;
-{
-	COMPQUIET(flags, 0);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	return (__dbcl_c_setup(replyp->dbcidcl_id, dbc->dbp, dbcp));
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_get_ret
- * PUBLIC:     __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
- */
-int
-__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
-	DBC *dbc;
-	DBT *key, *data;
-	u_int32_t flags;
-	__dbc_get_reply *replyp;
-{
-	DB_ENV *dbenv;
-	int ret;
-	void *oldkey;
-
-	COMPQUIET(flags, 0);
-
-	ret = 0;
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	dbenv = dbc->dbp->dbenv;
-	oldkey = key->data;
-	ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
-	    replyp->keydata.keydata_len, &dbc->my_rkey.data,
-	    &dbc->my_rkey.ulen);
-	if (ret)
-		return (ret);
-	ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
-	    replyp->datadata.datadata_len, &dbc->my_rdata.data,
-	    &dbc->my_rdata.ulen);
-
-	/*
-	 * If an error on copying 'data' and we allocated for 'key'
-	 * free it before returning the error.
-	 */
-	if (ret)
-		FREE_IF_CHANGED(key, oldkey);
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t,
- * PUBLIC:      __dbc_pget_reply *));
- */
-int
-__dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp)
-	DBC * dbc;
-	DBT * skey;
-	DBT * pkey;
-	DBT * data;
-	u_int32_t flags;
-	__dbc_pget_reply *replyp;
-{
-	DB_ENV *dbenv;
-	int ret;
-	void *oldskey, *oldpkey;
-
-	COMPQUIET(flags, 0);
-
-	ret = 0;
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	dbenv = dbc->dbp->dbenv;
-
-	oldskey = skey->data;
-	ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
-	    replyp->skeydata.skeydata_len, &dbc->my_rskey.data,
-	    &dbc->my_rskey.ulen);
-	if (ret)
-		return (ret);
-
-	oldpkey = pkey->data;
-	if ((ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
-	    replyp->pkeydata.pkeydata_len, &dbc->my_rkey.data,
-	    &dbc->my_rkey.ulen)) != 0)
-		goto err;
-	ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
-	    replyp->datadata.datadata_len, &dbc->my_rdata.data,
-	    &dbc->my_rdata.ulen);
-
-	/*
-	 * If an error on copying 'data' and we allocated for '*key'
-	 * free it before returning the error.
-	 */
-	if (ret) {
-err:		FREE_IF_CHANGED(skey, oldskey);
-		FREE_IF_CHANGED(pkey, oldpkey);
-	}
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbcl_dbc_put_ret
- * PUBLIC:     __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
- */
-int
-__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
-	DBC *dbc;
-	DBT *key, *data;
-	u_int32_t flags;
-	__dbc_put_reply *replyp;
-{
-	COMPQUIET(data, NULL);
-
-	if (replyp->status != 0)
-		return (replyp->status);
-
-	if (replyp->status == 0 && dbc->dbp->type == DB_RECNO &&
-	    (flags == DB_AFTER || flags == DB_BEFORE))
-		*(db_recno_t *)key->data =
-		    *(db_recno_t *)replyp->keydata.keydata_val;
-	return (replyp->status);
-}
diff --git a/storage/bdb/rpc_server/c/db_server_proc.c.in b/storage/bdb/rpc_server/c/db_server_proc.c.in
deleted file mode 100644
index d5d1f49508a..00000000000
--- a/storage/bdb/rpc_server/c/db_server_proc.c.in
+++ /dev/null
@@ -1,2500 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2002
- *      Sleepycat Software.  All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifdef HAVE_RPC
-#ifndef lint
-static const char revid[] = "$Id: db_server_proc.c,v 1.92 2002/07/29 15:21:20 sue Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include 
-
-#include 
-
-#include 
-#endif
-#include "dbinc_auto/db_server.h"
-
-#include "db_int.h"
-#include "dbinc/db_server_int.h"
-#include "dbinc_auto/rpc_server_ext.h"
-
-/* BEGIN __env_cachesize_proc */
-/*
- * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, __env_cachesize_reply *));
- */
-void
-__env_cachesize_proc(dbenvcl_id, gbytes, bytes,
-		ncache, replyp)
-	long dbenvcl_id;
-	u_int32_t gbytes;
-	u_int32_t bytes;
-	u_int32_t ncache;
-	__env_cachesize_reply *replyp;
-/* END __env_cachesize_proc */
-{
-	DB_ENV *dbenv;
-	ct_entry *dbenv_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_close_proc */
-/*
- * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
- */
-void
-__env_close_proc(dbenvcl_id, flags, replyp)
-	long dbenvcl_id;
-	u_int32_t flags;
-	__env_close_reply *replyp;
-/* END __env_close_proc */
-{
-	ct_entry *dbenv_ctp;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
-	return;
-}
-
-/* BEGIN __env_create_proc */
-/*
- * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));
- */
-void
-__env_create_proc(timeout, replyp)
-	u_int32_t timeout;
-	__env_create_reply *replyp;
-/* END __env_create_proc */
-{
-	DB_ENV *dbenv;
-	ct_entry *ctp;
-	int ret;
-
-	ctp = new_ct_ent(&replyp->status);
-	if (ctp == NULL)
-		return;
-	if ((ret = db_env_create(&dbenv, 0)) == 0) {
-		ctp->ct_envp = dbenv;
-		ctp->ct_type = CT_ENV;
-		ctp->ct_parent = NULL;
-		ctp->ct_envparent = ctp;
-		__dbsrv_settimeout(ctp, timeout);
-		__dbsrv_active(ctp);
-		replyp->envcl_id = ctp->ct_id;
-	} else
-		__dbclear_ctp(ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_dbremove_proc */
-/*
- * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,
- * PUBLIC:      __env_dbremove_reply *));
- */
-void
-__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,
-		subdb, flags, replyp)
-	long dbenvcl_id;
-	long txnpcl_id;
-	char *name;
-	char *subdb;
-	u_int32_t flags;
-	__env_dbremove_reply *replyp;
-/* END __env_dbremove_proc */
-{
-	int ret;
-	DB_ENV * dbenv;
-	ct_entry *dbenv_ctp;
-	DB_TXN * txnp;
-	ct_entry *txnp_ctp;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	ret = dbenv->dbremove(dbenv, txnp, name, subdb, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_dbrename_proc */
-/*
- * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,
- * PUBLIC:      u_int32_t, __env_dbrename_reply *));
- */
-void
-__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,
-		subdb, newname, flags, replyp)
-	long dbenvcl_id;
-	long txnpcl_id;
-	char *name;
-	char *subdb;
-	char *newname;
-	u_int32_t flags;
-	__env_dbrename_reply *replyp;
-/* END __env_dbrename_proc */
-{
-	int ret;
-	DB_ENV * dbenv;
-	ct_entry *dbenv_ctp;
-	DB_TXN * txnp;
-	ct_entry *txnp_ctp;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	ret = dbenv->dbrename(dbenv, txnp, name, subdb, newname, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_encrypt_proc */
-/*
- * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,
- * PUBLIC:      __env_encrypt_reply *));
- */
-void
-__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)
-	long dbenvcl_id;
-	char *passwd;
-	u_int32_t flags;
-	__env_encrypt_reply *replyp;
-/* END __env_encrypt_proc */
-{
-	int ret;
-	DB_ENV * dbenv;
-	ct_entry *dbenv_ctp;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	ret = dbenv->set_encrypt(dbenv, passwd, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_flags_proc */
-/*
- * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,
- * PUBLIC:      __env_flags_reply *));
- */
-void
-__env_flags_proc(dbenvcl_id, flags, onoff, replyp)
-	long dbenvcl_id;
-	u_int32_t flags;
-	u_int32_t onoff;
-	__env_flags_reply *replyp;
-/* END __env_flags_proc */
-{
-	DB_ENV *dbenv;
-	ct_entry *dbenv_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	ret = dbenv->set_flags(dbenv, flags, onoff);
-	if (onoff)
-		dbenv_ctp->ct_envdp.onflags = flags;
-	else
-		dbenv_ctp->ct_envdp.offflags = flags;
-
-	replyp->status = ret;
-	return;
-}
-/* BEGIN __env_open_proc */
-/*
- * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,
- * PUBLIC:      __env_open_reply *));
- */
-void
-__env_open_proc(dbenvcl_id, home, flags,
-		mode, replyp)
-	long dbenvcl_id;
-	char *home;
-	u_int32_t flags;
-	u_int32_t mode;
-	__env_open_reply *replyp;
-/* END __env_open_proc */
-{
-	DB_ENV *dbenv;
-	ct_entry *dbenv_ctp, *new_ctp;
-	u_int32_t newflags, shareflags;
-	int ret;
-	home_entry *fullhome;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-	fullhome = get_home(home);
-	if (fullhome == NULL) {
-		ret = DB_NOSERVER_HOME;
-		goto out;
-	}
-
-	/*
-	 * If they are using locking do deadlock detection for them,
-	 * internally.
-	 */
-	if ((flags & DB_INIT_LOCK) &&
-	    (ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0)
-		goto out;
-
-	if (__dbsrv_verbose) {
-		dbenv->set_errfile(dbenv, stderr);
-		dbenv->set_errpfx(dbenv, fullhome->home);
-	}
-
-	/*
-	 * Mask off flags we ignore
-	 */
-	newflags = (flags & ~DB_SERVER_FLAGMASK);
-	shareflags = (newflags & DB_SERVER_ENVFLAGS);
-	/*
-	 * Check now whether we can share a handle for this env.
-	 */
-	replyp->envcl_id = dbenvcl_id;
-	if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
-	    != NULL) {
-		/*
-		 * We can share, clean up old  ID, set new one.
-		 */
-		if (__dbsrv_verbose)
-			printf("Sharing env ID %ld\n", new_ctp->ct_id);
-		replyp->envcl_id = new_ctp->ct_id;
-		ret = __dbenv_close_int(dbenvcl_id, 0, 0);
-	} else {
-		ret = dbenv->open(dbenv, fullhome->home, newflags, mode);
-		dbenv_ctp->ct_envdp.home = fullhome;
-		dbenv_ctp->ct_envdp.envflags = shareflags;
-	}
-out:	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __env_remove_proc */
-/*
- * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,
- * PUBLIC:      __env_remove_reply *));
- */
-void
-__env_remove_proc(dbenvcl_id, home, flags, replyp)
-	long dbenvcl_id;
-	char *home;
-	u_int32_t flags;
-	__env_remove_reply *replyp;
-/* END __env_remove_proc */
-{
-	DB_ENV *dbenv;
-	ct_entry *dbenv_ctp;
-	int ret;
-	home_entry *fullhome;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	fullhome = get_home(home);
-	if (fullhome == NULL) {
-		replyp->status = DB_NOSERVER_HOME;
-		return;
-	}
-
-	ret = dbenv->remove(dbenv, fullhome->home, flags);
-	__dbdel_ctp(dbenv_ctp);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_abort_proc */
-/*
- * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));
- */
-void
-__txn_abort_proc(txnpcl_id, replyp)
-	long txnpcl_id;
-	__txn_abort_reply *replyp;
-/* END __txn_abort_proc */
-{
-	DB_TXN *txnp;
-	ct_entry *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-	txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
-	ret = txnp->abort(txnp);
-	__dbdel_ctp(txnp_ctp);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_begin_proc */
-/*
- * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,
- * PUBLIC:      __txn_begin_reply *));
- */
-void
-__txn_begin_proc(dbenvcl_id, parentcl_id,
-		flags, replyp)
-	long dbenvcl_id;
-	long parentcl_id;
-	u_int32_t flags;
-	__txn_begin_reply *replyp;
-/* END __txn_begin_proc */
-{
-	DB_ENV *dbenv;
-	DB_TXN *parent, *txnp;
-	ct_entry *ctp, *dbenv_ctp, *parent_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-	parent_ctp = NULL;
-
-	ctp = new_ct_ent(&replyp->status);
-	if (ctp == NULL)
-		return;
-
-	if (parentcl_id != 0) {
-		ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
-		parent = (DB_TXN *)parent_ctp->ct_anyp;
-		ctp->ct_activep = parent_ctp->ct_activep;
-	} else
-		parent = NULL;
-
-	ret = dbenv->txn_begin(dbenv, parent, &txnp, flags);
-	if (ret == 0) {
-		ctp->ct_txnp = txnp;
-		ctp->ct_type = CT_TXN;
-		ctp->ct_parent = parent_ctp;
-		ctp->ct_envparent = dbenv_ctp;
-		replyp->txnidcl_id = ctp->ct_id;
-		__dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
-		__dbsrv_active(ctp);
-	} else
-		__dbclear_ctp(ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_commit_proc */
-/*
- * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,
- * PUBLIC:      __txn_commit_reply *));
- */
-void
-__txn_commit_proc(txnpcl_id, flags, replyp)
-	long txnpcl_id;
-	u_int32_t flags;
-	__txn_commit_reply *replyp;
-/* END __txn_commit_proc */
-{
-	DB_TXN *txnp;
-	ct_entry *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-	txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
-	ret = txnp->commit(txnp, flags);
-	__dbdel_ctp(txnp_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_discard_proc */
-/*
- * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,
- * PUBLIC:      __txn_discard_reply *));
- */
-void
-__txn_discard_proc(txnpcl_id, flags, replyp)
-	long txnpcl_id;
-	u_int32_t flags;
-	__txn_discard_reply *replyp;
-/* END __txn_discard_proc */
-{
-	DB_TXN *txnp;
-	ct_entry *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-	txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
-	ret = txnp->discard(txnp, flags);
-	__dbdel_ctp(txnp_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_prepare_proc */
-/*
- * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,
- * PUBLIC:      __txn_prepare_reply *));
- */
-void
-__txn_prepare_proc(txnpcl_id, gid, replyp)
-	long txnpcl_id;
-	u_int8_t *gid;
-	__txn_prepare_reply *replyp;
-/* END __txn_prepare_proc */
-{
-	DB_TXN *txnp;
-	ct_entry *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-	txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
-	ret = txnp->prepare(txnp, gid);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __txn_recover_proc */
-/*
- * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,
- * PUBLIC:      __txn_recover_reply *, int *));
- */
-void
-__txn_recover_proc(dbenvcl_id, count,
-		flags, replyp, freep)
-	long dbenvcl_id;
-	u_int32_t count;
-	u_int32_t flags;
-	__txn_recover_reply *replyp;
-	int * freep;
-/* END __txn_recover_proc */
-{
-	DB_ENV *dbenv;
-	DB_PREPLIST *dbprep, *p;
-	ct_entry *dbenv_ctp, *ctp;
-	long erri, i, retcount;
-	u_int32_t *txnidp;
-	int ret;
-	u_int8_t *gid;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-	dbprep = NULL;
-	*freep = 0;
-
-	if ((ret =
-	    __os_malloc(dbenv, count * sizeof(DB_PREPLIST), &dbprep)) != 0)
-		goto out;
-	if ((ret =
-	    dbenv->txn_recover(dbenv, dbprep, count, &retcount, flags)) != 0)
-		goto out;
-	/*
-	 * If there is nothing, success, but it's easy.
-	 */
-	replyp->retcount = retcount;
-	if (retcount == 0) {
-		replyp->txn.txn_val = NULL;
-		replyp->txn.txn_len = 0;
-		replyp->gid.gid_val = NULL;
-		replyp->gid.gid_len = 0;
-	}
-
-	/*
-	 * We have our txn list.  Now we need to allocate the space for
-	 * the txn ID array and the GID array and set them up.
-	 */
-	if ((ret = __os_calloc(dbenv, retcount, sizeof(u_int32_t),
-	    &replyp->txn.txn_val)) != 0)
-		goto out;
-	replyp->txn.txn_len = retcount * sizeof(u_int32_t);
-	if ((ret = __os_calloc(dbenv, retcount, DB_XIDDATASIZE,
-	    &replyp->gid.gid_val)) != 0) {
-		__os_free(dbenv, replyp->txn.txn_val);
-		goto out;
-	}
-	replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
-
-	/*
-	 * Now walk through our results, creating parallel arrays
-	 * to send back.  For each entry we need to create a new
-	 * txn ctp and then fill in the array info.
-	 */
-	i = 0;
-	p = dbprep;
-	gid = replyp->gid.gid_val;
-	txnidp = replyp->txn.txn_val;
-	while (i++ < retcount) {
-		ctp = new_ct_ent(&ret);
-		if (ret != 0) {
-			i--;
-			goto out2;
-		}
-		ctp->ct_txnp = p->txn;
-		ctp->ct_type = CT_TXN;
-		ctp->ct_parent = NULL;
-		ctp->ct_envparent = dbenv_ctp;
-		__dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
-		__dbsrv_active(ctp);
-
-		*txnidp = ctp->ct_id;
-		memcpy(gid, p->gid, DB_XIDDATASIZE);
-
-		p++;
-		txnidp++;
-		gid += DB_XIDDATASIZE;
-	}
-	/*
-	 * If we get here, we have success and we have to set freep
-	 * so it'll get properly freed next time.
-	 */
-	*freep = 1;
-out:
-	if (dbprep != NULL)
-		__os_free(dbenv, dbprep);
-	replyp->status = ret;
-	return;
-out2:
-	/*
-	 * We had an error in the middle of creating our new txn
-	 * ct entries.  We have to unwind all that we have done.  Ugh.
-	 */
-	for (txnidp = replyp->txn.txn_val, erri = 0;
-	    erri < i; erri++, txnidp++) {
-		ctp = get_tableent(*txnidp);
-		__dbclear_ctp(ctp);
-	}
-	__os_free(dbenv, replyp->txn.txn_val);
-	__os_free(dbenv, replyp->gid.gid_val);
-	__os_free(dbenv, dbprep);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_bt_maxkey_proc */
-/*
- * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,
- * PUBLIC:      __db_bt_maxkey_reply *));
- */
-void
-__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)
-	long dbpcl_id;
-	u_int32_t maxkey;
-	__db_bt_maxkey_reply *replyp;
-/* END __db_bt_maxkey_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_bt_maxkey(dbp, maxkey);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_associate_proc */
-/*
- * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,
- * PUBLIC:      __db_associate_reply *));
- */
-void
-__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,
-		flags, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	long sdbpcl_id;
-	u_int32_t flags;
-	__db_associate_reply *replyp;
-/* END __db_associate_proc */
-{
-	DB *dbp, *sdbp;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
-	sdbp = (DB *)sdbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-
-	/*
-	 * We do not support DB_CREATE for associate.   Users
-	 * can only access secondary indices on a read-only basis,
-	 * so whatever they are looking for needs to be there already.
-	 */
-	if (flags != 0)
-		ret = EINVAL;
-	else
-		ret = dbp->associate(dbp, txnp, sdbp, NULL, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_bt_minkey_proc */
-/*
- * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,
- * PUBLIC:      __db_bt_minkey_reply *));
- */
-void
-__db_bt_minkey_proc(dbpcl_id, minkey, replyp)
-	long dbpcl_id;
-	u_int32_t minkey;
-	__db_bt_minkey_reply *replyp;
-/* END __db_bt_minkey_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_bt_minkey(dbp, minkey);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_close_proc */
-/*
- * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
- */
-void
-__db_close_proc(dbpcl_id, flags, replyp)
-	long dbpcl_id;
-	u_int32_t flags;
-	__db_close_reply *replyp;
-/* END __db_close_proc */
-{
-	ct_entry *dbp_ctp;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	replyp->status = __db_close_int(dbpcl_id, flags);
-	return;
-}
-
-/* BEGIN __db_create_proc */
-/*
- * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
- */
-void
-__db_create_proc(dbenvcl_id, flags, replyp)
-	long dbenvcl_id;
-	u_int32_t flags;
-	__db_create_reply *replyp;
-/* END __db_create_proc */
-{
-	DB *dbp;
-	DB_ENV *dbenv;
-	ct_entry *dbenv_ctp, *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
-	dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
-	dbp_ctp = new_ct_ent(&replyp->status);
-	if (dbp_ctp == NULL)
-		return ;
-	/*
-	 * We actually require env's for databases.  The client should
-	 * have caught it, but just in case.
-	 */
-	DB_ASSERT(dbenv != NULL);
-	if ((ret = db_create(&dbp, dbenv, flags)) == 0) {
-		dbp_ctp->ct_dbp = dbp;
-		dbp_ctp->ct_type = CT_DB;
-		dbp_ctp->ct_parent = dbenv_ctp;
-		dbp_ctp->ct_envparent = dbenv_ctp;
-		replyp->dbcl_id = dbp_ctp->ct_id;
-	} else
-		__dbclear_ctp(dbp_ctp);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_del_proc */
-/*
- * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
- */
-void
-__db_del_proc(dbpcl_id, txnpcl_id, keydlen,
-		keydoff, keyulen, keyflags, keydata,
-		keysize, flags, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t flags;
-	__db_del_reply *replyp;
-/* END __db_del_proc */
-{
-	DB *dbp;
-	DBT key;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	memset(&key, 0, sizeof(key));
-
-	/* Set up key DBT */
-	key.dlen = keydlen;
-	key.ulen = keyulen;
-	key.doff = keydoff;
-	key.flags = keyflags;
-	key.size = keysize;
-	key.data = keydata;
-
-	ret = dbp->del(dbp, txnp, &key, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_encrypt_proc */
-/*
- * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,
- * PUBLIC:      __db_encrypt_reply *));
- */
-void
-__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)
-	long dbpcl_id;
-	char *passwd;
-	u_int32_t flags;
-	__db_encrypt_reply *replyp;
-/* END __db_encrypt_proc */
-{
-	int ret;
-	DB * dbp;
-	ct_entry *dbp_ctp;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_encrypt(dbp, passwd, flags);
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_extentsize_proc */
-/*
- * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,
- * PUBLIC:      __db_extentsize_reply *));
- */
-void
-__db_extentsize_proc(dbpcl_id, extentsize, replyp)
-	long dbpcl_id;
-	u_int32_t extentsize;
-	__db_extentsize_reply *replyp;
-/* END __db_extentsize_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_q_extentsize(dbp, extentsize);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_flags_proc */
-/*
- * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
- */
-void
-__db_flags_proc(dbpcl_id, flags, replyp)
-	long dbpcl_id;
-	u_int32_t flags;
-	__db_flags_reply *replyp;
-/* END __db_flags_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_flags(dbp, flags);
-	dbp_ctp->ct_dbdp.setflags |= flags;
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_get_proc */
-/*
- * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, __db_get_reply *, int *));
- */
-void
-__db_get_proc(dbpcl_id, txnpcl_id, keydlen,
-		keydoff, keyulen, keyflags, keydata,
-		keysize, datadlen, datadoff, dataulen,
-		dataflags, datadata, datasize, flags, replyp, freep)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__db_get_reply *replyp;
-	int * freep;
-/* END __db_get_proc */
-{
-	DB *dbp;
-	DBT key, data;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	int key_alloc, bulk_alloc, ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	*freep = 0;
-	bulk_alloc = 0;
-	memset(&key, 0, sizeof(key));
-	memset(&data, 0, sizeof(data));
-
-	/* Set up key and data DBT */
-	key.dlen = keydlen;
-	key.doff = keydoff;
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	key.flags = DB_DBT_MALLOC;
-	if (keyflags & DB_DBT_PARTIAL)
-		key.flags |= DB_DBT_PARTIAL;
-	key.size = keysize;
-	key.ulen = keyulen;
-	key.data = keydata;
-
-	data.dlen = datadlen;
-	data.doff = datadoff;
-	data.ulen = dataulen;
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	data.size = datasize;
-	data.data = datadata;
-	if (flags & DB_MULTIPLE) {
-		if (data.data == 0) {
-			ret = __os_umalloc(dbp->dbenv,
-			    data.ulen, &data.data);
-			if (ret != 0)
-				goto err;
-			bulk_alloc = 1;
-		}
-		data.flags |= DB_DBT_USERMEM;
-	} else
-		data.flags |= DB_DBT_MALLOC;
-	if (dataflags & DB_DBT_PARTIAL)
-		data.flags |= DB_DBT_PARTIAL;
-
-	/* Got all our stuff, now do the get */
-	ret = dbp->get(dbp, txnp, &key, &data, flags);
-	/*
-	 * Otherwise just status.
-	 */
-	if (ret == 0) {
-		/*
-		 * XXX
-		 * We need to xdr_free whatever we are returning, next time.
-		 * However, DB does not allocate a new key if one was given
-		 * and we'd be free'ing up space allocated in the request.
-		 * So, allocate a new key/data pointer if it is the same one
-		 * as in the request.
-		 */
-		*freep = 1;
-		/*
-		 * Key
-		 */
-		key_alloc = 0;
-		if (key.data == keydata) {
-			ret = __os_umalloc(dbp->dbenv,
-			    key.size, &replyp->keydata.keydata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, key.data);
-				__os_ufree(dbp->dbenv, data.data);
-				goto err;
-			}
-			key_alloc = 1;
-			memcpy(replyp->keydata.keydata_val, key.data, key.size);
-		} else
-			replyp->keydata.keydata_val = key.data;
-
-		replyp->keydata.keydata_len = key.size;
-
-		/*
-		 * Data
-		 */
-		if (data.data == datadata) {
-			ret = __os_umalloc(dbp->dbenv,
-			     data.size, &replyp->datadata.datadata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, key.data);
-				__os_ufree(dbp->dbenv, data.data);
-				if (key_alloc)
-					__os_ufree(dbp->dbenv,
-					    replyp->keydata.keydata_val);
-				goto err;
-			}
-			memcpy(replyp->datadata.datadata_val, data.data,
-			    data.size);
-		} else
-			replyp->datadata.datadata_val = data.data;
-		replyp->datadata.datadata_len = data.size;
-	} else {
-err:		replyp->keydata.keydata_val = NULL;
-		replyp->keydata.keydata_len = 0;
-		replyp->datadata.datadata_val = NULL;
-		replyp->datadata.datadata_len = 0;
-		*freep = 0;
-		if (bulk_alloc)
-			__os_ufree(dbp->dbenv, data.data);
-	}
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_h_ffactor_proc */
-/*
- * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,
- * PUBLIC:      __db_h_ffactor_reply *));
- */
-void
-__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)
-	long dbpcl_id;
-	u_int32_t ffactor;
-	__db_h_ffactor_reply *replyp;
-/* END __db_h_ffactor_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_h_ffactor(dbp, ffactor);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_h_nelem_proc */
-/*
- * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,
- * PUBLIC:      __db_h_nelem_reply *));
- */
-void
-__db_h_nelem_proc(dbpcl_id, nelem, replyp)
-	long dbpcl_id;
-	u_int32_t nelem;
-	__db_h_nelem_reply *replyp;
-/* END __db_h_nelem_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_h_nelem(dbp, nelem);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_key_range_proc */
-/*
- * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
- */
-void
-__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,
-		keydoff, keyulen, keyflags, keydata,
-		keysize, flags, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t flags;
-	__db_key_range_reply *replyp;
-/* END __db_key_range_proc */
-{
-	DB *dbp;
-	DBT key;
-	DB_KEY_RANGE range;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	memset(&key, 0, sizeof(key));
-	/* Set up key and data DBT */
-	key.dlen = keydlen;
-	key.ulen = keyulen;
-	key.doff = keydoff;
-	key.size = keysize;
-	key.data = keydata;
-	key.flags = keyflags;
-
-	ret = dbp->key_range(dbp, txnp, &key, &range, flags);
-
-	replyp->status = ret;
-	replyp->less = range.less;
-	replyp->equal = range.equal;
-	replyp->greater = range.greater;
-	return;
-}
-
-/* BEGIN __db_lorder_proc */
-/*
- * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
- */
-void
-__db_lorder_proc(dbpcl_id, lorder, replyp)
-	long dbpcl_id;
-	u_int32_t lorder;
-	__db_lorder_reply *replyp;
-/* END __db_lorder_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_lorder(dbp, lorder);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_open_proc */
-/*
- * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,
- * PUBLIC:      u_int32_t, u_int32_t, __db_open_reply *));
- */
-void
-__db_open_proc(dbpcl_id, txnpcl_id, name,
-		subdb, type, flags, mode, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	char *name;
-	char *subdb;
-	u_int32_t type;
-	u_int32_t flags;
-	u_int32_t mode;
-	__db_open_reply *replyp;
-/* END __db_open_proc */
-{
-	DB *dbp;
-	DB_TXN *txnp;
-	DBTYPE dbtype;
-	ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
-	int isswapped, ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	replyp->dbcl_id = dbpcl_id;
-	if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, type, flags))
-	    != NULL) {
-		/*
-		 * We can share, clean up old ID, set new one.
-		 */
-		if (__dbsrv_verbose)
-			printf("Sharing db ID %ld\n", new_ctp->ct_id);
-		replyp->dbcl_id = new_ctp->ct_id;
-		ret = __db_close_int(dbpcl_id, 0);
-		goto out;
-	}
-	ret = dbp->open(dbp, txnp, name, subdb, (DBTYPE)type, flags, mode);
-	if (ret == 0) {
-		(void)dbp->get_type(dbp, &dbtype);
-		replyp->type = dbtype;
-		/* XXX
-		 * Tcl needs to peek at dbp->flags for DB_AM_DUP.  Send
-		 * this dbp's flags back.
-		 */
-		replyp->dbflags = (int) dbp->flags;
-		/*
-		 * We need to determine the byte order of the database
-		 * and send it back to the client.  Determine it by
-		 * the server's native order and the swapped value of
-		 * the DB itself.
-		 */
-		(void)dbp->get_byteswapped(dbp, &isswapped);
-		if (__db_byteorder(NULL, 1234) == 0) {
-			if (isswapped == 0)
-				replyp->lorder = 1234;
-			else
-				replyp->lorder = 4321;
-		} else {
-			if (isswapped == 0)
-				replyp->lorder = 4321;
-			else
-				replyp->lorder = 1234;
-		}
-		dbp_ctp->ct_dbdp.type = dbtype;
-		dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
-		if (name == NULL)
-			dbp_ctp->ct_dbdp.db = NULL;
-		else if ((ret = __os_strdup(dbp->dbenv, name,
-		    &dbp_ctp->ct_dbdp.db)) != 0)
-			goto out;
-		if (subdb == NULL)
-			dbp_ctp->ct_dbdp.subdb = NULL;
-		else if ((ret = __os_strdup(dbp->dbenv, subdb,
-		    &dbp_ctp->ct_dbdp.subdb)) != 0)
-			goto out;
-	}
-out:
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_pagesize_proc */
-/*
- * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,
- * PUBLIC:      __db_pagesize_reply *));
- */
-void
-__db_pagesize_proc(dbpcl_id, pagesize, replyp)
-	long dbpcl_id;
-	u_int32_t pagesize;
-	__db_pagesize_reply *replyp;
-/* END __db_pagesize_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_pagesize(dbp, pagesize);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_pget_proc */
-/*
- * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, __db_pget_reply *, int *));
- */
-void
-__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,
-		skeydoff, skeyulen, skeyflags, skeydata,
-		skeysize, pkeydlen, pkeydoff, pkeyulen,
-		pkeyflags, pkeydata, pkeysize, datadlen,
-		datadoff, dataulen, dataflags, datadata,
-		datasize, flags, replyp, freep)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t skeydlen;
-	u_int32_t skeydoff;
-	u_int32_t skeyulen;
-	u_int32_t skeyflags;
-	void *skeydata;
-	u_int32_t skeysize;
-	u_int32_t pkeydlen;
-	u_int32_t pkeydoff;
-	u_int32_t pkeyulen;
-	u_int32_t pkeyflags;
-	void *pkeydata;
-	u_int32_t pkeysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__db_pget_reply *replyp;
-	int * freep;
-/* END __db_pget_proc */
-{
-	DB *dbp;
-	DBT skey, pkey, data;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	int key_alloc, ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	*freep = 0;
-	memset(&skey, 0, sizeof(skey));
-	memset(&pkey, 0, sizeof(pkey));
-	memset(&data, 0, sizeof(data));
-
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	/* Set up key and data DBT */
-	skey.flags = DB_DBT_MALLOC;
-	skey.dlen = skeydlen;
-	skey.ulen = skeyulen;
-	skey.doff = skeydoff;
-	if (skeyflags & DB_DBT_PARTIAL)
-		skey.flags |= DB_DBT_PARTIAL;
-	skey.size = skeysize;
-	skey.data = skeydata;
-
-	pkey.flags = DB_DBT_MALLOC;
-	pkey.dlen = pkeydlen;
-	pkey.ulen = pkeyulen;
-	pkey.doff = pkeydoff;
-	if (pkeyflags & DB_DBT_PARTIAL)
-		pkey.flags |= DB_DBT_PARTIAL;
-	pkey.size = pkeysize;
-	pkey.data = pkeydata;
-
-	data.flags = DB_DBT_MALLOC;
-	data.dlen = datadlen;
-	data.ulen = dataulen;
-	data.doff = datadoff;
-	if (dataflags & DB_DBT_PARTIAL)
-		data.flags |= DB_DBT_PARTIAL;
-	data.size = datasize;
-	data.data = datadata;
-
-	/* Got all our stuff, now do the get */
-	ret = dbp->pget(dbp, txnp, &skey, &pkey, &data, flags);
-	/*
-	 * Otherwise just status.
-	 */
-	if (ret == 0) {
-		/*
-		 * XXX
-		 * We need to xdr_free whatever we are returning, next time.
-		 * However, DB does not allocate a new key if one was given
-		 * and we'd be free'ing up space allocated in the request.
-		 * So, allocate a new key/data pointer if it is the same one
-		 * as in the request.
-		 */
-		*freep = 1;
-		/*
-		 * Key
-		 */
-		key_alloc = 0;
-		if (skey.data == skeydata) {
-			ret = __os_umalloc(dbp->dbenv,
-			    skey.size, &replyp->skeydata.skeydata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, skey.data);
-				__os_ufree(dbp->dbenv, pkey.data);
-				__os_ufree(dbp->dbenv, data.data);
-				goto err;
-			}
-			key_alloc = 1;
-			memcpy(replyp->skeydata.skeydata_val, skey.data,
-			    skey.size);
-		} else
-			replyp->skeydata.skeydata_val = skey.data;
-
-		replyp->skeydata.skeydata_len = skey.size;
-
-		/*
-		 * Primary key
-		 */
-		if (pkey.data == pkeydata) {
-			ret = __os_umalloc(dbp->dbenv,
-			     pkey.size, &replyp->pkeydata.pkeydata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, skey.data);
-				__os_ufree(dbp->dbenv, pkey.data);
-				__os_ufree(dbp->dbenv, data.data);
-				if (key_alloc)
-					__os_ufree(dbp->dbenv,
-					    replyp->skeydata.skeydata_val);
-				goto err;
-			}
-			/*
-			 * We can set it to 2, because they cannot send the
-			 * pkey over without sending the skey over too.
-			 * So if they did send a pkey, they must have sent
-			 * the skey as well.
-			 */
-			key_alloc = 2;
-			memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
-			    pkey.size);
-		} else
-			replyp->pkeydata.pkeydata_val = pkey.data;
-		replyp->pkeydata.pkeydata_len = pkey.size;
-
-		/*
-		 * Data
-		 */
-		if (data.data == datadata) {
-			ret = __os_umalloc(dbp->dbenv,
-			     data.size, &replyp->datadata.datadata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, skey.data);
-				__os_ufree(dbp->dbenv, pkey.data);
-				__os_ufree(dbp->dbenv, data.data);
-				/*
-				 * If key_alloc is 1, just skey needs to be
-				 * freed, if key_alloc is 2, both skey and pkey
-				 * need to be freed.
-				 */
-				if (key_alloc--)
-					__os_ufree(dbp->dbenv,
-					    replyp->skeydata.skeydata_val);
-				if (key_alloc)
-					__os_ufree(dbp->dbenv,
-					    replyp->pkeydata.pkeydata_val);
-				goto err;
-			}
-			memcpy(replyp->datadata.datadata_val, data.data,
-			    data.size);
-		} else
-			replyp->datadata.datadata_val = data.data;
-		replyp->datadata.datadata_len = data.size;
-	} else {
-err:		replyp->skeydata.skeydata_val = NULL;
-		replyp->skeydata.skeydata_len = 0;
-		replyp->pkeydata.pkeydata_val = NULL;
-		replyp->pkeydata.pkeydata_len = 0;
-		replyp->datadata.datadata_val = NULL;
-		replyp->datadata.datadata_len = 0;
-		*freep = 0;
-	}
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_put_proc */
-/*
- * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, __db_put_reply *, int *));
- */
-void
-__db_put_proc(dbpcl_id, txnpcl_id, keydlen,
-		keydoff, keyulen, keyflags, keydata,
-		keysize, datadlen, datadoff, dataulen,
-		dataflags, datadata, datasize, flags, replyp, freep)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__db_put_reply *replyp;
-	int * freep;
-/* END __db_put_proc */
-{
-	DB *dbp;
-	DBT key, data;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	*freep = 0;
-	memset(&key, 0, sizeof(key));
-	memset(&data, 0, sizeof(data));
-
-	/* Set up key and data DBT */
-	key.dlen = keydlen;
-	key.ulen = keyulen;
-	key.doff = keydoff;
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	key.flags = DB_DBT_MALLOC;
-	if (keyflags & DB_DBT_PARTIAL)
-		key.flags |= DB_DBT_PARTIAL;
-	key.size = keysize;
-	key.data = keydata;
-
-	data.dlen = datadlen;
-	data.ulen = dataulen;
-	data.doff = datadoff;
-	data.flags = dataflags;
-	data.size = datasize;
-	data.data = datadata;
-
-	/* Got all our stuff, now do the put */
-	ret = dbp->put(dbp, txnp, &key, &data, flags);
-	/*
-	 * If the client did a DB_APPEND, set up key in reply.
-	 * Otherwise just status.
-	 */
-	if (ret == 0 && (flags == DB_APPEND)) {
-		/*
-		 * XXX
-		 * We need to xdr_free whatever we are returning, next time.
-		 * However, DB does not allocate a new key if one was given
-		 * and we'd be free'ing up space allocated in the request.
-		 * So, allocate a new key/data pointer if it is the same one
-		 * as in the request.
-		 */
-		*freep = 1;
-		/*
-		 * Key
-		 */
-		if (key.data == keydata) {
-			ret = __os_umalloc(dbp->dbenv,
-			    key.size, &replyp->keydata.keydata_val);
-			if (ret != 0) {
-				__os_ufree(dbp->dbenv, key.data);
-				goto err;
-			}
-			memcpy(replyp->keydata.keydata_val, key.data, key.size);
-		} else
-			replyp->keydata.keydata_val = key.data;
-
-		replyp->keydata.keydata_len = key.size;
-	} else {
-err:		replyp->keydata.keydata_val = NULL;
-		replyp->keydata.keydata_len = 0;
-		*freep = 0;
-	}
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_re_delim_proc */
-/*
- * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,
- * PUBLIC:      __db_re_delim_reply *));
- */
-void
-__db_re_delim_proc(dbpcl_id, delim, replyp)
-	long dbpcl_id;
-	u_int32_t delim;
-	__db_re_delim_reply *replyp;
-/* END __db_re_delim_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_re_delim(dbp, delim);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_re_len_proc */
-/*
- * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
- */
-void
-__db_re_len_proc(dbpcl_id, len, replyp)
-	long dbpcl_id;
-	u_int32_t len;
-	__db_re_len_reply *replyp;
-/* END __db_re_len_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_re_len(dbp, len);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_re_pad_proc */
-/*
- * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
- */
-void
-__db_re_pad_proc(dbpcl_id, pad, replyp)
-	long dbpcl_id;
-	u_int32_t pad;
-	__db_re_pad_reply *replyp;
-/* END __db_re_pad_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->set_re_pad(dbp, pad);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_remove_proc */
-/*
- * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,
- * PUBLIC:      __db_remove_reply *));
- */
-void
-__db_remove_proc(dbpcl_id, name, subdb,
-		flags, replyp)
-	long dbpcl_id;
-	char *name;
-	char *subdb;
-	u_int32_t flags;
-	__db_remove_reply *replyp;
-/* END __db_remove_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->remove(dbp, name, subdb, flags);
-	__dbdel_ctp(dbp_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_rename_proc */
-/*
- * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,
- * PUBLIC:      __db_rename_reply *));
- */
-void
-__db_rename_proc(dbpcl_id, name, subdb,
-		newname, flags, replyp)
-	long dbpcl_id;
-	char *name;
-	char *subdb;
-	char *newname;
-	u_int32_t flags;
-	__db_rename_reply *replyp;
-/* END __db_rename_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->rename(dbp, name, subdb, newname, flags);
-	__dbdel_ctp(dbp_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_stat_proc */
-/*
- * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,
- * PUBLIC:      int *));
- */
-void
-__db_stat_proc(dbpcl_id, flags, replyp, freep)
-	long dbpcl_id;
-	u_int32_t flags;
-	__db_stat_reply *replyp;
-	int * freep;
-/* END __db_stat_proc */
-{
-	DB *dbp;
-	DBTYPE type;
-	ct_entry *dbp_ctp;
-	u_int32_t *q, *p, *retsp;
-	int i, len, ret;
-	void *sp;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->stat(dbp, &sp, flags);
-	replyp->status = ret;
-	if (ret != 0)
-		return;
-	/*
-	 * We get here, we have success.  Allocate an array so that
-	 * we can use the list generator.  Generate the reply, free
-	 * up the space.
-	 */
-	/*
-	 * XXX This assumes that all elements of all stat structures
-	 * are u_int32_t fields.  They are, currently.
-	 */
-	(void)dbp->get_type(dbp, &type);
-	if (type == DB_HASH)
-		len = sizeof(DB_HASH_STAT);
-	else if (type == DB_QUEUE)
-		len = sizeof(DB_QUEUE_STAT);
-	else            /* BTREE or RECNO are same stats */
-		len = sizeof(DB_BTREE_STAT);
-	replyp->stats.stats_len = len / sizeof(u_int32_t);
-
-	if ((ret = __os_umalloc(dbp->dbenv, len * replyp->stats.stats_len,
-	    &retsp)) != 0)
-		goto out;
-	for (i = 0, q = retsp, p = sp; i < len;
-	    i++, q++, p++)
-		*q = *p;
-	replyp->stats.stats_val = retsp;
-	__os_ufree(dbp->dbenv, sp);
-	if (ret == 0)
-		*freep = 1;
-out:
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_sync_proc */
-/*
- * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
- */
-void
-__db_sync_proc(dbpcl_id, flags, replyp)
-	long dbpcl_id;
-	u_int32_t flags;
-	__db_sync_reply *replyp;
-/* END __db_sync_proc */
-{
-	DB *dbp;
-	ct_entry *dbp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	ret = dbp->sync(dbp, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_truncate_proc */
-/*
- * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,
- * PUBLIC:      __db_truncate_reply *));
- */
-void
-__db_truncate_proc(dbpcl_id, txnpcl_id,
-		flags, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t flags;
-	__db_truncate_reply *replyp;
-/* END __db_truncate_proc */
-{
-	DB *dbp;
-	DB_TXN *txnp;
-	ct_entry *dbp_ctp, *txnp_ctp;
-	u_int32_t count;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-	} else
-		txnp = NULL;
-
-	ret = dbp->truncate(dbp, txnp, &count, flags);
-	replyp->status = ret;
-	if (ret == 0)
-		replyp->count = count;
-	return;
-}
-
-/* BEGIN __db_cursor_proc */
-/*
- * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,
- * PUBLIC:      __db_cursor_reply *));
- */
-void
-__db_cursor_proc(dbpcl_id, txnpcl_id,
-		flags, replyp)
-	long dbpcl_id;
-	long txnpcl_id;
-	u_int32_t flags;
-	__db_cursor_reply *replyp;
-/* END __db_cursor_proc */
-{
-	DB *dbp;
-	DBC *dbc;
-	DB_TXN *txnp;
-	ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-	dbc_ctp = new_ct_ent(&replyp->status);
-	if (dbc_ctp == NULL)
-		return;
-
-	if (txnpcl_id != 0) {
-		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
-		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-		dbc_ctp->ct_activep = txnp_ctp->ct_activep;
-	} else
-		txnp = NULL;
-
-	if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
-		dbc_ctp->ct_dbc = dbc;
-		dbc_ctp->ct_type = CT_CURSOR;
-		dbc_ctp->ct_parent = dbp_ctp;
-		env_ctp = dbp_ctp->ct_envparent;
-		dbc_ctp->ct_envparent = env_ctp;
-		__dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
-		__dbsrv_active(dbc_ctp);
-		replyp->dbcidcl_id = dbc_ctp->ct_id;
-	} else
-		__dbclear_ctp(dbc_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __db_join_proc */
-/*
- * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,
- * PUBLIC:      __db_join_reply *));
- */
-void
-__db_join_proc(dbpcl_id, curs, curslen,
-		flags, replyp)
-	long dbpcl_id;
-	u_int32_t * curs;
-	u_int32_t curslen;
-	u_int32_t flags;
-	__db_join_reply *replyp;
-/* END __db_join_proc */
-{
-	DB *dbp;
-	DBC **jcurs, **c;
-	DBC *dbc;
-	ct_entry *dbc_ctp, *ctp, *dbp_ctp;
-	size_t size;
-	u_int32_t *cl, i;
-	int ret;
-
-	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
-	dbp = (DB *)dbp_ctp->ct_anyp;
-
-	dbc_ctp = new_ct_ent(&replyp->status);
-	if (dbc_ctp == NULL)
-		return;
-
-	size = (curslen + 1) * sizeof(DBC *);
-	if ((ret = __os_calloc(dbp->dbenv,
-	    curslen + 1, sizeof(DBC *), &jcurs)) != 0) {
-		replyp->status = ret;
-		__dbclear_ctp(dbc_ctp);
-		return;
-	}
-	/*
-	 * If our curslist has a parent txn, we need to use it too
-	 * for the activity timeout.  All cursors must be part of
-	 * the same transaction, so just check the first.
-	 */
-	ctp = get_tableent(*curs);
-	DB_ASSERT(ctp->ct_type == CT_CURSOR);
-	/*
-	 * If we are using a transaction, set the join activity timer
-	 * to point to the parent transaction.
-	 */
-	if (ctp->ct_activep != &ctp->ct_active)
-		dbc_ctp->ct_activep = ctp->ct_activep;
-	for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
-		ctp = get_tableent(*cl);
-		if (ctp == NULL) {
-			replyp->status = DB_NOSERVER_ID;
-			goto out;
-		}
-		/*
-		 * If we are using a txn, the join cursor points to the
-		 * transaction timeout.  If we are not using a transaction,
-		 * then all the curslist cursors must point to the join
-		 * cursor's timeout so that we do not timeout any of the
-		 * curlist cursors while the join cursor is active.
-		 * Change the type of the curslist ctps to CT_JOIN so that
-		 * we know they are part of a join list and we can distinguish
-		 * them and later restore them when the join cursor is closed.
-		 */
-		DB_ASSERT(ctp->ct_type == CT_CURSOR);
-		ctp->ct_type |= CT_JOIN;
-		ctp->ct_origp = ctp->ct_activep;
-		/*
-		 * Setting this to the ct_active field of the dbc_ctp is
-		 * really just a way to distinguish which join dbc this
-		 * cursor is part of.  The ct_activep of this cursor is
-		 * not used at all during its lifetime as part of a join
-		 * cursor.
-		 */
-		ctp->ct_activep = &dbc_ctp->ct_active;
-		*c = ctp->ct_dbc;
-	}
-	*c = NULL;
-	if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
-		dbc_ctp->ct_dbc = dbc;
-		dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
-		dbc_ctp->ct_parent = dbp_ctp;
-		dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
-		__dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
-		__dbsrv_active(dbc_ctp);
-		replyp->dbcidcl_id = dbc_ctp->ct_id;
-	} else {
-		__dbclear_ctp(dbc_ctp);
-		/*
-		 * If we get an error, undo what we did above to any cursors.
-		 */
-		for (cl = curs; *cl != 0; cl++) {
-			ctp = get_tableent(*cl);
-			ctp->ct_type = CT_CURSOR;
-			ctp->ct_activep = ctp->ct_origp;
-		}
-	}
-
-	replyp->status = ret;
-out:
-	__os_free(dbp->dbenv, jcurs);
-	return;
-}
-
-/* BEGIN __dbc_close_proc */
-/*
- * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));
- */
-void
-__dbc_close_proc(dbccl_id, replyp)
-	long dbccl_id;
-	__dbc_close_reply *replyp;
-/* END __dbc_close_proc */
-{
-	ct_entry *dbc_ctp;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	replyp->status = __dbc_close_int(dbc_ctp);
-	return;
-}
-
-/* BEGIN __dbc_count_proc */
-/*
- * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
- */
-void
-__dbc_count_proc(dbccl_id, flags, replyp)
-	long dbccl_id;
-	u_int32_t flags;
-	__dbc_count_reply *replyp;
-/* END __dbc_count_proc */
-{
-	DBC *dbc;
-	ct_entry *dbc_ctp;
-	db_recno_t num;
-	int ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-
-	ret = dbc->c_count(dbc, &num, flags);
-	replyp->status = ret;
-	if (ret == 0)
-		replyp->dupcount = num;
-	return;
-}
-
-/* BEGIN __dbc_del_proc */
-/*
- * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
- */
-void
-__dbc_del_proc(dbccl_id, flags, replyp)
-	long dbccl_id;
-	u_int32_t flags;
-	__dbc_del_reply *replyp;
-/* END __dbc_del_proc */
-{
-	DBC *dbc;
-	ct_entry *dbc_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-
-	ret = dbc->c_del(dbc, flags);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __dbc_dup_proc */
-/*
- * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
- */
-void
-__dbc_dup_proc(dbccl_id, flags, replyp)
-	long dbccl_id;
-	u_int32_t flags;
-	__dbc_dup_reply *replyp;
-/* END __dbc_dup_proc */
-{
-	DBC *dbc, *newdbc;
-	ct_entry *dbc_ctp, *new_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-
-	new_ctp = new_ct_ent(&replyp->status);
-	if (new_ctp == NULL)
-		return;
-
-	if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
-		new_ctp->ct_dbc = newdbc;
-		new_ctp->ct_type = CT_CURSOR;
-		new_ctp->ct_parent = dbc_ctp->ct_parent;
-		new_ctp->ct_envparent = dbc_ctp->ct_envparent;
-		/*
-		 * If our cursor has a parent txn, we need to use it too.
-		 */
-		if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
-			new_ctp->ct_activep = dbc_ctp->ct_activep;
-		__dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
-		__dbsrv_active(new_ctp);
-		replyp->dbcidcl_id = new_ctp->ct_id;
-	} else
-		__dbclear_ctp(new_ctp);
-
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __dbc_get_proc */
-/*
- * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, __dbc_get_reply *, int *));
- */
-void
-__dbc_get_proc(dbccl_id, keydlen, keydoff,
-		keyulen, keyflags, keydata, keysize,
-		datadlen, datadoff, dataulen, dataflags,
-		datadata, datasize, flags, replyp, freep)
-	long dbccl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__dbc_get_reply *replyp;
-	int * freep;
-/* END __dbc_get_proc */
-{
-	DBC *dbc;
-	DBT key, data;
-	DB_ENV *dbenv;
-	ct_entry *dbc_ctp;
-	int key_alloc, bulk_alloc, ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-	dbenv = dbc->dbp->dbenv;
-
-	*freep = 0;
-	bulk_alloc = 0;
-	memset(&key, 0, sizeof(key));
-	memset(&data, 0, sizeof(data));
-
-	/* Set up key and data DBT */
-	key.dlen = keydlen;
-	key.ulen = keyulen;
-	key.doff = keydoff;
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	key.flags = DB_DBT_MALLOC;
-	if (keyflags & DB_DBT_PARTIAL)
-		key.flags |= DB_DBT_PARTIAL;
-	key.size = keysize;
-	key.data = keydata;
-
-	data.dlen = datadlen;
-	data.ulen = dataulen;
-	data.doff = datadoff;
-	data.size = datasize;
-	data.data = datadata;
-	if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
-		if (data.data == 0) {
-			ret = __os_umalloc(dbenv, data.ulen, &data.data);
-			if (ret != 0)
-				goto err;
-			bulk_alloc = 1;
-		}
-		data.flags |= DB_DBT_USERMEM;
-	} else
-		data.flags |= DB_DBT_MALLOC;
-	if (dataflags & DB_DBT_PARTIAL)
-		data.flags |= DB_DBT_PARTIAL;
-
-	/* Got all our stuff, now do the get */
-	ret = dbc->c_get(dbc, &key, &data, flags);
-
-	/*
-	 * Otherwise just status.
-	 */
-	if (ret == 0) {
-		/*
-		 * XXX
-		 * We need to xdr_free whatever we are returning, next time.
-		 * However, DB does not allocate a new key if one was given
-		 * and we'd be free'ing up space allocated in the request.
-		 * So, allocate a new key/data pointer if it is the same one
-		 * as in the request.
-		 */
-		*freep = 1;
-		/*
-		 * Key
-		 */
-		key_alloc = 0;
-		if (key.data == keydata) {
-			ret = __os_umalloc(dbenv, key.size,
-			    &replyp->keydata.keydata_val);
-			if (ret != 0) {
-				__os_ufree(dbenv, key.data);
-				__os_ufree(dbenv, data.data);
-				goto err;
-			}
-			key_alloc = 1;
-			memcpy(replyp->keydata.keydata_val, key.data, key.size);
-		} else
-			replyp->keydata.keydata_val = key.data;
-
-		replyp->keydata.keydata_len = key.size;
-
-		/*
-		 * Data
-		 */
-		if (data.data == datadata) {
-			ret = __os_umalloc(dbenv, data.size,
-			    &replyp->datadata.datadata_val);
-			if (ret != 0) {
-				__os_ufree(dbenv, key.data);
-				__os_ufree(dbenv, data.data);
-				if (key_alloc)
-					__os_ufree(dbenv, replyp->keydata.keydata_val);
-				goto err;
-			}
-			memcpy(replyp->datadata.datadata_val, data.data,
-			    data.size);
-		} else
-			replyp->datadata.datadata_val = data.data;
-		replyp->datadata.datadata_len = data.size;
-	} else {
-err:		replyp->keydata.keydata_val = NULL;
-		replyp->keydata.keydata_len = 0;
-		replyp->datadata.datadata_val = NULL;
-		replyp->datadata.datadata_len = 0;
-		*freep = 0;
-		if (bulk_alloc)
-			__os_ufree(dbenv, data.data);
-	}
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __dbc_pget_proc */
-/*
- * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,
- * PUBLIC:      u_int32_t, __dbc_pget_reply *, int *));
- */
-void
-__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,
-		skeyulen, skeyflags, skeydata, skeysize,
-		pkeydlen, pkeydoff, pkeyulen, pkeyflags,
-		pkeydata, pkeysize, datadlen, datadoff,
-		dataulen, dataflags, datadata, datasize,
-		flags, replyp, freep)
-	long dbccl_id;
-	u_int32_t skeydlen;
-	u_int32_t skeydoff;
-	u_int32_t skeyulen;
-	u_int32_t skeyflags;
-	void *skeydata;
-	u_int32_t skeysize;
-	u_int32_t pkeydlen;
-	u_int32_t pkeydoff;
-	u_int32_t pkeyulen;
-	u_int32_t pkeyflags;
-	void *pkeydata;
-	u_int32_t pkeysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__dbc_pget_reply *replyp;
-	int * freep;
-/* END __dbc_pget_proc */
-{
-	DBC *dbc;
-	DBT skey, pkey, data;
-	DB_ENV *dbenv;
-	ct_entry *dbc_ctp;
-	int key_alloc, ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-	dbenv = dbc->dbp->dbenv;
-
-	*freep = 0;
-	memset(&skey, 0, sizeof(skey));
-	memset(&pkey, 0, sizeof(pkey));
-	memset(&data, 0, sizeof(data));
-
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	/* Set up key and data DBT */
-	skey.flags = DB_DBT_MALLOC;
-	skey.dlen = skeydlen;
-	skey.ulen = skeyulen;
-	skey.doff = skeydoff;
-	if (skeyflags & DB_DBT_PARTIAL)
-		skey.flags |= DB_DBT_PARTIAL;
-	skey.size = skeysize;
-	skey.data = skeydata;
-
-	pkey.flags = DB_DBT_MALLOC;
-	pkey.dlen = pkeydlen;
-	pkey.ulen = pkeyulen;
-	pkey.doff = pkeydoff;
-	if (pkeyflags & DB_DBT_PARTIAL)
-		pkey.flags |= DB_DBT_PARTIAL;
-	pkey.size = pkeysize;
-	pkey.data = pkeydata;
-
-	data.flags = DB_DBT_MALLOC;
-	data.dlen = datadlen;
-	data.ulen = dataulen;
-	data.doff = datadoff;
-	if (dataflags & DB_DBT_PARTIAL)
-		data.flags |= DB_DBT_PARTIAL;
-	data.size = datasize;
-	data.data = datadata;
-
-	/* Got all our stuff, now do the get */
-	ret = dbc->c_pget(dbc, &skey, &pkey, &data, flags);
-	/*
-	 * Otherwise just status.
-	 */
-	if (ret == 0) {
-		/*
-		 * XXX
-		 * We need to xdr_free whatever we are returning, next time.
-		 * However, DB does not allocate a new key if one was given
-		 * and we'd be free'ing up space allocated in the request.
-		 * So, allocate a new key/data pointer if it is the same one
-		 * as in the request.
-		 */
-		*freep = 1;
-		/*
-		 * Key
-		 */
-		key_alloc = 0;
-		if (skey.data == skeydata) {
-			ret = __os_umalloc(dbenv,
-			    skey.size, &replyp->skeydata.skeydata_val);
-			if (ret != 0) {
-				__os_ufree(dbenv, skey.data);
-				__os_ufree(dbenv, pkey.data);
-				__os_ufree(dbenv, data.data);
-				goto err;
-			}
-			key_alloc = 1;
-			memcpy(replyp->skeydata.skeydata_val, skey.data,
-			    skey.size);
-		} else
-			replyp->skeydata.skeydata_val = skey.data;
-		replyp->skeydata.skeydata_len = skey.size;
-
-		/*
-		 * Primary key
-		 */
-		if (pkey.data == pkeydata) {
-			ret = __os_umalloc(dbenv,
-			     pkey.size, &replyp->pkeydata.pkeydata_val);
-			if (ret != 0) {
-				__os_ufree(dbenv, skey.data);
-				__os_ufree(dbenv, pkey.data);
-				__os_ufree(dbenv, data.data);
-				if (key_alloc)
-					__os_ufree(dbenv,
-					    replyp->skeydata.skeydata_val);
-				goto err;
-			}
-			/*
-			 * We can set it to 2, because they cannot send the
-			 * pkey over without sending the skey over too.
-			 * So if they did send a pkey, they must have sent
-			 * the skey as well.
-			 */
-			key_alloc = 2;
-			memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
-			    pkey.size);
-		} else
-			replyp->pkeydata.pkeydata_val = pkey.data;
-		replyp->pkeydata.pkeydata_len = pkey.size;
-
-		/*
-		 * Data
-		 */
-		if (data.data == datadata) {
-			ret = __os_umalloc(dbenv,
-			     data.size, &replyp->datadata.datadata_val);
-			if (ret != 0) {
-				__os_ufree(dbenv, skey.data);
-				__os_ufree(dbenv, pkey.data);
-				__os_ufree(dbenv, data.data);
-				/*
-				 * If key_alloc is 1, just skey needs to be
-				 * freed, if key_alloc is 2, both skey and pkey
-				 * need to be freed.
-				 */
-				if (key_alloc--)
-					__os_ufree(dbenv,
-					    replyp->skeydata.skeydata_val);
-				if (key_alloc)
-					__os_ufree(dbenv,
-					    replyp->pkeydata.pkeydata_val);
-				goto err;
-			}
-			memcpy(replyp->datadata.datadata_val, data.data,
-			    data.size);
-		} else
-			replyp->datadata.datadata_val = data.data;
-		replyp->datadata.datadata_len = data.size;
-	} else {
-err:		replyp->skeydata.skeydata_val = NULL;
-		replyp->skeydata.skeydata_len = 0;
-		replyp->pkeydata.pkeydata_val = NULL;
-		replyp->pkeydata.pkeydata_len = 0;
-		replyp->datadata.datadata_val = NULL;
-		replyp->datadata.datadata_len = 0;
-		*freep = 0;
-	}
-	replyp->status = ret;
-	return;
-}
-
-/* BEGIN __dbc_put_proc */
-/*
- * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
- * PUBLIC:      u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
- * PUBLIC:      u_int32_t, u_int32_t, __dbc_put_reply *, int *));
- */
-void
-__dbc_put_proc(dbccl_id, keydlen, keydoff,
-		keyulen, keyflags, keydata, keysize,
-		datadlen, datadoff, dataulen, dataflags,
-		datadata, datasize, flags, replyp, freep)
-	long dbccl_id;
-	u_int32_t keydlen;
-	u_int32_t keydoff;
-	u_int32_t keyulen;
-	u_int32_t keyflags;
-	void *keydata;
-	u_int32_t keysize;
-	u_int32_t datadlen;
-	u_int32_t datadoff;
-	u_int32_t dataulen;
-	u_int32_t dataflags;
-	void *datadata;
-	u_int32_t datasize;
-	u_int32_t flags;
-	__dbc_put_reply *replyp;
-	int * freep;
-/* END __dbc_put_proc */
-{
-	DB *dbp;
-	DBC *dbc;
-	DBT key, data;
-	ct_entry *dbc_ctp;
-	int ret;
-
-	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-	dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
-
-	memset(&key, 0, sizeof(key));
-	memset(&data, 0, sizeof(data));
-
-	/* Set up key and data DBT */
-	key.dlen = keydlen;
-	key.ulen = keyulen;
-	key.doff = keydoff;
-	/*
-	 * Ignore memory related flags on server.
-	 */
-	key.flags = 0;
-	if (keyflags & DB_DBT_PARTIAL)
-		key.flags |= DB_DBT_PARTIAL;
-	key.size = keysize;
-	key.data = keydata;
-
-	data.dlen = datadlen;
-	data.ulen = dataulen;
-	data.doff = datadoff;
-	data.flags = dataflags;
-	data.size = datasize;
-	data.data = datadata;
-
-	/* Got all our stuff, now do the put */
-	ret = dbc->c_put(dbc, &key, &data, flags);
-
-	*freep = 0;
-	if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
-	    dbp->type == DB_RECNO) {
-		/*
-		 * We need to xdr_free whatever we are returning, next time.
-		 */
-		replyp->keydata.keydata_val = key.data;
-		replyp->keydata.keydata_len = key.size;
-	} else {
-		replyp->keydata.keydata_val = NULL;
-		replyp->keydata.keydata_len = 0;
-	}
-	replyp->status = ret;
-	return;
-}
-#endif /* HAVE_RPC */
diff --git a/storage/bdb/rpc_server/c/db_server_util.c b/storage/bdb/rpc_server/c/db_server_util.c
deleted file mode 100644
index 11bc4deb39c..00000000000
--- a/storage/bdb/rpc_server/c/db_server_util.c
+++ /dev/null
@@ -1,844 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000-2004
- *      Sleepycat Software.  All rights reserved.
- *
- * $Id: db_server_util.c,v 1.72 2004/09/22 17:30:12 bostic Exp $
- */
-
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
-#include 
-
-#if TIME_WITH_SYS_TIME
-#include 
-#include 
-#else
-#if HAVE_SYS_TIME_H
-#include 
-#else
-#include 
-#endif
-#endif
-
-#include 
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#endif
-
-#include "db_server.h"
-
-#include "db_int.h"
-#include "dbinc_auto/clib_ext.h"
-#include "dbinc/db_server_int.h"
-#include "dbinc_auto/common_ext.h"
-#include "dbinc_auto/rpc_server_ext.h"
-
-static int add_home __P((char *));
-static int add_passwd __P((char *));
-static int env_recover __P((char *));
-static void __dbclear_child __P((ct_entry *));
-
-static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
-static LIST_HEAD(homehead, home_entry) __dbsrv_home;
-static long __dbsrv_defto = DB_SERVER_TIMEOUT;
-static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
-static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
-static char *logfile = NULL;
-static char *prog;
-
-static void usage __P((void));
-static void version_check __P((void));
-
-int __dbsrv_verbose = 0;
-
-int
-main(argc, argv)
-	int argc;
-	char **argv;
-{
-	extern int __dbsrv_main();
-	extern char *optarg;
-	CLIENT *cl;
-	int ch, ret;
-	char *passwd;
-
-	prog = argv[0];
-
-	version_check();
-
-	ret = 0;
-	/*
-	 * Check whether another server is running or not.  There
-	 * is a race condition where two servers could be racing to
-	 * register with the portmapper.  The goal of this check is to
-	 * forbid running additional servers (like those started from
-	 * the test suite) if the user is already running one.
-	 *
-	 * XXX
-	 * This does not solve nor prevent two servers from being
-	 * started at the same time and running recovery at the same
-	 * time on the same environments.
-	 */
-	if ((cl = clnt_create("localhost",
-	    DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
-		fprintf(stderr,
-		    "%s: Berkeley DB RPC server already running.\n", prog);
-		clnt_destroy(cl);
-		return (EXIT_FAILURE);
-	}
-
-	LIST_INIT(&__dbsrv_home);
-	while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
-		switch (ch) {
-		case 'h':
-			(void)add_home(optarg);
-			break;
-		case 'I':
-			if (__db_getlong(NULL, prog,
-			    optarg, 1, LONG_MAX, &__dbsrv_idleto))
-				return (EXIT_FAILURE);
-			break;
-		case 'L':
-			logfile = optarg;
-			break;
-		case 'P':
-			passwd = strdup(optarg);
-			memset(optarg, 0, strlen(optarg));
-			if (passwd == NULL) {
-				fprintf(stderr, "%s: strdup: %s\n",
-				    prog, strerror(errno));
-				return (EXIT_FAILURE);
-			}
-			if ((ret = add_passwd(passwd)) != 0) {
-				fprintf(stderr, "%s: strdup: %s\n",
-				    prog, strerror(ret));
-				return (EXIT_FAILURE);
-			}
-			break;
-		case 't':
-			if (__db_getlong(NULL, prog,
-			    optarg, 1, LONG_MAX, &__dbsrv_defto))
-				return (EXIT_FAILURE);
-			break;
-		case 'T':
-			if (__db_getlong(NULL, prog,
-			    optarg, 1, LONG_MAX, &__dbsrv_maxto))
-				return (EXIT_FAILURE);
-			break;
-		case 'V':
-			printf("%s\n", db_version(NULL, NULL, NULL));
-			return (EXIT_SUCCESS);
-		case 'v':
-			__dbsrv_verbose = 1;
-			break;
-		default:
-			usage();
-		}
-	/*
-	 * Check default timeout against maximum timeout
-	 */
-	if (__dbsrv_defto > __dbsrv_maxto)
-		__dbsrv_defto = __dbsrv_maxto;
-
-	/*
-	 * Check default timeout against idle timeout
-	 * It would be bad to timeout environments sooner than txns.
-	 */
-	if (__dbsrv_defto > __dbsrv_idleto)
-		fprintf(stderr,
-	    "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
-		    prog, __dbsrv_idleto, __dbsrv_defto);
-
-	LIST_INIT(&__dbsrv_head);
-
-	/*
-	 * If a client crashes during an RPC, our reply to it
-	 * generates a SIGPIPE.  Ignore SIGPIPE so we don't exit unnecessarily.
-	 */
-#ifdef SIGPIPE
-	signal(SIGPIPE, SIG_IGN);
-#endif
-
-	if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
-		return (EXIT_FAILURE);
-
-	/*
-	 * Now that we are ready to start, run recovery on all the
-	 * environments specified.
-	 */
-	if (env_recover(prog) != 0)
-		return (EXIT_FAILURE);
-
-	/*
-	 * We've done our setup, now call the generated server loop
-	 */
-	if (__dbsrv_verbose)
-		printf("%s:  Ready to receive requests\n", prog);
-	__dbsrv_main();
-
-	abort();
-
-	/* NOTREACHED */
-	return (0);
-}
-
-static void
-usage()
-{
-	fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
-	    "[-Vv] [-h home] [-P passwd]",
-	    "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
-	exit(EXIT_FAILURE);
-}
-
-static void
-version_check()
-{
-	int v_major, v_minor, v_patch;
-
-	/* Make sure we're loaded with the right version of the DB library. */
-	(void)db_version(&v_major, &v_minor, &v_patch);
-	if (v_major != DB_VERSION_MAJOR ||
-	    v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
-		fprintf(stderr,
-	"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
-		    prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
-		    DB_VERSION_PATCH, v_major, v_minor, v_patch);
-		exit(EXIT_FAILURE);
-	}
-}
-
-/*
- * PUBLIC: void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
- */
-void
-__dbsrv_settimeout(ctp, to)
-	ct_entry *ctp;
-	u_int32_t to;
-{
-	if (to > (u_int32_t)__dbsrv_maxto)
-		ctp->ct_timeout = __dbsrv_maxto;
-	else if (to <= 0)
-		ctp->ct_timeout = __dbsrv_defto;
-	else
-		ctp->ct_timeout = to;
-}
-
-/*
- * PUBLIC: void __dbsrv_timeout __P((int));
- */
-void
-__dbsrv_timeout(force)
-	int force;
-{
-	static long to_hint = -1;
-	time_t t;
-	long to;
-	ct_entry *ctp, *nextctp;
-
-	if ((t = time(NULL)) == -1)
-		return;
-
-	/*
-	 * Check hint.  If hint is further in the future
-	 * than now, no work to do.
-	 */
-	if (!force && to_hint > 0 && t < to_hint)
-		return;
-	to_hint = -1;
-	/*
-	 * Timeout transactions or cursors holding DB resources.
-	 * Do this before timing out envs to properly release resources.
-	 *
-	 * !!!
-	 * We can just loop through this list looking for cursors and txns.
-	 * We do not need to verify txn and cursor relationships at this
-	 * point because we maintain the list in LIFO order *and* we
-	 * maintain activity in the ultimate txn parent of any cursor
-	 * so either everything in a txn is timing out, or nothing.
-	 * So, since we are LIFO, we will correctly close/abort all the
-	 * appropriate handles, in the correct order.
-	 */
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
-		nextctp = LIST_NEXT(ctp, entries);
-		switch (ctp->ct_type) {
-		case CT_TXN:
-			to = *(ctp->ct_activep) + ctp->ct_timeout;
-			/* TIMEOUT */
-			if (to < t) {
-				if (__dbsrv_verbose)
-					printf("Timing out txn id %ld\n",
-					    ctp->ct_id);
-				(void)((DB_TXN *)ctp->ct_anyp)->
-				    abort((DB_TXN *)ctp->ct_anyp);
-				__dbdel_ctp(ctp);
-				/*
-				 * If we timed out an txn, we may have closed
-				 * all sorts of ctp's.
-				 * So start over with a guaranteed good ctp.
-				 */
-				nextctp = LIST_FIRST(&__dbsrv_head);
-			} else if ((to_hint > 0 && to_hint > to) ||
-			    to_hint == -1)
-				to_hint = to;
-			break;
-		case CT_CURSOR:
-		case (CT_JOINCUR | CT_CURSOR):
-			to = *(ctp->ct_activep) + ctp->ct_timeout;
-			/* TIMEOUT */
-			if (to < t) {
-				if (__dbsrv_verbose)
-					printf("Timing out cursor %ld\n",
-					    ctp->ct_id);
-				(void)__dbc_close_int(ctp);
-				/*
-				 * Start over with a guaranteed good ctp.
-				 */
-				nextctp = LIST_FIRST(&__dbsrv_head);
-			} else if ((to_hint > 0 && to_hint > to) ||
-			    to_hint == -1)
-				to_hint = to;
-			break;
-		default:
-			break;
-		}
-	}
-	/*
-	 * Timeout idle handles.
-	 * If we are forcing a timeout, we'll close all env handles.
-	 */
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
-		nextctp = LIST_NEXT(ctp, entries);
-		if (ctp->ct_type != CT_ENV)
-			continue;
-		to = *(ctp->ct_activep) + ctp->ct_idle;
-		/* TIMEOUT */
-		if (to < t || force) {
-			if (__dbsrv_verbose)
-				printf("Timing out env id %ld\n", ctp->ct_id);
-			(void)__dbenv_close_int(ctp->ct_id, 0, 1);
-			/*
-			 * If we timed out an env, we may have closed
-			 * all sorts of ctp's (maybe even all of them.
-			 * So start over with a guaranteed good ctp.
-			 */
-			nextctp = LIST_FIRST(&__dbsrv_head);
-		}
-	}
-}
-
-/*
- * RECURSIVE FUNCTION.  We need to clear/free any number of levels of nested
- * layers.
- */
-static void
-__dbclear_child(parent)
-	ct_entry *parent;
-{
-	ct_entry *ctp, *nextctp;
-
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
-	    ctp = nextctp) {
-		nextctp = LIST_NEXT(ctp, entries);
-		if (ctp->ct_type == 0)
-			continue;
-		if (ctp->ct_parent == parent) {
-			__dbclear_child(ctp);
-			/*
-			 * Need to do this here because le_next may
-			 * have changed with the recursive call and we
-			 * don't want to point to a removed entry.
-			 */
-			nextctp = LIST_NEXT(ctp, entries);
-			__dbclear_ctp(ctp);
-		}
-	}
-}
-
-/*
- * PUBLIC: void __dbclear_ctp __P((ct_entry *));
- */
-void
-__dbclear_ctp(ctp)
-	ct_entry *ctp;
-{
-	LIST_REMOVE(ctp, entries);
-	__os_free(NULL, ctp);
-}
-
-/*
- * PUBLIC: void __dbdel_ctp __P((ct_entry *));
- */
-void
-__dbdel_ctp(parent)
-	ct_entry *parent;
-{
-	__dbclear_child(parent);
-	__dbclear_ctp(parent);
-}
-
-/*
- * PUBLIC: ct_entry *new_ct_ent __P((int *));
- */
-ct_entry *
-new_ct_ent(errp)
-	int *errp;
-{
-	time_t t;
-	ct_entry *ctp, *octp;
-	int ret;
-
-	if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
-		*errp = ret;
-		return (NULL);
-	}
-	memset(ctp, 0, sizeof(ct_entry));
-	/*
-	 * Get the time as ID.  We may service more than one request per
-	 * second however.  If we are, then increment id value until we
-	 * find an unused one.  We insert entries in LRU fashion at the
-	 * head of the list.  So, if the first entry doesn't match, then
-	 * we know for certain that we can use our entry.
-	 */
-	if ((t = time(NULL)) == -1) {
-		*errp = __os_get_errno();
-		__os_free(NULL, ctp);
-		return (NULL);
-	}
-	octp = LIST_FIRST(&__dbsrv_head);
-	if (octp != NULL && octp->ct_id >= t)
-		t = octp->ct_id + 1;
-	ctp->ct_id = (long)t;
-	ctp->ct_idle = __dbsrv_idleto;
-	ctp->ct_activep = &ctp->ct_active;
-	ctp->ct_origp = NULL;
-	ctp->ct_refcount = 1;
-
-	LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
-	return (ctp);
-}
-
-/*
- * PUBLIC: ct_entry *get_tableent __P((long));
- */
-ct_entry *
-get_tableent(id)
-	long id;
-{
-	ct_entry *ctp;
-
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
-	    ctp = LIST_NEXT(ctp, entries))
-		if (ctp->ct_id == id)
-			return (ctp);
-	return (NULL);
-}
-
-/*
- * PUBLIC: ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *,
- * PUBLIC:    const char *, DBTYPE, u_int32_t));
- */
-ct_entry *
-__dbsrv_sharedb(db_ctp, name, subdb, type, flags)
-	ct_entry *db_ctp;
-	const char *name, *subdb;
-	DBTYPE type;
-	u_int32_t flags;
-{
-	ct_entry *ctp;
-
-	/*
-	 * Check if we can share a db handle.  Criteria for sharing are:
-	 * If any of the non-sharable flags are set, we cannot share.
-	 * Must be a db ctp, obviously.
-	 * Must share the same env parent.
-	 * Must be the same type, or current one DB_UNKNOWN.
-	 * Must be same byteorder, or current one must not care.
-	 * All flags must match.
-	 * Must be same name, but don't share in-memory databases.
-	 * Must be same subdb name.
-	 */
-	if (flags & DB_SERVER_DBNOSHARE)
-		return (NULL);
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
-	    ctp = LIST_NEXT(ctp, entries)) {
-		/*
-		 * Skip ourselves.
-		 */
-		if (ctp == db_ctp)
-			continue;
-		if (ctp->ct_type != CT_DB)
-			continue;
-		if (ctp->ct_envparent != db_ctp->ct_envparent)
-			continue;
-		if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
-			continue;
-		if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
-			continue;
-		if (db_ctp->ct_dbdp.setflags != 0 &&
-		    ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
-			continue;
-		if (name == NULL || ctp->ct_dbdp.db == NULL ||
-		    strcmp(name, ctp->ct_dbdp.db) != 0)
-			continue;
-		if (subdb != ctp->ct_dbdp.subdb &&
-		    (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
-		    strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
-			continue;
-		/*
-		 * If we get here, then we match.
-		 */
-		ctp->ct_refcount++;
-		return (ctp);
-	}
-
-	return (NULL);
-}
-
-/*
- * PUBLIC: ct_entry *__dbsrv_shareenv
- * PUBLIC:     __P((ct_entry *, home_entry *, u_int32_t));
- */
-ct_entry *
-__dbsrv_shareenv(env_ctp, home, flags)
-	ct_entry *env_ctp;
-	home_entry *home;
-	u_int32_t flags;
-{
-	ct_entry *ctp;
-
-	/*
-	 * Check if we can share an env.  Criteria for sharing are:
-	 * Must be an env ctp, obviously.
-	 * Must share the same home env.
-	 * All flags must match.
-	 */
-	for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
-	    ctp = LIST_NEXT(ctp, entries)) {
-		/*
-		 * Skip ourselves.
-		 */
-		if (ctp == env_ctp)
-			continue;
-		if (ctp->ct_type != CT_ENV)
-			continue;
-		if (ctp->ct_envdp.home != home)
-			continue;
-		if (ctp->ct_envdp.envflags != flags)
-			continue;
-		if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
-			continue;
-		if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
-			continue;
-		/*
-		 * If we get here, then we match.  The only thing left to
-		 * check is the timeout.  Since the server timeout set by
-		 * the client is a hint, for sharing we'll give them the
-		 * benefit of the doubt and grant them the longer timeout.
-		 */
-		if (ctp->ct_timeout < env_ctp->ct_timeout)
-			ctp->ct_timeout = env_ctp->ct_timeout;
-		ctp->ct_refcount++;
-		return (ctp);
-	}
-
-	return (NULL);
-}
-
-/*
- * PUBLIC: void __dbsrv_active __P((ct_entry *));
- */
-void
-__dbsrv_active(ctp)
-	ct_entry *ctp;
-{
-	time_t t;
-	ct_entry *envctp;
-
-	if (ctp == NULL)
-		return;
-	if ((t = time(NULL)) == -1)
-		return;
-	*(ctp->ct_activep) = t;
-	if ((envctp = ctp->ct_envparent) == NULL)
-		return;
-	*(envctp->ct_activep) = t;
-	return;
-}
-
-/*
- * PUBLIC: int __db_close_int __P((long, u_int32_t));
- */
-int
-__db_close_int(id, flags)
-	long id;
-	u_int32_t flags;
-{
-	DB *dbp;
-	int ret;
-	ct_entry *ctp;
-
-	ret = 0;
-	ctp = get_tableent(id);
-	if (ctp == NULL)
-		return (DB_NOSERVER_ID);
-	DB_ASSERT(ctp->ct_type == CT_DB);
-	if (__dbsrv_verbose && ctp->ct_refcount != 1)
-		printf("Deref'ing dbp id %ld, refcount %d\n",
-		    id, ctp->ct_refcount);
-	if (--ctp->ct_refcount != 0)
-		return (ret);
-	dbp = ctp->ct_dbp;
-	if (__dbsrv_verbose)
-		printf("Closing dbp id %ld\n", id);
-
-	ret = dbp->close(dbp, flags);
-	__dbdel_ctp(ctp);
-	return (ret);
-}
-
-/*
- * PUBLIC: int __dbc_close_int __P((ct_entry *));
- */
-int
-__dbc_close_int(dbc_ctp)
-	ct_entry *dbc_ctp;
-{
-	DBC *dbc;
-	int ret;
-	ct_entry *ctp;
-
-	dbc = (DBC *)dbc_ctp->ct_anyp;
-
-	ret = dbc->c_close(dbc);
-	/*
-	 * If this cursor is a join cursor then we need to fix up the
-	 * cursors that it was joined from so that they are independent again.
-	 */
-	if (dbc_ctp->ct_type & CT_JOINCUR)
-		for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
-		    ctp = LIST_NEXT(ctp, entries)) {
-			/*
-			 * Test if it is a join cursor, and if it is part
-			 * of this one.
-			 */
-			if ((ctp->ct_type & CT_JOIN) &&
-			    ctp->ct_activep == &dbc_ctp->ct_active) {
-				ctp->ct_type &= ~CT_JOIN;
-				ctp->ct_activep = ctp->ct_origp;
-				__dbsrv_active(ctp);
-			}
-		}
-	__dbclear_ctp(dbc_ctp);
-	return (ret);
-
-}
-
-/*
- * PUBLIC: int __dbenv_close_int __P((long, u_int32_t, int));
- */
-int
-__dbenv_close_int(id, flags, force)
-	long id;
-	u_int32_t flags;
-	int force;
-{
-	DB_ENV *dbenv;
-	int ret;
-	ct_entry *ctp, *dbctp, *nextctp;
-
-	ret = 0;
-	ctp = get_tableent(id);
-	if (ctp == NULL)
-		return (DB_NOSERVER_ID);
-	DB_ASSERT(ctp->ct_type == CT_ENV);
-	if (__dbsrv_verbose && ctp->ct_refcount != 1)
-		printf("Deref'ing env id %ld, refcount %d\n",
-		    id, ctp->ct_refcount);
-	/*
-	 * If we are timing out, we need to force the close, no matter
-	 * what the refcount.
-	 */
-	if (--ctp->ct_refcount != 0 && !force)
-		return (ret);
-	dbenv = ctp->ct_envp;
-	if (__dbsrv_verbose)
-		printf("Closing env id %ld\n", id);
-
-	/*
-	 * If we're timing out an env, we want to close all of its
-	 * database handles as well.  All of the txns and cursors
-	 * must have been timed out prior to timing out the env.
-	 */
-	if (force)
-		for (dbctp = LIST_FIRST(&__dbsrv_head);
-		    dbctp != NULL; dbctp = nextctp) {
-			nextctp = LIST_NEXT(dbctp, entries);
-			if (dbctp->ct_type != CT_DB)
-				continue;
-			if (dbctp->ct_envparent != ctp)
-				continue;
-			/*
-			 * We found a DB handle that is part of this
-			 * environment.  Close it.
-			 */
-			__db_close_int(dbctp->ct_id, 0);
-			/*
-			 * If we timed out a dbp, we may have removed
-			 * multiple ctp entries.  Start over with a
-			 * guaranteed good ctp.
-			 */
-			nextctp = LIST_FIRST(&__dbsrv_head);
-		}
-	ret = dbenv->close(dbenv, flags);
-	__dbdel_ctp(ctp);
-	return (ret);
-}
-
-static int
-add_home(home)
-	char *home;
-{
-	home_entry *hp, *homep;
-	int ret;
-
-	if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
-		return (ret);
-	if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
-		return (ret);
-	memcpy(hp->home, home, strlen(home)+1);
-	hp->dir = home;
-	hp->passwd = NULL;
-	/*
-	 * This loop is to remove any trailing path separators,
-	 * to assure hp->name points to the last component.
-	 */
-	hp->name = __db_rpath(home);
-	if (hp->name != NULL) {
-		*(hp->name) = '\0';
-		hp->name++;
-	} else
-		hp->name = home;
-	while (*(hp->name) == '\0') {
-		hp->name = __db_rpath(home);
-		*(hp->name) = '\0';
-		hp->name++;
-	}
-	/*
-	 * Now we have successfully added it.  Make sure there are no
-	 * identical names.
-	 */
-	for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
-	    homep = LIST_NEXT(homep, entries))
-		if (strcmp(homep->name, hp->name) == 0) {
-			printf("Already added home name %s, at directory %s\n",
-			    hp->name, homep->dir);
-			__os_free(NULL, hp->home);
-			__os_free(NULL, hp);
-			return (-1);
-		}
-	LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
-	if (__dbsrv_verbose)
-		printf("Added home %s in dir %s\n", hp->name, hp->dir);
-	return (0);
-}
-
-static int
-add_passwd(passwd)
-	char *passwd;
-{
-	home_entry *hp;
-
-	/*
-	 * We add the passwd to the last given home dir.  If there
-	 * isn't a home dir, or the most recent one already has a
-	 * passwd, then there is a user error.
-	 */
-	hp = LIST_FIRST(&__dbsrv_home);
-	if (hp == NULL || hp->passwd != NULL)
-		return (EINVAL);
-	/*
-	 * We've already strdup'ed the passwd above, so we don't need
-	 * to malloc new space, just point to it.
-	 */
-	hp->passwd = passwd;
-	return (0);
-}
-
-/*
- * PUBLIC: home_entry *get_fullhome __P((char *));
- */
-home_entry *
-get_fullhome(name)
-	char *name;
-{
-	home_entry *hp;
-
-	if (name == NULL)
-		return (NULL);
-	for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
-	    hp = LIST_NEXT(hp, entries))
-		if (strcmp(name, hp->name) == 0)
-			return (hp);
-	return (NULL);
-}
-
-static int
-env_recover(progname)
-	char *progname;
-{
-	DB_ENV *dbenv;
-	home_entry *hp;
-	u_int32_t flags;
-	int exitval, ret;
-
-	for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
-	    hp = LIST_NEXT(hp, entries)) {
-		exitval = 0;
-		if ((ret = db_env_create(&dbenv, 0)) != 0) {
-			fprintf(stderr, "%s: db_env_create: %s\n",
-			    progname, db_strerror(ret));
-			exit(EXIT_FAILURE);
-		}
-		if (__dbsrv_verbose == 1)
-			(void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
-		dbenv->set_errfile(dbenv, stderr);
-		dbenv->set_errpfx(dbenv, progname);
-		if (hp->passwd != NULL)
-			(void)dbenv->set_encrypt(dbenv, hp->passwd,
-			    DB_ENCRYPT_AES);
-
-		/*
-		 * Initialize the env with DB_RECOVER.  That is all we
-		 * have to do to run recovery.
-		 */
-		if (__dbsrv_verbose)
-			printf("Running recovery on %s\n", hp->home);
-		flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
-		    DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
-		if ((ret = dbenv->open(dbenv, hp->home, flags, 0)) != 0) {
-			dbenv->err(dbenv, ret, "DB_ENV->open");
-			goto error;
-		}
-
-		if (0) {
-error:			exitval = 1;
-		}
-		if ((ret = dbenv->close(dbenv, 0)) != 0) {
-			exitval = 1;
-			fprintf(stderr, "%s: dbenv->close: %s\n",
-			    progname, db_strerror(ret));
-		}
-		if (exitval)
-			return (exitval);
-	}
-	return (0);
-}
diff --git a/storage/bdb/rpc_server/clsrv.html b/storage/bdb/rpc_server/clsrv.html
deleted file mode 100644
index 599ad56f557..00000000000
--- a/storage/bdb/rpc_server/clsrv.html
+++ /dev/null
@@ -1,453 +0,0 @@
-
-
-
-   
-   
-
-
-
-
-

- Client/Server Interface for Berkeley DB

- -
Susan LoVerso -
sue@sleepycat.com -
Rev 1.3 -
1999 Nov 29
- -

We provide an interface allowing client/server access to Berkeley DB.   -Our goal is to provide a client and server library to allow users to separate -the functionality of their applications yet still have access to the full -benefits of Berkeley DB.  The goal is to provide a totally seamless -interface with minimal modification to existing applications as well. -

The client/server interface for Berkeley DB can be broken up into several -layers.  At the lowest level there is the transport mechanism to send -out the messages over the network.  Above that layer is the messaging -layer to interpret what comes over the wire, and bundle/unbundle message -contents.  The next layer is Berkeley DB itself. -

The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).  -We declare our message types and operations supported by our program and -the RPC library and utilities pretty much take care of the rest.  -The -rpcgen program generates all of the low level code needed.  -We need to define both sides of the RPC. -
  -

-DB Modifications

-To achieve the goal of a seamless interface, it is necessary to impose -a constraint on the application. That constraint is simply that all database -access must be done through an open environment.  I.e. this model -does not support standalone databases.  The reason for this constraint -is so that we have an environment structure internally to store our connection -to the server.  Imposing this constraint means that we can provide -the seamless interface just by adding a single environment method: DBENV->set_rpc_server(). -

The planned interface for this method is: -

DBENV->set_rpc_server(dbenv,      /* DB_ENV structure */
-                  hostname    /* Host of server */
-                  cl_timeout, /* Client timeout (sec) */
-                  srv_timeout,/* Server timeout (sec) */
-                  flags);     /* Flags: unused */
-This new method takes the hostname of the server, establishes our connection -and an environment on the server.  If a server timeout is specified, -then we send that to the server as well (and the server may or may not -choose to use that value).  This timeout is how long the server will -allow the environment to remain idle before declaring it dead and releasing -resources on the server.  The pointer to the connection is stored -on the client in the DBENV structure and is used by all other methods to -figure out with whom to communicate.  If a client timeout is specified, -it indicates how long the client is willing to wait for a reply from the -server.  If the values are 0, then defaults are used.  Flags -is currently unused, but exists because we always need to have a placeholder -for flags and it would be used for specifying authentication desired (were -we to provide an authentication scheme at some point) or other uses not -thought of yet! -

This client code is part of the monolithic DB library.  The user -accesses the client functions via a new flag to db_env_create().  -That flag is DB_CLIENT.  By using this flag the user indicates they -want to have the client methods rather than the standard methods for the -environment.  Also by issuing this flag, the user needs to connect -to the server via the DBENV->set_rpc_server() -method. -

We need two new fields in the DB_ENV structure.  One is -the socket descriptor to communicate to the server, the other field is -the client identifier the server gives to us.  The DB, and -DBC only need one additional field, the client identifier.  The -DB_TXN -structure does not need modification, we are overloading the txn_id -field. -

-Issues

-We need to figure out what to do in case of client and server crashes.  -Both the client library and the server program are stateful.  They -both consume local resources during the lifetime of the connection.  -Should one end drop that connection, the other side needs to release those -resources. -

If the server crashes, then the client will get an error back.  -I have chosen to implement time-outs on the client side, using a default -or allowing the application to specify one through the DBENV->set_rpc_server() -method.  Either the current operation will time-out waiting for the -reply or the next operation called will time out (or get back some other -kind of error regarding the server's non-existence).  In any case, -if the client application gets back such an error, it should abort any -open transactions locally, close any databases, and close its environment.  -It may then decide to retry to connect to the server periodically or whenever -it comes back.  If the last operation a client did was a transaction -commit that did not return or timed out from the server, the client cannot -determine if the transaction was committed or not but must release the -local transaction resources. Once the server is back up, recovery must -be run on the server.   If the transaction commit completed on -the server before the crash, then the operation is redone, if the transaction -commit did not get to the server, the pieces of the transaction are undone -on recover.  The client can then re-establish its connection and begin -again.  This is effectively like beginning over.  The client -cannot use ID's from its previous connection to the server.  However, -if recovery is run, then consistency is assured. -

If the client crashes, the server needs to somehow figure this out.  -The server is just sitting there waiting for a request to come in.  -A server must be able to time-out a client.  Similar to ftpd, if a -connection is idle for N seconds, then the server decides the client is -dead and releases that client's resources, aborting any open transactions, -closing any open databases and environments.   The server timing -out a client is not a trivial issue however.  The generated function -for the server just calls svc_run().  The server code I write -contains procedures to do specific things.  We do not have access -to the code calling select().  Timing out the select is not -good enough even if we could do so.  We want to time-out idle environments, -not simply cause a time-out if the server is idle a while.  See the -discussion of the server program for -a description of how we accomplish this. -

Since rpcgen generates the main() function of the server, I do not yet -know how we are going to have the server multi-threaded or multi-process -without changing the generated code.  The RPC book indicates that -the only way to accomplish this is through modifying the generated code -in the server.  For the moment we will ignore this issue while -we get the core server working, as it is only a performance issue. -

We do not do any security or authentication.  Someone could get -the code and modify it to spoof messages, trick the server, etc.  -RPC has some amount of authentication built into it.  I haven't yet -looked into it much to know if we want to use it or just point a user at -it.  The changes to the client code are fairly minor, the changes -to our server procs are fairly minor.  We would have to add code to -a sed script or awk script to change the generated server -code (yet again) in the dispatch routine to perform authentication. -

We will need to get an official program number from Sun.  We can -get this by sending mail to rpc@sun.com and presumably at some point -they will send us back a program number that we will encode into our XDR -description file.  Until we release this we can use a program number -in the "user defined" number space. -
  -

-The Server Program

-The server is a standalone program that the user builds and runs, probably -as a daemon like process.  This program is linked against the Berkeley -DB library and the RPC library (which is part of the C library on my FreeBSD -machine, others may have/need -lrpclib).  The server basically -is a slave to the client process.  All messages from the client are -synchronous and two-way.  The server handles messages one at a time, -and sends a reply back before getting another message.  There are -no asynchronous messages generated by the server to the client. -

We have made a choice to modify the generated code for the server.  -The changes will be minimal, generally calling functions we write, that -are in other source files.  The first change is adding a call to our -time-out function as described below.  The second change is changing -the name of the generated main() function to __dbsrv_main(), -and adding our own main() function so that we can parse options, -and set up other initialization we require.  I have a sed script -that is run from the distribution scripts that massages the generated code -to make these minor changes. -

Primarily the code needed for the server is the collection of the specified -RPC functions.  Each function receives the structure indicated, and -our code takes out what it needs and passes the information into DB itself.  -The server needs to maintain a translation table for identifiers that we -pass back to the client for the environment, transaction and database handles. -

The table that the server maintains, assuming one client per server -process/thread, should contain the handle to the environment, database -or transaction, a link to maintain parent/child relationships between transactions, -or databases and cursors, this handle's identifier, a type so that we can -error if the client passes us a bad id for this call, and a link to this -handle's environment entry (for time out/activity purposes).  The -table contains, in entries used by environments, a time-out value and an -activity time stamp.  Its use is described below for timing out idle -clients. -

Here is how we time out clients in the server.  We have to modify -the generated server code, but only to add one line during the dispatch -function to run the time-out function.  The call is made right before -the return of the dispatch function, after the reply is sent to the client, -so that client's aren't kept waiting for server bookkeeping activities.  -This time-out function then runs every time the server processes a request.  -In the time-out function we maintain a time-out hint that is the youngest -environment to time-out.  If the current time is less than the hint -we know we do not need to run through the list of open handles.  If -the hint is expired, then we go through the list of open environment handles, -and if they are past their expiration, then we close them and clean up.  -If they are not, we set up the hint for the next time. -

Each entry in the open handle table has a pointer back to its environment's -entry.  Every operation within this environment can then update the -single environment activity record.  Every environment can have a -different time-out.  The DBENV->set_rpc_server -call -takes a server time-out value.  If this value is 0 then a default -(currently 5 minutes) is used.  This time-out value is only a hint -to the server.  It may choose to disregard this value or set the time-out -based on its own implementation. -

For completeness, the flaws of this time-out implementation should be -pointed out.  First, it is possible that a client could crash with -open handles, and no other requests come in to the server.  Therefore -the time-out function never gets run and those resources are not released -(until a request does come in).  Similarly, this time-out is not exact.  -The time-out function uses its hint and if it computes a hint on one run, -an earlier time-out might be created before that time-out expires.  -This issue simply yields a handle that doesn't get released until that -original hint expires.  To illustrate, consider that at the time that -the time-out function is run, the youngest time-out is 5 minutes in the -future.  Soon after, a new environment is opened that has a time-out -of 1 minute.  If this environment becomes idle (and other operations -are going on), the time-out function will not release that environment -until the original 5 minute hint expires.  This is not a problem since -the resources will eventually be released. -

On a similar note, if a client crashes during an RPC, our reply generates -a SIGPIPE, and our server crashes unless we catch it.  Using signal(SIGPIPE, -SIG_IGN) we can ignore it, and the server will go on.  This is -a call  in our main() function that we write.  Eventually -this client's handles would be timed out as described above.  We need -this only for the unfortunate window of a client crashing during the RPC. -

The options below are primarily for control of the program itself,.  -Details relating to databases and environments should be passed from the -client to the server, since the server can serve many clients, many environments -and many databases.  Therefore it makes more sense for the client -to set the cache size of its own environment, rather than setting a default -cachesize on the server that applies as a blanket to any environment it -may be called upon to open.  Options are: -

    -
  • --t  to set the default time-out given to an environment.
  • - -
  • --T to set the maximum time-out allowed for the server.
  • - -
  • --L to log the execution of the server process to a specified file.
  • - -
  • --v to run in verbose mode.
  • - -
  • --M  to specify the maximum number of outstanding child server -processes/threads we can have at any given time.  The default is 10. -[We -are not yet doing multiple threads/processes.]
  • -
- -

-The Client Code

-The client code contains all of the supported functions and methods used -in this model.  There are several methods in the __db_env -and -__db -structures that currently do not apply, such as the callbacks.  Those -fields that are not applicable to the client model point to NULL to notify -the user of their error.  Some method functions remain unchanged, -as well such as the error calls. -

The client code contains each method function that goes along with the -RPC -calls described elsewhere.  The client library also contains its -own version of db_env_create(), -which does not result in any messages going over to the server (since we -do not yet know what server we are talking to).  This function sets -up the pointers to the correct client functions. -

All of the method functions that handle the messaging have a basic flow -similar to this: -

    -
  • -Local arg parsing that may be needed
  • - -
  • -Marshalling the message header and the arguments we need to send to the -server
  • - -
  • -Sending the message
  • - -
  • -Receiving a reply
  • - -
  • -Unmarshalling the reply
  • - -
  • -Local results processing that may be needed
  • -
- -

-Generated Code

-Almost all of the code is generated from a source file describing the interface -and an awk script.   This awk script generates six (6) -files for us.  It also modifies one.  The files are: -
    -
  1. -Client file - The C source file created containing the client code.
  2. - -
  3. -Client template file - The C template source file created containing interfaces -for handling client-local issues such as resource allocation, but with -a consistent interface with the client code generated.
  4. - -
  5. -Server file - The C source file created containing the server code.
  6. - -
  7. -Server template file - The C template source file created containing interfaces -for handling server-local issues such as resource allocation, calling into -the DB library but with a consistent interface with the server code generated.
  8. - -
  9. -XDR file - The XDR message description file created.
  10. - -
  11. -Server sed file - A sed script that contains commands to apply to the server -procedure file (i.e. the real source file that the server template file -becomes) so that minor interface changes can be consistently and easily -applied to the real code.
  12. - -
  13. -Server procedure file - This is the file that is modified by the sed script -generated.  It originated from the server template file.
  14. -
-The awk script reads a source file, db_server/rpc.src that describes -each operation and what sorts of arguments it takes and what it returns -from the server.  The syntax of the source file describes the interface -to that operation.  There are four (4) parts to the syntax: -
    -
  1. -BEGIN function version# codetype - begins a new functional -interface for the given function.  Each function has -a version number, currently all of them are at version number -one (1).  The code type indicates to the awk script -what kind of code to generate.  The choices are:
  2. - -
      -
    • -CODE - Generate all code, and return a status value.  If specified, -the client code will simply return the status to the user upon completion -of the RPC call.
    • - -
    • -RETCODE - Generate all code and call a return function in the client -template file to deal with client issues or with other returned items.  -If specified, the client code generated will call a function of the form -__dbcl_<name>_ret() -where -<name> is replaced with the function name given here.  This function -is placed in the template file because this indicates that something special -must occur on return.  The arguments to this function are the same -as those for the client function, with the addition of the reply message -structure.
    • - -
    • -NOCLNTCODE - Generate XDR and server code, but no corresponding -client code. (This is used for functions that are not named the same thing -on both sides.  The only use of this at the moment is db_env_create -and db_create.  The environment create call to the server is actually -called from the DBENV->set_rpc_server() -method.  The db_create code exists elsewhere in the library and we -modify that code for the client call.)
    • -
    - -
  3. -ARG RPC-type C-type varname [list-type]- each line of this -describes an argument to the function.  The argument is called varname.  -The C-type given is what it should look like in the C code -generated, such as DB *, u_int32_t, const char *.  The -RPC-type -is an indication about how the RPC request message should be constructed.  -The RPC-types allowed are described below.
  4. - -
  5. -RET RPC-type C-type varname [list-type]- each line of this -describes what the server should return from this procedure call (in addition -to a status, which is always returned and should not be specified).  -The argument is called varname.  The C-type -given is what it should look like in the C code generated, such as DB -*, u_int32_t, const char *.  The RPC-type is an -indication about how the RPC reply message should be constructed.  -The RPC-types are described below.
  6. - -
  7. -END - End the description of this function.  The result is -that when the awk script encounters the END tag, it now has all -the information it needs to construct the generated code for this function.
  8. -
-The RPC-type must be one of the following: -
    -
  • -IGNORE - This argument is not passed to the server and should be -ignored when constructing the XDR code.  Only allowed for an ARG -specfication.
  • - -
  • -STRING - This argument is a string.
  • - -
  • -INT - This argument is an integer of some sort.
  • - -
  • -DBT - This argument is a DBT, resulting in its decomposition into -the request message.
  • - -
  • -LIST - This argument is an opaque list passed to the server (NULL-terminated).  -If an argument of this type is given, it must have a list-type -specified that is one of:
  • - -
      -
    • -STRING
    • - -
    • -INT
    • - -
    • -ID.
    • -
    - -
  • -ID - This argument is an identifier.
  • -
-So, for example, the source for the DB->join RPC call looks like: -
BEGIN   dbjoin          1       RETCODE
-ARG     ID      DB *            dbp 
-ARG     LIST    DBC **          curs    ID
-ARG     IGNORE  DBC **          dbcpp 
-ARG     INT     u_int32_t       flags
-RET     ID      long            dbcid
-END
-Our first line tells us we are writing the dbjoin function.  It requires -special code on the client so we indicate that with the RETCODE.  -This method takes four arguments.  For the RPC request we need the -database ID from the dbp, we construct a NULL-terminated list of IDs for -the cursor list, we ignore the argument to return the cursor handle to -the user, and we pass along the flags.  On the return, the reply contains -a status, by default, and additionally, it contains the ID of the newly -created cursor. -

-Building and Installing

-I need to verify with Don Anderson, but I believe we should just build -the server program, just like we do for db_stat, db_checkpoint, etc.  -Basically it can be treated as a utility program from the building and -installation perspective. -

As mentioned early on, in the section on DB -Modifications, we have a single library, but allowing the user to access -the client portion by sending a flag to db_env_create().  -The Makefile is modified to include the new files. -

Testing is performed in two ways.  First I have a new example program, -that should become part of the example directory.  It is basically -a merging of ex_access.c and ex_env.c.  This example is adequate to -test basic functionality, as it does just does database put/get calls and -appropriate open and close calls.  However, in order to test the full -set of functions a more generalized scheme is required.  For the moment, -I am going to modify the Tcl interface to accept the server information.  -Nothing else should need to change in Tcl.  Then we can either write -our own test modules or use a subset of the existing ones to test functionality -on a regular basis. - - diff --git a/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp deleted file mode 100644 index e536a70518b..00000000000 --- a/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp +++ /dev/null @@ -1,2383 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_server_cxxproc.cpp,v 1.23 2004/09/22 17:30:12 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include - -#include -#endif - -#include "db_server.h" - -#include "db_int.h" -#include "db_cxx.h" - -extern "C" { -#include "dbinc/db_server_int.h" -#include "dbinc_auto/rpc_server_ext.h" -} - -extern "C" void -__env_get_cachesize_proc( - long dbenvcl_id, - __env_get_cachesize_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - replyp->status = dbenv->get_cachesize(&replyp->gbytes, - &replyp->bytes, (int *)&replyp->ncache); -} - -extern "C" void -__env_cachesize_proc( - long dbenvcl_id, - u_int32_t gbytes, - u_int32_t bytes, - u_int32_t ncache, - __env_cachesize_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - int ret; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - ret = dbenv->set_cachesize(gbytes, bytes, ncache); - - replyp->status = ret; - return; -} - -extern "C" void -__env_close_proc( - long dbenvcl_id, - u_int32_t flags, - __env_close_reply *replyp) -{ - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0); - return; -} - -extern "C" void -__env_create_proc( - u_int32_t timeout, - __env_create_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *ctp; - - ctp = new_ct_ent(&replyp->status); - if (ctp == NULL) - return; - - dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS); - ctp->ct_envp = dbenv; - ctp->ct_type = CT_ENV; - ctp->ct_parent = NULL; - ctp->ct_envparent = ctp; - __dbsrv_settimeout(ctp, timeout); - __dbsrv_active(ctp); - replyp->envcl_id = ctp->ct_id; - - replyp->status = 0; - return; -} - -extern "C" void -__env_dbremove_proc( - long dbenvcl_id, - long txnpcl_id, - char *name, - char *subdb, - u_int32_t flags, - __env_dbremove_reply *replyp) -{ - int ret; - DbEnv *dbenv; - DbTxn *txnp; - 
ct_entry *dbenv_ctp, *txnp_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - ret = dbenv->dbremove(txnp, name, subdb, flags); - - replyp->status = ret; - return; -} - -void -__env_dbrename_proc( - long dbenvcl_id, - long txnpcl_id, - char *name, - char *subdb, - char *newname, - u_int32_t flags, - __env_dbrename_reply *replyp) -{ - int ret; - DbEnv *dbenv; - DbTxn *txnp; - ct_entry *dbenv_ctp, *txnp_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - ret = dbenv->dbrename(txnp, name, subdb, newname, flags); - - replyp->status = ret; - return; -} - -extern "C" void -__env_get_encrypt_flags_proc( - long dbenvcl_id, - __env_get_encrypt_flags_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - replyp->status = dbenv->get_encrypt_flags(&replyp->flags); -} - -extern "C" void -__env_encrypt_proc( - long dbenvcl_id, - char *passwd, - u_int32_t flags, - __env_encrypt_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - int ret; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - ret = dbenv->set_encrypt(passwd, flags); - - replyp->status = ret; - return; -} - -extern "C" void -__env_get_flags_proc( - long dbenvcl_id, - __env_get_flags_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - replyp->status = dbenv->get_flags(&replyp->flags); -} - -extern "C" void -__env_flags_proc( - long dbenvcl_id, - u_int32_t flags, - u_int32_t onoff, - __env_flags_reply *replyp) -{ - DbEnv *dbenv; - ct_entry 
*dbenv_ctp; - int ret; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - ret = dbenv->set_flags(flags, onoff); - if (onoff) - dbenv_ctp->ct_envdp.onflags = flags; - else - dbenv_ctp->ct_envdp.offflags = flags; - - replyp->status = ret; - return; -} - -extern "C" void -__env_get_home_proc( - long dbenvcl_id, - __env_get_home_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - replyp->status = dbenv->get_home((const char **)&replyp->home); -} - -extern "C" void -__env_get_open_flags_proc( - long dbenvcl_id, - __env_get_open_flags_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - replyp->status = dbenv->get_open_flags(&replyp->flags); -} - -extern "C" void -__env_open_proc( - long dbenvcl_id, - char *home, - u_int32_t flags, - u_int32_t mode, - __env_open_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp, *new_ctp; - u_int32_t newflags, shareflags; - int ret; - home_entry *fullhome; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - fullhome = get_fullhome(home); - if (fullhome == NULL) { - ret = DB_NOSERVER_HOME; - goto out; - } - - /* - * If they are using locking do deadlock detection for them, - * internally. - */ - if ((flags & DB_INIT_LOCK) && - (ret = dbenv->set_lk_detect(DB_LOCK_DEFAULT)) != 0) - goto out; - - if (__dbsrv_verbose) { - dbenv->set_errfile(stderr); - dbenv->set_errpfx(fullhome->home); - } - - /* - * Mask off flags we ignore - */ - newflags = (flags & ~DB_SERVER_FLAGMASK); - shareflags = (newflags & DB_SERVER_ENVFLAGS); - /* - * Check now whether we can share a handle for this env. - */ - replyp->envcl_id = dbenvcl_id; - if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags)) - != NULL) { - /* - * We can share, clean up old ID, set new one. 
- */ - if (__dbsrv_verbose) - printf("Sharing env ID %ld\n", new_ctp->ct_id); - replyp->envcl_id = new_ctp->ct_id; - ret = __dbenv_close_int(dbenvcl_id, 0, 0); - } else { - ret = dbenv->open(fullhome->home, newflags, mode); - dbenv_ctp->ct_envdp.home = fullhome; - dbenv_ctp->ct_envdp.envflags = shareflags; - } -out: replyp->status = ret; - return; -} - -extern "C" void -__env_remove_proc( - long dbenvcl_id, - char *home, - u_int32_t flags, - __env_remove_reply *replyp) -{ - DbEnv *dbenv; - ct_entry *dbenv_ctp; - int ret; - home_entry *fullhome; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - fullhome = get_fullhome(home); - if (fullhome == NULL) { - replyp->status = DB_NOSERVER_HOME; - return; - } - - ret = dbenv->remove(fullhome->home, flags); - __dbdel_ctp(dbenv_ctp); - replyp->status = ret; - return; -} - -extern "C" void -__txn_abort_proc( - long txnpcl_id, - __txn_abort_reply *replyp) -{ - DbTxn *txnp; - ct_entry *txnp_ctp; - int ret; - - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - - ret = txnp->abort(); - __dbdel_ctp(txnp_ctp); - replyp->status = ret; - return; -} - -extern "C" void -__txn_begin_proc( - long dbenvcl_id, - long parentcl_id, - u_int32_t flags, - __txn_begin_reply *replyp) -{ - DbEnv *dbenv; - DbTxn *parent, *txnp; - ct_entry *ctp, *dbenv_ctp, *parent_ctp; - int ret; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - parent_ctp = NULL; - - ctp = new_ct_ent(&replyp->status); - if (ctp == NULL) - return; - - if (parentcl_id != 0) { - ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN); - parent = (DbTxn *)parent_ctp->ct_anyp; - ctp->ct_activep = parent_ctp->ct_activep; - } else - parent = NULL; - - ret = dbenv->txn_begin(parent, &txnp, flags | DB_TXN_NOWAIT); - if (ret == 0) { - ctp->ct_txnp = txnp; - ctp->ct_type = CT_TXN; - ctp->ct_parent = parent_ctp; - ctp->ct_envparent = dbenv_ctp; - replyp->txnidcl_id = ctp->ct_id; - 
__dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout); - __dbsrv_active(ctp); - } else - __dbclear_ctp(ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__txn_commit_proc( - long txnpcl_id, - u_int32_t flags, - __txn_commit_reply *replyp) -{ - DbTxn *txnp; - ct_entry *txnp_ctp; - int ret; - - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - - ret = txnp->commit(flags); - __dbdel_ctp(txnp_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__txn_discard_proc( - long txnpcl_id, - u_int32_t flags, - __txn_discard_reply *replyp) -{ - DbTxn *txnp; - ct_entry *txnp_ctp; - int ret; - - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - - ret = txnp->discard(flags); - __dbdel_ctp(txnp_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__txn_prepare_proc( - long txnpcl_id, - u_int8_t *gid, - __txn_prepare_reply *replyp) -{ - DbTxn *txnp; - ct_entry *txnp_ctp; - int ret; - - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - - ret = txnp->prepare(gid); - replyp->status = ret; - return; -} - -extern "C" void -__txn_recover_proc( - long dbenvcl_id, - u_int32_t count, - u_int32_t flags, - __txn_recover_reply *replyp, - int * freep) -{ - DbEnv *dbenv; - DbPreplist *dbprep, *p; - ct_entry *dbenv_ctp, *ctp; - long erri, i, retcount; - u_int32_t *txnidp; - int ret; - char *gid; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - *freep = 0; - - if ((ret = __os_malloc( - dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0) - goto out; - if ((ret = dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0) - goto out; - /* - * If there is nothing, success, but it's easy. - */ - replyp->retcount = retcount; // TODO: fix C++ txn_recover - if (retcount == 0) { - replyp->txn.txn_val = NULL; - replyp->txn.txn_len = 0; - replyp->gid.gid_val = NULL; - replyp->gid.gid_len = 0; - } - - /* - * We have our txn list. 
Now we need to allocate the space for - * the txn ID array and the GID array and set them up. - */ - if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, sizeof(u_int32_t), - &replyp->txn.txn_val)) != 0) - goto out; - replyp->txn.txn_len = retcount * sizeof(u_int32_t); - if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, DB_XIDDATASIZE, - &replyp->gid.gid_val)) != 0) { - __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val); - goto out; - } - replyp->gid.gid_len = retcount * DB_XIDDATASIZE; - - /* - * Now walk through our results, creating parallel arrays - * to send back. For each entry we need to create a new - * txn ctp and then fill in the array info. - */ - i = 0; - p = dbprep; - gid = replyp->gid.gid_val; - txnidp = replyp->txn.txn_val; - while (i++ < retcount) { - ctp = new_ct_ent(&ret); - if (ret != 0) { - i--; - goto out2; - } - ctp->ct_txnp = p->txn; - ctp->ct_type = CT_TXN; - ctp->ct_parent = NULL; - ctp->ct_envparent = dbenv_ctp; - __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout); - __dbsrv_active(ctp); - - *txnidp = ctp->ct_id; - memcpy(gid, p->gid, DB_XIDDATASIZE); - - p++; - txnidp++; - gid += DB_XIDDATASIZE; - } - /* - * If we get here, we have success and we have to set freep - * so it'll get properly freed next time. - */ - *freep = 1; -out: - if (dbprep != NULL) - __os_free(dbenv->get_DB_ENV(), dbprep); - replyp->status = ret; - return; -out2: - /* - * We had an error in the middle of creating our new txn - * ct entries. We have to unwind all that we have done. Ugh. 
- */ - for (txnidp = replyp->txn.txn_val, erri = 0; - erri < i; erri++, txnidp++) { - ctp = get_tableent(*txnidp); - __dbclear_ctp(ctp); - } - __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val); - __os_free(dbenv->get_DB_ENV(), replyp->gid.gid_val); - __os_free(dbenv->get_DB_ENV(), dbprep); - replyp->status = ret; - return; -} - -extern "C" void -__db_bt_maxkey_proc( - long dbpcl_id, - u_int32_t maxkey, - __db_bt_maxkey_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_bt_maxkey(maxkey); - - replyp->status = ret; - return; -} - -extern "C" void -__db_associate_proc( - long dbpcl_id, - long txnpcl_id, - long sdbpcl_id, - u_int32_t flags, - __db_associate_reply *replyp) -{ - Db *dbp, *sdbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB); - sdbp = (Db *)sdbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - /* - * We do not support DB_CREATE for associate or the callbacks - * implemented in the Java and JE RPC servers. Users can only - * access secondary indices on a read-only basis, so whatever they - * are looking for needs to be there already. 
- */ - if (LF_ISSET(DB_RPC2ND_MASK | DB_CREATE)) - ret = EINVAL; - else - ret = dbp->associate(txnp, sdbp, NULL, flags); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_bt_minkey_proc( - long dbpcl_id, - __db_get_bt_minkey_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_bt_minkey(&replyp->minkey); -} - -extern "C" void -__db_bt_minkey_proc( - long dbpcl_id, - u_int32_t minkey, - __db_bt_minkey_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_bt_minkey(minkey); - - replyp->status = ret; - return; -} - -extern "C" void -__db_close_proc( - long dbpcl_id, - u_int32_t flags, - __db_close_reply *replyp) -{ - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - replyp->status = __db_close_int(dbpcl_id, flags); - return; -} - -extern "C" void -__db_create_proc( - long dbenvcl_id, - u_int32_t flags, - __db_create_reply *replyp) -{ - Db *dbp; - DbEnv *dbenv; - ct_entry *dbenv_ctp, *dbp_ctp; - - ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV); - dbenv = (DbEnv *)dbenv_ctp->ct_anyp; - - dbp_ctp = new_ct_ent(&replyp->status); - if (dbp_ctp == NULL) - return ; - /* - * We actually require env's for databases. The client should - * have caught it, but just in case. 
- */ - DB_ASSERT(dbenv != NULL); - dbp = new Db(dbenv, flags); - dbp_ctp->ct_dbp = dbp; - dbp_ctp->ct_type = CT_DB; - dbp_ctp->ct_parent = dbenv_ctp; - dbp_ctp->ct_envparent = dbenv_ctp; - replyp->dbcl_id = dbp_ctp->ct_id; - replyp->status = 0; - return; -} - -extern "C" void -__db_del_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t flags, - __db_del_reply *replyp) -{ - Db *dbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - /* Set up key */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - key.set_flags(keyflags); - - ret = dbp->del(txnp, &key, flags); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_encrypt_flags_proc( - long dbpcl_id, - __db_get_encrypt_flags_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_encrypt_flags(&replyp->flags); -} - -extern "C" void -__db_encrypt_proc( - long dbpcl_id, - char *passwd, - u_int32_t flags, - __db_encrypt_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_encrypt(passwd, flags); - replyp->status = ret; - return; -} - -extern "C" void -__db_get_extentsize_proc( - long dbpcl_id, - __db_get_extentsize_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_q_extentsize(&replyp->extentsize); -} - -extern "C" void -__db_extentsize_proc( - long dbpcl_id, - u_int32_t extentsize, - __db_extentsize_reply 
*replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_q_extentsize(extentsize); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_flags_proc( - long dbpcl_id, - __db_get_flags_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_flags(&replyp->flags); -} - -extern "C" void -__db_flags_proc( - long dbpcl_id, - u_int32_t flags, - __db_flags_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_flags(flags); - dbp_ctp->ct_dbdp.setflags = flags; - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __db_get_reply *replyp, - int * freep) -{ - Db *dbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - int key_alloc, bulk_alloc, ret; - void *tmpdata; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - *freep = 0; - bulk_alloc = 0; - - /* Set up key and data */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - /* - * Ignore memory related flags on server. - */ - key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - /* - * Ignore memory related flags on server. 
- */ - dataflags &= DB_DBT_PARTIAL; - if (flags & DB_MULTIPLE) { - if (data.get_data() == 0) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - dataulen, &tmpdata); - if (ret != 0) - goto err; - data.set_data(tmpdata); - bulk_alloc = 1; - } - dataflags |= DB_DBT_USERMEM; - } else - dataflags |= DB_DBT_MALLOC; - data.set_flags(dataflags); - - /* Got all our stuff, now do the get */ - ret = dbp->get(txnp, &key, &data, flags); - /* - * Otherwise just status. - */ - if (ret == 0) { - /* - * XXX - * We need to xdr_free whatever we are returning, next time. - * However, DB does not allocate a new key if one was given - * and we'd be free'ing up space allocated in the request. - * So, allocate a new key/data pointer if it is the same one - * as in the request. - */ - *freep = 1; - /* - * Key - */ - key_alloc = 0; - if (key.get_data() == keydata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - key.get_size(), &replyp->keydata.keydata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, key.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, data.get_data()); - goto err; - } - key_alloc = 1; - memcpy(replyp->keydata.keydata_val, - key.get_data(), key.get_size()); - } else - replyp->keydata.keydata_val = (char *)key.get_data(); - - replyp->keydata.keydata_len = key.get_size(); - - /* - * Data - */ - if (data.get_data() == datadata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - data.get_size(), &replyp->datadata.datadata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, key.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, data.get_data()); - if (key_alloc) - __os_ufree(dbp->get_DB()->dbenv, - replyp->keydata.keydata_val); - goto err; - } - memcpy(replyp->datadata.datadata_val, data.get_data(), - data.get_size()); - } else - replyp->datadata.datadata_val = (char *)data.get_data(); - replyp->datadata.datadata_len = data.get_size(); - } else { -err: replyp->keydata.keydata_val = NULL; - replyp->keydata.keydata_len = 0; - replyp->datadata.datadata_val = NULL; - 
replyp->datadata.datadata_len = 0; - *freep = 0; - if (bulk_alloc) - __os_ufree(dbp->get_DB()->dbenv, data.get_data()); - } - replyp->status = ret; - return; -} - -extern "C" void -__db_get_h_ffactor_proc( - long dbpcl_id, - __db_get_h_ffactor_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_h_ffactor(&replyp->ffactor); -} - -extern "C" void -__db_h_ffactor_proc( - long dbpcl_id, - u_int32_t ffactor, - __db_h_ffactor_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_h_ffactor(ffactor); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_h_nelem_proc( - long dbpcl_id, - __db_get_h_nelem_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_h_nelem(&replyp->nelem); -} - -extern "C" void -__db_h_nelem_proc( - long dbpcl_id, - u_int32_t nelem, - __db_h_nelem_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_h_nelem(nelem); - - replyp->status = ret; - return; -} - -extern "C" void -__db_key_range_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t flags, - __db_key_range_reply *replyp) -{ - Db *dbp; - DB_KEY_RANGE range; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - /* Set up key */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - 
key.set_flags(keyflags); - - ret = dbp->key_range(txnp, &key, &range, flags); - - replyp->status = ret; - replyp->less = range.less; - replyp->equal = range.equal; - replyp->greater = range.greater; - return; -} - -extern "C" void -__db_get_lorder_proc( - long dbpcl_id, - __db_get_lorder_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_lorder((int *)&replyp->lorder); -} - -extern "C" void -__db_lorder_proc( - long dbpcl_id, - u_int32_t lorder, - __db_lorder_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_lorder(lorder); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_name_proc( - long dbpcl_id, - __db_get_name_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_dbname( - (const char **)&replyp->filename, (const char **)&replyp->dbname); -} - -extern "C" void -__db_get_open_flags_proc( - long dbpcl_id, - __db_get_open_flags_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_open_flags(&replyp->flags); -} - -extern "C" void -__db_open_proc( - long dbpcl_id, - long txnpcl_id, - char *name, - char *subdb, - u_int32_t type, - u_int32_t flags, - u_int32_t mode, - __db_open_reply *replyp) -{ - Db *dbp; - DbTxn *txnp; - DBTYPE dbtype; - ct_entry *dbp_ctp, *new_ctp, *txnp_ctp; - int isswapped, ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - replyp->dbcl_id = dbpcl_id; - if ((new_ctp = __dbsrv_sharedb( - dbp_ctp, name, subdb, (DBTYPE)type, flags)) != NULL) { - /* - * We can 
share, clean up old ID, set new one. - */ - if (__dbsrv_verbose) - printf("Sharing db ID %ld\n", new_ctp->ct_id); - replyp->dbcl_id = new_ctp->ct_id; - ret = __db_close_int(dbpcl_id, 0); - goto out; - } - ret = dbp->open(txnp, name, subdb, (DBTYPE)type, flags, mode); - if (ret == 0) { - (void)dbp->get_type(&dbtype); - replyp->type = dbtype; - /* - * We need to determine the byte order of the database - * and send it back to the client. Determine it by - * the server's native order and the swapped value of - * the DB itself. - */ - (void)dbp->get_byteswapped(&isswapped); - if (__db_byteorder(NULL, 1234) == 0) { - if (isswapped == 0) - replyp->lorder = 1234; - else - replyp->lorder = 4321; - } else { - if (isswapped == 0) - replyp->lorder = 4321; - else - replyp->lorder = 1234; - } - dbp_ctp->ct_dbdp.type = dbtype; - dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS); - if (name == NULL) - dbp_ctp->ct_dbdp.db = NULL; - else if ((ret = __os_strdup(dbp->get_DB()->dbenv, name, - &dbp_ctp->ct_dbdp.db)) != 0) - goto out; - if (subdb == NULL) - dbp_ctp->ct_dbdp.subdb = NULL; - else if ((ret = __os_strdup(dbp->get_DB()->dbenv, subdb, - &dbp_ctp->ct_dbdp.subdb)) != 0) - goto out; - } -out: - replyp->status = ret; - return; -} - -extern "C" void -__db_get_pagesize_proc( - long dbpcl_id, - __db_get_pagesize_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_pagesize(&replyp->pagesize); -} - -extern "C" void -__db_pagesize_proc( - long dbpcl_id, - u_int32_t pagesize, - __db_pagesize_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_pagesize(pagesize); - - replyp->status = ret; - return; -} - -extern "C" void -__db_pget_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t skeydlen, - u_int32_t skeydoff, - u_int32_t skeyulen, - u_int32_t skeyflags, - void *skeydata, - 
u_int32_t skeysize, - u_int32_t pkeydlen, - u_int32_t pkeydoff, - u_int32_t pkeyulen, - u_int32_t pkeyflags, - void *pkeydata, - u_int32_t pkeysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __db_pget_reply *replyp, - int * freep) -{ - Db *dbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - int key_alloc, ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - *freep = 0; - - /* - * Ignore memory related flags on server. - */ - /* Set up key and data */ - Dbt skey(skeydata, skeysize); - skey.set_dlen(skeydlen); - skey.set_ulen(skeyulen); - skey.set_doff(skeydoff); - skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL)); - - Dbt pkey(pkeydata, pkeysize); - pkey.set_dlen(pkeydlen); - pkey.set_ulen(pkeyulen); - pkey.set_doff(pkeydoff); - pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL)); - - /* Got all our stuff, now do the get */ - ret = dbp->pget(txnp, &skey, &pkey, &data, flags); - /* - * Otherwise just status. - */ - if (ret == 0) { - /* - * XXX - * We need to xdr_free whatever we are returning, next time. - * However, DB does not allocate a new key if one was given - * and we'd be free'ing up space allocated in the request. - * So, allocate a new key/data pointer if it is the same one - * as in the request. 
- */ - *freep = 1; - /* - * Key - */ - key_alloc = 0; - if (skey.get_data() == skeydata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - skey.get_size(), &replyp->skeydata.skeydata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, data.get_data()); - goto err; - } - key_alloc = 1; - memcpy(replyp->skeydata.skeydata_val, skey.get_data(), - skey.get_size()); - } else - replyp->skeydata.skeydata_val = (char *)skey.get_data(); - - replyp->skeydata.skeydata_len = skey.get_size(); - - /* - * Primary key - */ - if (pkey.get_data() == pkeydata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - pkey.get_size(), &replyp->pkeydata.pkeydata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, data.get_data()); - if (key_alloc) - __os_ufree(dbp->get_DB()->dbenv, - replyp->skeydata.skeydata_val); - goto err; - } - /* - * We can set it to 2, because they cannot send the - * pkey over without sending the skey over too. - * So if they did send a pkey, they must have sent - * the skey as well. - */ - key_alloc = 2; - memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(), - pkey.get_size()); - } else - replyp->pkeydata.pkeydata_val = (char *)pkey.get_data(); - replyp->pkeydata.pkeydata_len = pkey.get_size(); - - /* - * Data - */ - if (data.get_data() == datadata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - data.get_size(), &replyp->datadata.datadata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, skey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, pkey.get_data()); - __os_ufree( - dbp->get_DB()->dbenv, data.get_data()); - /* - * If key_alloc is 1, just skey needs to be - * freed, if key_alloc is 2, both skey and pkey - * need to be freed. 
- */ - if (key_alloc--) - __os_ufree(dbp->get_DB()->dbenv, - replyp->skeydata.skeydata_val); - if (key_alloc) - __os_ufree(dbp->get_DB()->dbenv, - replyp->pkeydata.pkeydata_val); - goto err; - } - memcpy(replyp->datadata.datadata_val, data.get_data(), - data.get_size()); - } else - replyp->datadata.datadata_val = (char *)data.get_data(); - replyp->datadata.datadata_len = data.get_size(); - } else { -err: replyp->skeydata.skeydata_val = NULL; - replyp->skeydata.skeydata_len = 0; - replyp->pkeydata.pkeydata_val = NULL; - replyp->pkeydata.pkeydata_len = 0; - replyp->datadata.datadata_val = NULL; - replyp->datadata.datadata_len = 0; - *freep = 0; - } - replyp->status = ret; - return; -} - -extern "C" void -__db_put_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __db_put_reply *replyp, - int * freep) -{ - Db *dbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - *freep = 0; - - /* Set up key and data */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - data.set_flags(dataflags); - - /* Got all our stuff, now do the put */ - ret = dbp->put(txnp, &key, &data, flags); - /* - * If the client did a DB_APPEND, set up key in reply. - * Otherwise just status. 
- */ - if (ret == 0 && (flags == DB_APPEND)) { - /* - * XXX - * We need to xdr_free whatever we are returning, next time. - * However, DB does not allocate a new key if one was given - * and we'd be free'ing up space allocated in the request. - * So, allocate a new key/data pointer if it is the same one - * as in the request. - */ - *freep = 1; - /* - * Key - */ - if (key.get_data() == keydata) { - ret = __os_umalloc(dbp->get_DB()->dbenv, - key.get_size(), &replyp->keydata.keydata_val); - if (ret != 0) { - __os_ufree( - dbp->get_DB()->dbenv, key.get_data()); - goto err; - } - memcpy(replyp->keydata.keydata_val, - key.get_data(), key.get_size()); - } else - replyp->keydata.keydata_val = (char *)key.get_data(); - - replyp->keydata.keydata_len = key.get_size(); - } else { -err: replyp->keydata.keydata_val = NULL; - replyp->keydata.keydata_len = 0; - *freep = 0; - } - replyp->status = ret; - return; -} - -extern "C" void -__db_get_re_delim_proc( - long dbpcl_id, - __db_get_re_delim_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_re_delim((int *)&replyp->delim); -} - -extern "C" void -__db_re_delim_proc( - long dbpcl_id, - u_int32_t delim, - __db_re_delim_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_re_delim(delim); - - replyp->status = ret; - return; -} - -extern "C" void -__db_get_re_len_proc( - long dbpcl_id, - __db_get_re_len_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_re_len(&replyp->len); -} - -extern "C" void -__db_re_len_proc( - long dbpcl_id, - u_int32_t len, - __db_re_len_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = 
dbp->set_re_len(len); - - replyp->status = ret; - return; -} - -void -__db_get_re_pad_proc( - long dbpcl_id, - __db_get_re_pad_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - replyp->status = dbp->get_re_pad((int *)&replyp->pad); -} - -extern "C" void -__db_re_pad_proc( - long dbpcl_id, - u_int32_t pad, - __db_re_pad_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->set_re_pad(pad); - - replyp->status = ret; - return; -} - -extern "C" void -__db_remove_proc( - long dbpcl_id, - char *name, - char *subdb, - u_int32_t flags, - __db_remove_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->remove(name, subdb, flags); - __dbdel_ctp(dbp_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__db_rename_proc( - long dbpcl_id, - char *name, - char *subdb, - char *newname, - u_int32_t flags, - __db_rename_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->rename(name, subdb, newname, flags); - __dbdel_ctp(dbp_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__db_stat_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t flags, - __db_stat_reply *replyp, - int * freep) -{ - Db *dbp; - DbTxn *txnp; - DBTYPE type; - ct_entry *dbp_ctp, *txnp_ctp; - u_int32_t *q, *p, *retsp; - int i, len, ret; - void *sp; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - ret = dbp->stat(txnp, &sp, flags); - replyp->status = ret; - if (ret != 0) - return; - /* - * We get here, we have success. 
Allocate an array so that - * we can use the list generator. Generate the reply, free - * up the space. - */ - /* - * XXX This assumes that all elements of all stat structures - * are u_int32_t fields. They are, currently. - */ - (void)dbp->get_type(&type); - if (type == DB_HASH) - len = sizeof(DB_HASH_STAT); - else if (type == DB_QUEUE) - len = sizeof(DB_QUEUE_STAT); - else /* BTREE or RECNO are same stats */ - len = sizeof(DB_BTREE_STAT); - replyp->stats.stats_len = len / sizeof(u_int32_t); - - if ((ret = __os_umalloc(dbp->get_DB()->dbenv, - len * replyp->stats.stats_len, &retsp)) != 0) - goto out; - for (i = 0, q = retsp, p = (u_int32_t *)sp; i < len; - i++, q++, p++) - *q = *p; - replyp->stats.stats_val = retsp; - __os_ufree(dbp->get_DB()->dbenv, sp); - if (ret == 0) - *freep = 1; -out: - replyp->status = ret; - return; -} - -extern "C" void -__db_sync_proc( - long dbpcl_id, - u_int32_t flags, - __db_sync_reply *replyp) -{ - Db *dbp; - ct_entry *dbp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - ret = dbp->sync(flags); - - replyp->status = ret; - return; -} - -extern "C" void -__db_truncate_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t flags, - __db_truncate_reply *replyp) -{ - Db *dbp; - DbTxn *txnp; - ct_entry *dbp_ctp, *txnp_ctp; - u_int32_t count; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - } else - txnp = NULL; - - ret = dbp->truncate(txnp, &count, flags); - replyp->status = ret; - if (ret == 0) - replyp->count = count; - return; -} - -extern "C" void -__db_cursor_proc( - long dbpcl_id, - long txnpcl_id, - u_int32_t flags, - __db_cursor_reply *replyp) -{ - Db *dbp; - Dbc *dbc; - DbTxn *txnp; - ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - dbc_ctp = 
new_ct_ent(&replyp->status); - if (dbc_ctp == NULL) - return; - - if (txnpcl_id != 0) { - ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN); - txnp = (DbTxn *)txnp_ctp->ct_anyp; - dbc_ctp->ct_activep = txnp_ctp->ct_activep; - } else - txnp = NULL; - - if ((ret = dbp->cursor(txnp, &dbc, flags)) == 0) { - dbc_ctp->ct_dbc = dbc; - dbc_ctp->ct_type = CT_CURSOR; - dbc_ctp->ct_parent = dbp_ctp; - env_ctp = dbp_ctp->ct_envparent; - dbc_ctp->ct_envparent = env_ctp; - __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout); - __dbsrv_active(dbc_ctp); - replyp->dbcidcl_id = dbc_ctp->ct_id; - } else - __dbclear_ctp(dbc_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__db_join_proc( - long dbpcl_id, - u_int32_t *curs, - u_int32_t curslen, - u_int32_t flags, - __db_join_reply *replyp) -{ - Db *dbp; - Dbc **jcurs, **c; - Dbc *dbc; - ct_entry *dbc_ctp, *ctp, *dbp_ctp; - size_t size; - u_int32_t *cl, i; - int ret; - - ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB); - dbp = (Db *)dbp_ctp->ct_anyp; - - dbc_ctp = new_ct_ent(&replyp->status); - if (dbc_ctp == NULL) - return; - - size = (curslen + 1) * sizeof(Dbc *); - if ((ret = __os_calloc(dbp->get_DB()->dbenv, - curslen + 1, sizeof(Dbc *), &jcurs)) != 0) { - replyp->status = ret; - __dbclear_ctp(dbc_ctp); - return; - } - /* - * If our curslist has a parent txn, we need to use it too - * for the activity timeout. All cursors must be part of - * the same transaction, so just check the first. - */ - ctp = get_tableent(*curs); - DB_ASSERT(ctp->ct_type == CT_CURSOR); - /* - * If we are using a transaction, set the join activity timer - * to point to the parent transaction. - */ - if (ctp->ct_activep != &ctp->ct_active) - dbc_ctp->ct_activep = ctp->ct_activep; - for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) { - ctp = get_tableent(*cl); - if (ctp == NULL) { - replyp->status = DB_NOSERVER_ID; - goto out; - } - /* - * If we are using a txn, the join cursor points to the - * transaction timeout. 
If we are not using a transaction, - * then all the curslist cursors must point to the join - * cursor's timeout so that we do not timeout any of the - * curlist cursors while the join cursor is active. - * Change the type of the curslist ctps to CT_JOIN so that - * we know they are part of a join list and we can distinguish - * them and later restore them when the join cursor is closed. - */ - DB_ASSERT(ctp->ct_type == CT_CURSOR); - ctp->ct_type |= CT_JOIN; - ctp->ct_origp = ctp->ct_activep; - /* - * Setting this to the ct_active field of the dbc_ctp is - * really just a way to distinguish which join dbc this - * cursor is part of. The ct_activep of this cursor is - * not used at all during its lifetime as part of a join - * cursor. - */ - ctp->ct_activep = &dbc_ctp->ct_active; - *c = ctp->ct_dbc; - } - *c = NULL; - if ((ret = dbp->join(jcurs, &dbc, flags)) == 0) { - dbc_ctp->ct_dbc = dbc; - dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR); - dbc_ctp->ct_parent = dbp_ctp; - dbc_ctp->ct_envparent = dbp_ctp->ct_envparent; - __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout); - __dbsrv_active(dbc_ctp); - replyp->dbcidcl_id = dbc_ctp->ct_id; - } else { - __dbclear_ctp(dbc_ctp); - /* - * If we get an error, undo what we did above to any cursors. 
- */ - for (cl = curs; *cl != 0; cl++) { - ctp = get_tableent(*cl); - ctp->ct_type = CT_CURSOR; - ctp->ct_activep = ctp->ct_origp; - } - } - - replyp->status = ret; -out: - __os_free(dbp->get_DB()->dbenv, jcurs); - return; -} - -extern "C" void -__dbc_close_proc( - long dbccl_id, - __dbc_close_reply *replyp) -{ - ct_entry *dbc_ctp; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - replyp->status = __dbc_close_int(dbc_ctp); - return; -} - -extern "C" void -__dbc_count_proc( - long dbccl_id, - u_int32_t flags, - __dbc_count_reply *replyp) -{ - Dbc *dbc; - ct_entry *dbc_ctp; - db_recno_t num; - int ret; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - - ret = dbc->count(&num, flags); - replyp->status = ret; - if (ret == 0) - replyp->dupcount = num; - return; -} - -extern "C" void -__dbc_del_proc( - long dbccl_id, - u_int32_t flags, - __dbc_del_reply *replyp) -{ - Dbc *dbc; - ct_entry *dbc_ctp; - int ret; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - - ret = dbc->del(flags); - - replyp->status = ret; - return; -} - -extern "C" void -__dbc_dup_proc( - long dbccl_id, - u_int32_t flags, - __dbc_dup_reply *replyp) -{ - Dbc *dbc, *newdbc; - ct_entry *dbc_ctp, *new_ctp; - int ret; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - - new_ctp = new_ct_ent(&replyp->status); - if (new_ctp == NULL) - return; - - if ((ret = dbc->dup(&newdbc, flags)) == 0) { - new_ctp->ct_dbc = newdbc; - new_ctp->ct_type = CT_CURSOR; - new_ctp->ct_parent = dbc_ctp->ct_parent; - new_ctp->ct_envparent = dbc_ctp->ct_envparent; - /* - * If our cursor has a parent txn, we need to use it too. 
- */ - if (dbc_ctp->ct_activep != &dbc_ctp->ct_active) - new_ctp->ct_activep = dbc_ctp->ct_activep; - __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout); - __dbsrv_active(new_ctp); - replyp->dbcidcl_id = new_ctp->ct_id; - } else - __dbclear_ctp(new_ctp); - - replyp->status = ret; - return; -} - -extern "C" void -__dbc_get_proc( - long dbccl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __dbc_get_reply *replyp, - int * freep) -{ - Dbc *dbc; - DbEnv *dbenv; - ct_entry *dbc_ctp; - int key_alloc, bulk_alloc, ret; - void *tmpdata; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv); - - *freep = 0; - bulk_alloc = 0; - - /* Set up key and data */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - dataflags &= DB_DBT_PARTIAL; - if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) { - if (data.get_data() == NULL) { - ret = __os_umalloc(dbenv->get_DB_ENV(), - data.get_ulen(), &tmpdata); - if (ret != 0) - goto err; - data.set_data(tmpdata); - bulk_alloc = 1; - } - dataflags |= DB_DBT_USERMEM; - } else - dataflags |= DB_DBT_MALLOC; - data.set_flags(dataflags); - - /* Got all our stuff, now do the get */ - ret = dbc->get(&key, &data, flags); - - /* - * Otherwise just status. - */ - if (ret == 0) { - /* - * XXX - * We need to xdr_free whatever we are returning, next time. - * However, DB does not allocate a new key if one was given - * and we'd be free'ing up space allocated in the request. 
- * So, allocate a new key/data pointer if it is the same one - * as in the request. - */ - *freep = 1; - /* - * Key - */ - key_alloc = 0; - if (key.get_data() == keydata) { - ret = __os_umalloc(dbenv->get_DB_ENV(), key.get_size(), - &replyp->keydata.keydata_val); - if (ret != 0) { - __os_ufree(dbenv->get_DB_ENV(), key.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), data.get_data()); - goto err; - } - key_alloc = 1; - memcpy(replyp->keydata.keydata_val, - key.get_data(), key.get_size()); - } else - replyp->keydata.keydata_val = (char *)key.get_data(); - - replyp->keydata.keydata_len = key.get_size(); - - /* - * Data - */ - if (data.get_data() == datadata) { - ret = __os_umalloc(dbenv->get_DB_ENV(), data.get_size(), - &replyp->datadata.datadata_val); - if (ret != 0) { - __os_ufree(dbenv->get_DB_ENV(), key.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), data.get_data()); - if (key_alloc) - __os_ufree(dbenv->get_DB_ENV(), - replyp->keydata.keydata_val); - goto err; - } - memcpy(replyp->datadata.datadata_val, data.get_data(), - data.get_size()); - } else - replyp->datadata.datadata_val = (char *)data.get_data(); - replyp->datadata.datadata_len = data.get_size(); - } else { -err: replyp->keydata.keydata_val = NULL; - replyp->keydata.keydata_len = 0; - replyp->datadata.datadata_val = NULL; - replyp->datadata.datadata_len = 0; - *freep = 0; - if (bulk_alloc) - __os_ufree(dbenv->get_DB_ENV(), data.get_data()); - } - replyp->status = ret; - return; -} - -extern "C" void -__dbc_pget_proc( - long dbccl_id, - u_int32_t skeydlen, - u_int32_t skeydoff, - u_int32_t skeyulen, - u_int32_t skeyflags, - void *skeydata, - u_int32_t skeysize, - u_int32_t pkeydlen, - u_int32_t pkeydoff, - u_int32_t pkeyulen, - u_int32_t pkeyflags, - void *pkeydata, - u_int32_t pkeysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __dbc_pget_reply *replyp, - int * freep) -{ - Dbc *dbc; - DbEnv 
*dbenv; - ct_entry *dbc_ctp; - int key_alloc, ret; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv); - - *freep = 0; - - /* - * Ignore memory related flags on server. - */ - /* Set up key and data */ - Dbt skey(skeydata, skeysize); - skey.set_dlen(skeydlen); - skey.set_ulen(skeyulen); - skey.set_doff(skeydoff); - skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL)); - - Dbt pkey(pkeydata, pkeysize); - pkey.set_dlen(pkeydlen); - pkey.set_ulen(pkeyulen); - pkey.set_doff(pkeydoff); - pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL)); - - /* Got all our stuff, now do the get */ - ret = dbc->pget(&skey, &pkey, &data, flags); - /* - * Otherwise just status. - */ - if (ret == 0) { - /* - * XXX - * We need to xdr_free whatever we are returning, next time. - * However, DB does not allocate a new key if one was given - * and we'd be free'ing up space allocated in the request. - * So, allocate a new key/data pointer if it is the same one - * as in the request. 
- */ - *freep = 1; - /* - * Key - */ - key_alloc = 0; - if (skey.get_data() == skeydata) { - ret = __os_umalloc(dbenv->get_DB_ENV(), - skey.get_size(), &replyp->skeydata.skeydata_val); - if (ret != 0) { - __os_ufree( - dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), data.get_data()); - goto err; - } - key_alloc = 1; - memcpy(replyp->skeydata.skeydata_val, skey.get_data(), - skey.get_size()); - } else - replyp->skeydata.skeydata_val = (char *)skey.get_data(); - replyp->skeydata.skeydata_len = skey.get_size(); - - /* - * Primary key - */ - if (pkey.get_data() == pkeydata) { - ret = __os_umalloc(dbenv->get_DB_ENV(), - pkey.get_size(), &replyp->pkeydata.pkeydata_val); - if (ret != 0) { - __os_ufree( - dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), data.get_data()); - if (key_alloc) - __os_ufree(dbenv->get_DB_ENV(), - replyp->skeydata.skeydata_val); - goto err; - } - /* - * We can set it to 2, because they cannot send the - * pkey over without sending the skey over too. - * So if they did send a pkey, they must have sent - * the skey as well. - */ - key_alloc = 2; - memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(), - pkey.get_size()); - } else - replyp->pkeydata.pkeydata_val = (char *)pkey.get_data(); - replyp->pkeydata.pkeydata_len = pkey.get_size(); - - /* - * Data - */ - if (data.get_data() == datadata) { - ret = __os_umalloc(dbenv->get_DB_ENV(), - data.get_size(), &replyp->datadata.datadata_val); - if (ret != 0) { - __os_ufree( - dbenv->get_DB_ENV(), skey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), pkey.get_data()); - __os_ufree( - dbenv->get_DB_ENV(), data.get_data()); - /* - * If key_alloc is 1, just skey needs to be - * freed, if key_alloc is 2, both skey and pkey - * need to be freed. 
- */ - if (key_alloc--) - __os_ufree(dbenv->get_DB_ENV(), - replyp->skeydata.skeydata_val); - if (key_alloc) - __os_ufree(dbenv->get_DB_ENV(), - replyp->pkeydata.pkeydata_val); - goto err; - } - memcpy(replyp->datadata.datadata_val, data.get_data(), - data.get_size()); - } else - replyp->datadata.datadata_val = (char *)data.get_data(); - replyp->datadata.datadata_len = data.get_size(); - } else { -err: replyp->skeydata.skeydata_val = NULL; - replyp->skeydata.skeydata_len = 0; - replyp->pkeydata.pkeydata_val = NULL; - replyp->pkeydata.pkeydata_len = 0; - replyp->datadata.datadata_val = NULL; - replyp->datadata.datadata_len = 0; - *freep = 0; - } - replyp->status = ret; - return; -} - -extern "C" void -__dbc_put_proc( - long dbccl_id, - u_int32_t keydlen, - u_int32_t keydoff, - u_int32_t keyulen, - u_int32_t keyflags, - void *keydata, - u_int32_t keysize, - u_int32_t datadlen, - u_int32_t datadoff, - u_int32_t dataulen, - u_int32_t dataflags, - void *datadata, - u_int32_t datasize, - u_int32_t flags, - __dbc_put_reply *replyp, - int * freep) -{ - Db *dbp; - Dbc *dbc; - ct_entry *dbc_ctp; - int ret; - DBTYPE dbtype; - - ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR); - dbc = (Dbc *)dbc_ctp->ct_anyp; - dbp = (Db *)dbc_ctp->ct_parent->ct_anyp; - - /* Set up key and data */ - Dbt key(keydata, keysize); - key.set_dlen(keydlen); - key.set_ulen(keyulen); - key.set_doff(keydoff); - /* - * Ignore memory related flags on server. 
- */ - key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL)); - - Dbt data(datadata, datasize); - data.set_dlen(datadlen); - data.set_ulen(dataulen); - data.set_doff(datadoff); - data.set_flags(dataflags); - - /* Got all our stuff, now do the put */ - ret = dbc->put(&key, &data, flags); - - *freep = 0; - replyp->keydata.keydata_val = NULL; - replyp->keydata.keydata_len = 0; - if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE)) { - ret = dbp->get_type(&dbtype); - if (ret == 0 && dbtype == DB_RECNO) { - /* - * We need to xdr_free whatever we are returning, next - * time. - */ - replyp->keydata.keydata_val = (char *)key.get_data(); - replyp->keydata.keydata_len = key.get_size(); - } - } - replyp->status = ret; - return; -} diff --git a/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp deleted file mode 100644 index d5aacdc0f99..00000000000 --- a/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp +++ /dev/null @@ -1,776 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: db_server_cxxutil.cpp,v 1.17 2004/09/22 17:30:13 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#if TIME_WITH_SYS_TIME -#include -#include -#else -#if HAVE_SYS_TIME_H -#include -#else -#include -#endif -#endif - -#include - -#include -#include -#include -#include -#include -#include -#endif - -#include "db_server.h" - -#include "db_int.h" -#include "db_cxx.h" -#include "dbinc_auto/clib_ext.h" - -extern "C" { -#include "dbinc/db_server_int.h" -#include "dbinc_auto/rpc_server_ext.h" -#include "dbinc_auto/common_ext.h" - -extern int __dbsrv_main __P((void)); -} - -static int add_home __P((char *)); -static int add_passwd __P((char *)); -static int env_recover __P((char *)); -static void __dbclear_child __P((ct_entry *)); - -static LIST_HEAD(cthead, ct_entry) __dbsrv_head; -static LIST_HEAD(homehead, home_entry) __dbsrv_home; -static long __dbsrv_defto = DB_SERVER_TIMEOUT; -static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT; -static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT; -static char *logfile = NULL; -static char *prog; - -static void usage __P((char *)); -static void version_check __P((void)); - -int __dbsrv_verbose = 0; - -int -main( - int argc, - char **argv) -{ - extern char *optarg; - CLIENT *cl; - int ch, ret; - char *passwd; - - prog = argv[0]; - - version_check(); - - /* - * Check whether another server is running or not. There - * is a race condition where two servers could be racing to - * register with the portmapper. The goal of this check is to - * forbid running additional servers (like those started from - * the test suite) if the user is already running one. - * - * XXX - * This does not solve nor prevent two servers from being - * started at the same time and running recovery at the same - * time on the same environments. 
- */ - if ((cl = clnt_create("localhost", - DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) { - fprintf(stderr, - "%s: Berkeley DB RPC server already running.\n", prog); - clnt_destroy(cl); - return (EXIT_FAILURE); - } - - LIST_INIT(&__dbsrv_home); - while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF) - switch (ch) { - case 'h': - (void)add_home(optarg); - break; - case 'I': - if (__db_getlong(NULL, prog, - optarg, 1, LONG_MAX, &__dbsrv_idleto)) - return (EXIT_FAILURE); - break; - case 'L': - logfile = optarg; - break; - case 'P': - passwd = strdup(optarg); - memset(optarg, 0, strlen(optarg)); - if (passwd == NULL) { - fprintf(stderr, "%s: strdup: %s\n", - prog, strerror(errno)); - return (EXIT_FAILURE); - } - if ((ret = add_passwd(passwd)) != 0) { - fprintf(stderr, "%s: strdup: %s\n", - prog, strerror(ret)); - return (EXIT_FAILURE); - } - break; - case 't': - if (__db_getlong(NULL, prog, - optarg, 1, LONG_MAX, &__dbsrv_defto)) - return (EXIT_FAILURE); - break; - case 'T': - if (__db_getlong(NULL, prog, - optarg, 1, LONG_MAX, &__dbsrv_maxto)) - return (EXIT_FAILURE); - break; - case 'V': - printf("%s\n", db_version(NULL, NULL, NULL)); - return (EXIT_SUCCESS); - case 'v': - __dbsrv_verbose = 1; - break; - default: - usage(prog); - } - /* - * Check default timeout against maximum timeout - */ - if (__dbsrv_defto > __dbsrv_maxto) - __dbsrv_defto = __dbsrv_maxto; - - /* - * Check default timeout against idle timeout - * It would be bad to timeout environments sooner than txns. - */ - if (__dbsrv_defto > __dbsrv_idleto) - fprintf(stderr, - "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n", - prog, __dbsrv_idleto, __dbsrv_defto); - - LIST_INIT(&__dbsrv_head); - - /* - * If a client crashes during an RPC, our reply to it - * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily. 
- */ -#ifdef SIGPIPE - signal(SIGPIPE, SIG_IGN); -#endif - - if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile)) - return (EXIT_FAILURE); - - /* - * Now that we are ready to start, run recovery on all the - * environments specified. - */ - if (env_recover(prog) != 0) - return (EXIT_FAILURE); - - /* - * We've done our setup, now call the generated server loop - */ - if (__dbsrv_verbose) - printf("%s: Ready to receive requests\n", prog); - __dbsrv_main(); - - /* NOTREACHED */ - abort(); -} - -static void -usage(char *prog) -{ - fprintf(stderr, "usage: %s %s\n\t%s\n", prog, - "[-Vv] [-h home] [-P passwd]", - "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]"); - exit(EXIT_FAILURE); -} - -static void -version_check() -{ - int v_major, v_minor, v_patch; - - /* Make sure we're loaded with the right version of the DB library. */ - (void)db_version(&v_major, &v_minor, &v_patch); - if (v_major != DB_VERSION_MAJOR || - v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) { - fprintf(stderr, - "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n", - prog, DB_VERSION_MAJOR, DB_VERSION_MINOR, - DB_VERSION_PATCH, v_major, v_minor, v_patch); - exit(EXIT_FAILURE); - } -} - -extern "C" void -__dbsrv_settimeout( - ct_entry *ctp, - u_int32_t to) -{ - if (to > (u_int32_t)__dbsrv_maxto) - ctp->ct_timeout = __dbsrv_maxto; - else if (to <= 0) - ctp->ct_timeout = __dbsrv_defto; - else - ctp->ct_timeout = to; -} - -extern "C" void -__dbsrv_timeout(int force) -{ - static long to_hint = -1; - time_t t; - long to; - ct_entry *ctp, *nextctp; - - if ((t = time(NULL)) == -1) - return; - - /* - * Check hint. If hint is further in the future - * than now, no work to do. - */ - if (!force && to_hint > 0 && t < to_hint) - return; - to_hint = -1; - /* - * Timeout transactions or cursors holding DB resources. - * Do this before timing out envs to properly release resources. - * - * !!! - * We can just loop through this list looking for cursors and txns. 
- * We do not need to verify txn and cursor relationships at this - * point because we maintain the list in LIFO order *and* we - * maintain activity in the ultimate txn parent of any cursor - * so either everything in a txn is timing out, or nothing. - * So, since we are LIFO, we will correctly close/abort all the - * appropriate handles, in the correct order. - */ - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) { - nextctp = LIST_NEXT(ctp, entries); - switch (ctp->ct_type) { - case CT_TXN: - to = *(ctp->ct_activep) + ctp->ct_timeout; - /* TIMEOUT */ - if (to < t) { - if (__dbsrv_verbose) - printf("Timing out txn id %ld\n", - ctp->ct_id); - (void)((DbTxn *)ctp->ct_anyp)->abort(); - __dbdel_ctp(ctp); - /* - * If we timed out an txn, we may have closed - * all sorts of ctp's. - * So start over with a guaranteed good ctp. - */ - nextctp = LIST_FIRST(&__dbsrv_head); - } else if ((to_hint > 0 && to_hint > to) || - to_hint == -1) - to_hint = to; - break; - case CT_CURSOR: - case (CT_JOINCUR | CT_CURSOR): - to = *(ctp->ct_activep) + ctp->ct_timeout; - /* TIMEOUT */ - if (to < t) { - if (__dbsrv_verbose) - printf("Timing out cursor %ld\n", - ctp->ct_id); - (void)__dbc_close_int(ctp); - /* - * Start over with a guaranteed good ctp. - */ - nextctp = LIST_FIRST(&__dbsrv_head); - } else if ((to_hint > 0 && to_hint > to) || - to_hint == -1) - to_hint = to; - break; - default: - break; - } - } - /* - * Timeout idle handles. - * If we are forcing a timeout, we'll close all env handles. - */ - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) { - nextctp = LIST_NEXT(ctp, entries); - if (ctp->ct_type != CT_ENV) - continue; - to = *(ctp->ct_activep) + ctp->ct_idle; - /* TIMEOUT */ - if (to < t || force) { - if (__dbsrv_verbose) - printf("Timing out env id %ld\n", ctp->ct_id); - (void)__dbenv_close_int(ctp->ct_id, 0, 1); - /* - * If we timed out an env, we may have closed - * all sorts of ctp's (maybe even all of them. 
- * So start over with a guaranteed good ctp. - */ - nextctp = LIST_FIRST(&__dbsrv_head); - } - } -} - -/* - * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested - * layers. - */ -static void -__dbclear_child(ct_entry *parent) -{ - ct_entry *ctp, *nextctp; - - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; - ctp = nextctp) { - nextctp = LIST_NEXT(ctp, entries); - if (ctp->ct_type == 0) - continue; - if (ctp->ct_parent == parent) { - __dbclear_child(ctp); - /* - * Need to do this here because le_next may - * have changed with the recursive call and we - * don't want to point to a removed entry. - */ - nextctp = LIST_NEXT(ctp, entries); - __dbclear_ctp(ctp); - } - } -} - -extern "C" void -__dbclear_ctp(ct_entry *ctp) -{ - LIST_REMOVE(ctp, entries); - __os_free(NULL, ctp); -} - -extern "C" void -__dbdel_ctp(ct_entry *parent) -{ - __dbclear_child(parent); - __dbclear_ctp(parent); -} - -extern "C" ct_entry * -new_ct_ent(int *errp) -{ - time_t t; - ct_entry *ctp, *octp; - int ret; - - if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) { - *errp = ret; - return (NULL); - } - memset(ctp, 0, sizeof(ct_entry)); - /* - * Get the time as ID. We may service more than one request per - * second however. If we are, then increment id value until we - * find an unused one. We insert entries in LRU fashion at the - * head of the list. So, if the first entry doesn't match, then - * we know for certain that we can use our entry. 
- */ - if ((t = time(NULL)) == -1) { - *errp = __os_get_errno(); - __os_free(NULL, ctp); - return (NULL); - } - octp = LIST_FIRST(&__dbsrv_head); - if (octp != NULL && octp->ct_id >= t) - t = octp->ct_id + 1; - ctp->ct_id = t; - ctp->ct_idle = __dbsrv_idleto; - ctp->ct_activep = &ctp->ct_active; - ctp->ct_origp = NULL; - ctp->ct_refcount = 1; - - LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries); - return (ctp); -} - -extern "C" ct_entry * -get_tableent(long id) -{ - ct_entry *ctp; - - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; - ctp = LIST_NEXT(ctp, entries)) - if (ctp->ct_id == id) - return (ctp); - return (NULL); -} - -extern "C" ct_entry * -__dbsrv_sharedb(ct_entry *db_ctp, - const char *name, const char *subdb, DBTYPE type, u_int32_t flags) -{ - ct_entry *ctp; - - /* - * Check if we can share a db handle. Criteria for sharing are: - * If any of the non-sharable flags are set, we cannot share. - * Must be a db ctp, obviously. - * Must share the same env parent. - * Must be the same type, or current one DB_UNKNOWN. - * Must be same byteorder, or current one must not care. - * All flags must match. - * Must be same name, but don't share in-memory databases. - * Must be same subdb name. - */ - if (flags & DB_SERVER_DBNOSHARE) - return (NULL); - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; - ctp = LIST_NEXT(ctp, entries)) { - /* - * Skip ourselves. 
- */ - if (ctp == db_ctp) - continue; - if (ctp->ct_type != CT_DB) - continue; - if (ctp->ct_envparent != db_ctp->ct_envparent) - continue; - if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type) - continue; - if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS)) - continue; - if (db_ctp->ct_dbdp.setflags != 0 && - ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags) - continue; - if (name == NULL || ctp->ct_dbdp.db == NULL || - strcmp(name, ctp->ct_dbdp.db) != 0) - continue; - if (subdb != ctp->ct_dbdp.subdb && - (subdb == NULL || ctp->ct_dbdp.subdb == NULL || - strcmp(subdb, ctp->ct_dbdp.subdb) != 0)) - continue; - /* - * If we get here, then we match. - */ - ctp->ct_refcount++; - return (ctp); - } - - return (NULL); -} - -extern "C" ct_entry * -__dbsrv_shareenv(ct_entry *env_ctp, home_entry *home, u_int32_t flags) -{ - ct_entry *ctp; - - /* - * Check if we can share an env. Criteria for sharing are: - * Must be an env ctp, obviously. - * Must share the same home env. - * All flags must match. - */ - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; - ctp = LIST_NEXT(ctp, entries)) { - /* - * Skip ourselves. - */ - if (ctp == env_ctp) - continue; - if (ctp->ct_type != CT_ENV) - continue; - if (ctp->ct_envdp.home != home) - continue; - if (ctp->ct_envdp.envflags != flags) - continue; - if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags) - continue; - if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags) - continue; - /* - * If we get here, then we match. The only thing left to - * check is the timeout. Since the server timeout set by - * the client is a hint, for sharing we'll give them the - * benefit of the doubt and grant them the longer timeout. 
- */ - if (ctp->ct_timeout < env_ctp->ct_timeout) - ctp->ct_timeout = env_ctp->ct_timeout; - ctp->ct_refcount++; - return (ctp); - } - - return (NULL); -} - -extern "C" void -__dbsrv_active(ct_entry *ctp) -{ - time_t t; - ct_entry *envctp; - - if (ctp == NULL) - return; - if ((t = time(NULL)) == -1) - return; - *(ctp->ct_activep) = t; - if ((envctp = ctp->ct_envparent) == NULL) - return; - *(envctp->ct_activep) = t; - return; -} - -extern "C" int -__db_close_int(long id, u_int32_t flags) -{ - Db *dbp; - int ret; - ct_entry *ctp; - - ret = 0; - ctp = get_tableent(id); - if (ctp == NULL) - return (DB_NOSERVER_ID); - DB_ASSERT(ctp->ct_type == CT_DB); - if (__dbsrv_verbose && ctp->ct_refcount != 1) - printf("Deref'ing dbp id %ld, refcount %d\n", - id, ctp->ct_refcount); - if (--ctp->ct_refcount != 0) - return (ret); - dbp = ctp->ct_dbp; - if (__dbsrv_verbose) - printf("Closing dbp id %ld\n", id); - - ret = dbp->close(flags); - __dbdel_ctp(ctp); - return (ret); -} - -extern "C" int -__dbc_close_int(ct_entry *dbc_ctp) -{ - Dbc *dbc; - int ret; - ct_entry *ctp; - - dbc = (Dbc *)dbc_ctp->ct_anyp; - - ret = dbc->close(); - /* - * If this cursor is a join cursor then we need to fix up the - * cursors that it was joined from so that they are independent again. - */ - if (dbc_ctp->ct_type & CT_JOINCUR) - for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; - ctp = LIST_NEXT(ctp, entries)) { - /* - * Test if it is a join cursor, and if it is part - * of this one. 
- */ - if ((ctp->ct_type & CT_JOIN) && - ctp->ct_activep == &dbc_ctp->ct_active) { - ctp->ct_type &= ~CT_JOIN; - ctp->ct_activep = ctp->ct_origp; - __dbsrv_active(ctp); - } - } - __dbclear_ctp(dbc_ctp); - return (ret); - -} - -extern "C" int -__dbenv_close_int(long id, u_int32_t flags, int force) -{ - DbEnv *dbenv; - int ret; - ct_entry *ctp, *dbctp, *nextctp; - - ret = 0; - ctp = get_tableent(id); - if (ctp == NULL) - return (DB_NOSERVER_ID); - DB_ASSERT(ctp->ct_type == CT_ENV); - if (__dbsrv_verbose && ctp->ct_refcount != 1) - printf("Deref'ing env id %ld, refcount %d\n", - id, ctp->ct_refcount); - /* - * If we are timing out, we need to force the close, no matter - * what the refcount. - */ - if (--ctp->ct_refcount != 0 && !force) - return (ret); - dbenv = ctp->ct_envp; - if (__dbsrv_verbose) - printf("Closing env id %ld\n", id); - - /* - * If we're timing out an env, we want to close all of its - * database handles as well. All of the txns and cursors - * must have been timed out prior to timing out the env. - */ - if (force) - for (dbctp = LIST_FIRST(&__dbsrv_head); - dbctp != NULL; dbctp = nextctp) { - nextctp = LIST_NEXT(dbctp, entries); - if (dbctp->ct_type != CT_DB) - continue; - if (dbctp->ct_envparent != ctp) - continue; - /* - * We found a DB handle that is part of this - * environment. Close it. - */ - __db_close_int(dbctp->ct_id, 0); - /* - * If we timed out a dbp, we may have removed - * multiple ctp entries. Start over with a - * guaranteed good ctp. 
- */ - nextctp = LIST_FIRST(&__dbsrv_head); - } - - ret = dbenv->close(flags); - __dbdel_ctp(ctp); - return (ret); -} - -static int -add_home(char *home) -{ - home_entry *hp, *homep; - int ret; - - if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0) - return (ret); - if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0) - return (ret); - memcpy(hp->home, home, strlen(home)+1); - hp->dir = home; - hp->passwd = NULL; - /* - * This loop is to remove any trailing path separators, - * to assure hp->name points to the last component. - */ - hp->name = __db_rpath(home); - if (hp->name != NULL) { - *(hp->name) = '\0'; - hp->name++; - } else - hp->name = home; - while (*(hp->name) == '\0') { - hp->name = __db_rpath(home); - *(hp->name) = '\0'; - hp->name++; - } - /* - * Now we have successfully added it. Make sure there are no - * identical names. - */ - for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL; - homep = LIST_NEXT(homep, entries)) - if (strcmp(homep->name, hp->name) == 0) { - printf("Already added home name %s, at directory %s\n", - hp->name, homep->dir); - return (-1); - } - LIST_INSERT_HEAD(&__dbsrv_home, hp, entries); - if (__dbsrv_verbose) - printf("Added home %s in dir %s\n", hp->name, hp->dir); - return (0); -} - -static int -add_passwd(char *passwd) -{ - home_entry *hp; - - /* - * We add the passwd to the last given home dir. If there - * isn't a home dir, or the most recent one already has a - * passwd, then there is a user error. - */ - hp = LIST_FIRST(&__dbsrv_home); - if (hp == NULL || hp->passwd != NULL) - return (EINVAL); - /* - * We've already strdup'ed the passwd above, so we don't need - * to malloc new space, just point to it. 
- */ - hp->passwd = passwd; - return (0); -} - -extern "C" home_entry * -get_fullhome(char *name) -{ - home_entry *hp; - - if (name == NULL) - return (NULL); - - for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL; - hp = LIST_NEXT(hp, entries)) - if (strcmp(name, hp->name) == 0) - return (hp); - return (NULL); -} - -static int -env_recover(char *progname) -{ - DbEnv *dbenv; - home_entry *hp; - u_int32_t flags; - int exitval, ret; - - for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL; - hp = LIST_NEXT(hp, entries)) { - exitval = 0; - dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS); - if (__dbsrv_verbose == 1) - (void)dbenv->set_verbose(DB_VERB_RECOVERY, 1); - dbenv->set_errfile(stderr); - dbenv->set_errpfx(progname); - if (hp->passwd != NULL) - (void)dbenv->set_encrypt(hp->passwd, DB_ENCRYPT_AES); - - /* - * Initialize the env with DB_RECOVER. That is all we - * have to do to run recovery. - */ - if (__dbsrv_verbose) - printf("Running recovery on %s\n", hp->home); - flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | - DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER; - if ((ret = dbenv->open(hp->home, flags, 0)) != 0) { - dbenv->err(ret, "DbEnv->open"); - goto error; - } - - if (0) { -error: exitval = 1; - } - if ((ret = dbenv->close(0)) != 0) { - exitval = 1; - fprintf(stderr, "%s: dbenv->close: %s\n", - progname, db_strerror(ret)); - } - if (exitval) - return (exitval); - } - return (0); -} diff --git a/storage/bdb/rpc_server/java/DbDispatcher.java b/storage/bdb/rpc_server/java/DbDispatcher.java deleted file mode 100644 index 5c5e63fc2ad..00000000000 --- a/storage/bdb/rpc_server/java/DbDispatcher.java +++ /dev/null @@ -1,590 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2002 - * Sleepycat Software. All rights reserved. 
- * - * $Id: DbDispatcher.java,v 1.5 2002/08/09 01:56:08 bostic Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import java.io.IOException; -import org.acplt.oncrpc.OncRpcException; - -/** - * Dispatcher for RPC messages for the Java RPC server. - * These are hooks that translate between RPC msg/reply structures and - * DB calls, which keeps the real implementation code in Rpc* classes cleaner. - */ -public abstract class DbDispatcher extends DbServerStub -{ - abstract int addEnv(RpcDbEnv rdbenv); - abstract int addDb(RpcDb rdb); - abstract int addTxn(RpcDbTxn rtxn); - abstract int addCursor(RpcDbc rdbc); - abstract void delEnv(RpcDbEnv rdbenv); - abstract void delDb(RpcDb rdb); - abstract void delTxn(RpcDbTxn rtxn); - abstract void delCursor(RpcDbc rdbc); - abstract RpcDbEnv getEnv(int envid); - abstract RpcDb getDb(int dbid); - abstract RpcDbTxn getTxn(int txnbid); - abstract RpcDbc getCursor(int dbcid); - - public DbDispatcher() throws IOException, OncRpcException - { - super(); - } - - //// Db methods - - public __db_associate_reply __DB_db_associate_4001(__db_associate_msg args) - { - __db_associate_reply reply = new __db_associate_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.associate(this, args, reply); - return reply; - } - - public __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg args) - { - __db_bt_maxkey_reply reply = new __db_bt_maxkey_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_bt_maxkey(this, args, reply); - return reply; - } - - public __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg args) - { - __db_bt_minkey_reply reply = new __db_bt_minkey_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_bt_minkey(this, args, reply); - return reply; - } - - public __db_close_reply 
__DB_db_close_4001(__db_close_msg args) - { - __db_close_reply reply = new __db_close_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.close(this, args, reply); - return reply; - } - - public __db_create_reply __DB_db_create_4001(__db_create_msg args) - { - __db_create_reply reply = new __db_create_reply(); - RpcDb rdb = new RpcDb(getEnv(args.dbenvcl_id)); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.create(this, args, reply); - return reply; - } - - public __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg args) - { - __db_cursor_reply reply = new __db_cursor_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.cursor(this, args, reply); - return reply; - } - - public __db_del_reply __DB_db_del_4001(__db_del_msg args) - { - __db_del_reply reply = new __db_del_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.del(this, args, reply); - return reply; - } - - public __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg args) - { - __db_encrypt_reply reply = new __db_encrypt_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_encrypt(this, args, reply); - return reply; - } - - public __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg args) - { - __db_extentsize_reply reply = new __db_extentsize_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_q_extentsize(this, args, reply); - return reply; - } - - public __db_flags_reply __DB_db_flags_4001(__db_flags_msg args) - { - __db_flags_reply reply = new __db_flags_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_flags(this, args, reply); - return reply; - } - - public __db_get_reply 
__DB_db_get_4001(__db_get_msg args) - { - __db_get_reply reply = new __db_get_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.get(this, args, reply); - return reply; - } - - public __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg args) - { - __db_h_ffactor_reply reply = new __db_h_ffactor_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_h_ffactor(this, args, reply); - return reply; - } - - public __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg args) - { - __db_h_nelem_reply reply = new __db_h_nelem_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_h_nelem(this, args, reply); - return reply; - } - - public __db_join_reply __DB_db_join_4001(__db_join_msg args) - { - __db_join_reply reply = new __db_join_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.join(this, args, reply); - return reply; - } - - public __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg args) - { - __db_key_range_reply reply = new __db_key_range_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.key_range(this, args, reply); - return reply; - } - - public __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg args) - { - __db_lorder_reply reply = new __db_lorder_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_lorder(this, args, reply); - return reply; - } - - public __db_open_reply __DB_db_open_4001(__db_open_msg args) - { - __db_open_reply reply = new __db_open_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.open(this, args, reply); - return reply; - } - - public __db_pagesize_reply 
__DB_db_pagesize_4001(__db_pagesize_msg args) - { - __db_pagesize_reply reply = new __db_pagesize_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_pagesize(this, args, reply); - return reply; - } - - public __db_pget_reply __DB_db_pget_4001(__db_pget_msg args) - { - __db_pget_reply reply = new __db_pget_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.pget(this, args, reply); - return reply; - } - - public __db_put_reply __DB_db_put_4001(__db_put_msg args) - { - __db_put_reply reply = new __db_put_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.put(this, args, reply); - return reply; - } - - public __db_remove_reply __DB_db_remove_4001(__db_remove_msg args) - { - __db_remove_reply reply = new __db_remove_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.remove(this, args, reply); - return reply; - } - - public __db_rename_reply __DB_db_rename_4001(__db_rename_msg args) - { - __db_rename_reply reply = new __db_rename_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.rename(this, args, reply); - return reply; - } - - public __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg args) - { - __db_re_delim_reply reply = new __db_re_delim_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_re_delim(this, args, reply); - return reply; - } - - public __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg args) - { - __db_re_len_reply reply = new __db_re_len_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_re_len(this, args, reply); - return reply; - } - - public __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg args) 
- { - __db_re_pad_reply reply = new __db_re_pad_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.set_re_pad(this, args, reply); - return reply; - } - - public __db_stat_reply __DB_db_stat_4001(__db_stat_msg args) - { - __db_stat_reply reply = new __db_stat_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.stat(this, args, reply); - return reply; - } - - public __db_sync_reply __DB_db_sync_4001(__db_sync_msg args) - { - __db_sync_reply reply = new __db_sync_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.sync(this, args, reply); - return reply; - } - - public __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg args) - { - __db_truncate_reply reply = new __db_truncate_reply(); - RpcDb rdb = getDb(args.dbpcl_id); - if (rdb == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdb.truncate(this, args, reply); - return reply; - } - - //// Cursor methods - - public __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg args) - { - __dbc_close_reply reply = new __dbc_close_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.close(this, args, reply); - return reply; - } - - public __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg args) - { - __dbc_count_reply reply = new __dbc_count_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.count(this, args, reply); - return reply; - } - - public __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg args) - { - __dbc_del_reply reply = new __dbc_del_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.del(this, args, reply); - return reply; - } - - public __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg args) - { - __dbc_dup_reply reply 
= new __dbc_dup_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.dup(this, args, reply); - return reply; - } - - public __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg args) - { - __dbc_get_reply reply = new __dbc_get_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.get(this, args, reply); - return reply; - } - - public __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg args) { - __dbc_pget_reply reply = new __dbc_pget_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.pget(this, args, reply); - return reply; - } - - public __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg args) { - __dbc_put_reply reply = new __dbc_put_reply(); - RpcDbc rdbc = getCursor(args.dbccl_id); - if (rdbc == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbc.put(this, args, reply); - return reply; - } - - //// Environment methods - - public __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg args) - { - __env_cachesize_reply reply = new __env_cachesize_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.set_cachesize(this, args, reply); - return reply; - } - - public __env_close_reply __DB_env_close_4001(__env_close_msg args) - { - __env_close_reply reply = new __env_close_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.close(this, args, reply); - return reply; - } - - public __env_create_reply __DB_env_create_4001(__env_create_msg args) - { - __env_create_reply reply = new __env_create_reply(); - RpcDbEnv rdbenv = new RpcDbEnv(); - rdbenv.create(this, args, reply); - return reply; - } - - public __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg args) - { - __env_dbremove_reply reply = new 
__env_dbremove_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.dbremove(this, args, reply); - return reply; - } - - public __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg args) - { - __env_dbrename_reply reply = new __env_dbrename_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.dbrename(this, args, reply); - return reply; - } - - public __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg args) - { - __env_encrypt_reply reply = new __env_encrypt_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.set_encrypt(this, args, reply); - return reply; - } - - public __env_flags_reply __DB_env_flags_4001(__env_flags_msg args) - { - __env_flags_reply reply = new __env_flags_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.set_flags(this, args, reply); - return reply; - } - - public __env_open_reply __DB_env_open_4001(__env_open_msg args) - { - __env_open_reply reply = new __env_open_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.open(this, args, reply); - return reply; - } - - public __env_remove_reply __DB_env_remove_4001(__env_remove_msg args) - { - __env_remove_reply reply = new __env_remove_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.remove(this, args, reply); - return reply; - } - - //// Transaction methods - - public __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg args) - { - __txn_abort_reply reply = new __txn_abort_reply(); - RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); - if (rdbtxn == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbtxn.abort(this, args, reply); - 
return reply; - } - - public __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg args) - { - __txn_begin_reply reply = new __txn_begin_reply(); - RpcDbTxn rdbtxn = new RpcDbTxn(getEnv(args.dbenvcl_id), null); - rdbtxn.begin(this, args, reply); - return reply; - } - - public __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg args) - { - __txn_commit_reply reply = new __txn_commit_reply(); - RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); - if (rdbtxn == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbtxn.commit(this, args, reply); - return reply; - } - - public __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg args) - { - __txn_discard_reply reply = new __txn_discard_reply(); - RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); - if (rdbtxn == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbtxn.discard(this, args, reply); - return reply; - } - - public __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg args) - { - __txn_prepare_reply reply = new __txn_prepare_reply(); - RpcDbTxn rdbtxn = getTxn(args.txnpcl_id); - if (rdbtxn == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbtxn.prepare(this, args, reply); - return reply; - } - - public __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg args) - { - __txn_recover_reply reply = new __txn_recover_reply(); - RpcDbEnv rdbenv = getEnv(args.dbenvcl_id); - if (rdbenv == null) - reply.status = Db.DB_NOSERVER_ID; - else - rdbenv.txn_recover(this, args, reply); - return reply; - } -} diff --git a/storage/bdb/rpc_server/java/DbServer.java b/storage/bdb/rpc_server/java/DbServer.java deleted file mode 100644 index 9b20becbcdc..00000000000 --- a/storage/bdb/rpc_server/java/DbServer.java +++ /dev/null @@ -1,301 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2002 - * Sleepycat Software. All rights reserved. 
- * - * $Id: DbServer.java,v 1.5 2002/08/09 01:56:09 bostic Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import java.io.*; -import java.util.*; -import org.acplt.oncrpc.OncRpcException; -import org.acplt.oncrpc.server.OncRpcCallInformation; - -/** - * Main entry point for the Java version of the Berkeley DB RPC server - */ -public class DbServer extends DbDispatcher -{ - public static long idleto = 10 * 60 * 1000; // 5 minutes - public static long defto = 5 * 60 * 1000; // 5 minutes - public static long maxto = 60 * 60 * 1000; // 1 hour - public static String passwd = null; - public static PrintWriter err; - - long now, hint; // updated each operation - FreeList env_list = new FreeList(); - FreeList db_list = new FreeList(); - FreeList txn_list = new FreeList(); - FreeList cursor_list = new FreeList(); - - public DbServer() throws IOException, OncRpcException - { - super(); - init_lists(); - } - - public void dispatchOncRpcCall(OncRpcCallInformation call, int program, - int version, int procedure) throws OncRpcException, IOException - { - long newnow = System.currentTimeMillis(); - // DbServer.err.println("Dispatching RPC call " + procedure + " after delay of " + (newnow - now)); - now = newnow; - // DbServer.err.flush(); - super.dispatchOncRpcCall(call, program, version, procedure); - - try { - doTimeouts(); - } catch(Throwable t) { - System.err.println("Caught " + t + " during doTimeouts()"); - t.printStackTrace(System.err); - } - } - - // Internal methods to track context - private void init_lists() - { - // We do this so that getEnv/Db/etc(0) == null - env_list.add(null); - db_list.add(null); - txn_list.add(null); - cursor_list.add(null); - } - - int addEnv(RpcDbEnv rdbenv) - { - rdbenv.timer.last_access = now; - int id = env_list.add(rdbenv); - return id; - } - - int addDb(RpcDb rdb) - { - int id = db_list.add(rdb); - return id; - } - - int addTxn(RpcDbTxn rtxn) - { - rtxn.timer.last_access = now; - int id = 
txn_list.add(rtxn); - return id; - } - - int addCursor(RpcDbc rdbc) - { - rdbc.timer.last_access = now; - int id = cursor_list.add(rdbc); - return id; - } - - void delEnv(RpcDbEnv rdbenv) - { - // cursors and transactions will already have been cleaned up - for(LocalIterator i = db_list.iterator(); i.hasNext(); ) { - RpcDb rdb = (RpcDb)i.next(); - if (rdb != null && rdb.rdbenv == rdbenv) - delDb(rdb); - } - - env_list.del(rdbenv); - rdbenv.dispose(); - } - - void delDb(RpcDb rdb) - { - db_list.del(rdb); - rdb.dispose(); - - for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) { - RpcDbc rdbc = (RpcDbc)i.next(); - if (rdbc != null && rdbc.timer == rdb) - i.remove(); - } - } - - void delTxn(RpcDbTxn rtxn) - { - txn_list.del(rtxn); - rtxn.dispose(); - - for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) { - RpcDbc rdbc = (RpcDbc)i.next(); - if (rdbc != null && rdbc.timer == rtxn) - i.remove(); - } - - for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) { - RpcDbTxn rtxn_child = (RpcDbTxn)i.next(); - if (rtxn_child != null && rtxn_child.timer == rtxn) - i.remove(); - } - } - - void delCursor(RpcDbc rdbc) - { - cursor_list.del(rdbc); - rdbc.dispose(); - } - - RpcDbEnv getEnv(int envid) - { - RpcDbEnv rdbenv = (RpcDbEnv)env_list.get(envid); - if (rdbenv != null) - rdbenv.timer.last_access = now; - return rdbenv; - } - - RpcDb getDb(int dbid) - { - RpcDb rdb = (RpcDb)db_list.get(dbid); - if (rdb != null) - rdb.rdbenv.timer.last_access = now; - return rdb; - } - - RpcDbTxn getTxn(int txnid) - { - RpcDbTxn rtxn = (RpcDbTxn)txn_list.get(txnid); - if (rtxn != null) - rtxn.timer.last_access = rtxn.rdbenv.timer.last_access = now; - return rtxn; - } - - RpcDbc getCursor(int dbcid) - { - RpcDbc rdbc = (RpcDbc)cursor_list.get(dbcid); - if (rdbc != null) - rdbc.last_access = rdbc.timer.last_access = rdbc.rdbenv.timer.last_access = now; - return rdbc; - } - - void doTimeouts() - { - if (now < hint) { - // DbServer.err.println("Skipping cleaner sweep - now = " 
+ now + ", hint = " + hint); - return; - } - - // DbServer.err.println("Starting a cleaner sweep"); - hint = now + DbServer.maxto; - - for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) { - RpcDbc rdbc = (RpcDbc)i.next(); - if (rdbc == null) - continue; - - long end_time = rdbc.timer.last_access + rdbc.rdbenv.timeout; - // DbServer.err.println("Examining " + rdbc + ", time left = " + (end_time - now)); - if (end_time < now) { - DbServer.err.println("Cleaning up " + rdbc); - delCursor(rdbc); - } else if (end_time < hint) - hint = end_time; - } - - for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) { - RpcDbTxn rtxn = (RpcDbTxn)i.next(); - if (rtxn == null) - continue; - - long end_time = rtxn.timer.last_access + rtxn.rdbenv.timeout; - // DbServer.err.println("Examining " + rtxn + ", time left = " + (end_time - now)); - if (end_time < now) { - DbServer.err.println("Cleaning up " + rtxn); - delTxn(rtxn); - } else if (end_time < hint) - hint = end_time; - } - - for(LocalIterator i = env_list.iterator(); i.hasNext(); ) { - RpcDbEnv rdbenv = (RpcDbEnv)i.next(); - if (rdbenv == null) - continue; - - long end_time = rdbenv.timer.last_access + rdbenv.idletime; - // DbServer.err.println("Examining " + rdbenv + ", time left = " + (end_time - now)); - if (end_time < now) { - DbServer.err.println("Cleaning up " + rdbenv); - delEnv(rdbenv); - } - } - - // if we didn't find anything, reset the hint - if (hint == now + DbServer.maxto) - hint = 0; - - // DbServer.err.println("Finishing a cleaner sweep"); - } - - // Some constants that aren't available elsewhere - static final int DB_SERVER_FLAGMASK = Db.DB_LOCKDOWN | - Db.DB_PRIVATE | Db.DB_RECOVER | Db.DB_RECOVER_FATAL | - Db.DB_SYSTEM_MEM | Db.DB_USE_ENVIRON | - Db.DB_USE_ENVIRON_ROOT; - static final int DB_SERVER_ENVFLAGS = Db.DB_INIT_CDB | - Db.DB_INIT_LOCK | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL | - Db.DB_INIT_TXN | Db.DB_JOINENV; - static final int DB_SERVER_DBFLAGS = Db.DB_DIRTY_READ | - Db.DB_NOMMAP | 
Db.DB_RDONLY; - static final int DB_SERVER_DBNOSHARE = Db.DB_EXCL | Db.DB_TRUNCATE; - - public static void main(String[] args) - { - System.out.println("Starting DbServer..."); - for (int i = 0; i < args.length; i++) { - if (args[i].charAt(0) != '-') - usage(); - - switch (args[i].charAt(1)) { - case 'h': - ++i; // add_home(args[++i]); - break; - case 'I': - idleto = Long.parseLong(args[++i]) * 1000L; - break; - case 'P': - passwd = args[++i]; - break; - case 't': - defto = Long.parseLong(args[++i]) * 1000L; - break; - case 'T': - maxto = Long.parseLong(args[++i]) * 1000L; - break; - case 'V': - // version; - break; - case 'v': - // verbose - break; - default: - usage(); - } - } - - try { - DbServer.err = new PrintWriter(new FileOutputStream("JavaRPCServer.trace", true)); - DbServer server = new DbServer(); - server.run(); - } catch (Throwable e) { - System.out.println("DbServer exception:"); - e.printStackTrace(DbServer.err); - } finally { - if (DbServer.err != null) - DbServer.err.close(); - } - - System.out.println("DbServer stopped."); - } - - static void usage() - { - System.err.println("usage: java com.sleepycat.db.rpcserver.DbServer \\"); - System.err.println("[-Vv] [-h home] [-P passwd] [-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]"); - System.exit(1); - } -} diff --git a/storage/bdb/rpc_server/java/FreeList.java b/storage/bdb/rpc_server/java/FreeList.java deleted file mode 100644 index bec2b877276..00000000000 --- a/storage/bdb/rpc_server/java/FreeList.java +++ /dev/null @@ -1,101 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: FreeList.java,v 1.7 2004/04/06 20:43:41 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import java.util.*; - -/** - * Keep track of a list of objects by id with a free list. - * Intentionally package-protected exposure. 
- */ -class FreeList { - class FreeIndex { - int index; - FreeIndex(int index) { this.index = index; } - int getIndex() { return index; } - } - - Vector items = new Vector(); - FreeIndex free_head = null; - - public synchronized int add(Object obj) { - int pos; - if (free_head == null) { - pos = items.size(); - items.addElement(obj); - if (pos + 1 % 1000 == 0) - Server.err.println(this + " grew to size " + (pos + 1)); - } else { - pos = free_head.getIndex(); - free_head = (FreeIndex)items.elementAt(pos); - items.setElementAt(obj, pos); - } - return pos; - } - - public synchronized void del(int pos) { - Object obj = items.elementAt(pos); - if (obj != null && obj instanceof FreeIndex) - throw new NoSuchElementException("index " + pos + " has already been freed"); - items.setElementAt(free_head, pos); - free_head = new FreeIndex(pos); - } - - public void del(Object obj) { - del(items.indexOf(obj)); - } - - public Object get(int pos) { - Object obj = items.elementAt(pos); - if (obj instanceof FreeIndex) - obj = null; - return obj; - } - - public LocalIterator iterator() { - return new FreeListIterator(); - } - - /** - * Iterator for a FreeList. Note that this class doesn't implement - * java.util.Iterator to maintain compatibility with Java 1.1 - * Intentionally package-protected exposure. 
- */ - class FreeListIterator implements LocalIterator { - int current; - - FreeListIterator() { current = findNext(-1); } - - private int findNext(int start) { - int next = start; - while (++next < items.size()) { - Object obj = items.elementAt(next); - if (obj == null || !(obj instanceof FreeIndex)) - break; - } - return next; - } - - public boolean hasNext() { - return (findNext(current) < items.size()); - } - - public Object next() { - current = findNext(current); - if (current == items.size()) - throw new NoSuchElementException("enumerated past end of FreeList"); - return items.elementAt(current); - } - - public void remove() { - del(current); - } - } -} diff --git a/storage/bdb/rpc_server/java/LocalIterator.java b/storage/bdb/rpc_server/java/LocalIterator.java deleted file mode 100644 index f142eb31832..00000000000 --- a/storage/bdb/rpc_server/java/LocalIterator.java +++ /dev/null @@ -1,23 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: LocalIterator.java,v 1.5 2004/04/06 20:43:41 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import java.util.*; - -/** - * Iterator interface. Note that this matches java.util.Iterator - * but maintains compatibility with Java 1.1 - * Intentionally package-protected exposure. - */ -interface LocalIterator { - boolean hasNext(); - Object next(); - void remove(); -} diff --git a/storage/bdb/rpc_server/java/README b/storage/bdb/rpc_server/java/README deleted file mode 100644 index f29c87805cc..00000000000 --- a/storage/bdb/rpc_server/java/README +++ /dev/null @@ -1,27 +0,0 @@ -Berkeley DB Java RPC server. - -Copyright (c) 2002-2004 - Sleepycat Software. All rights reserved. - -The Java implementation of the Berkeley DB RPC server is intended -primarily for testing purposes. It provides the same interface -as the C and C++ RPC servers, but is implemented via the Java API -rather than the C or C++ APIs. 
This allows the existing Tcl test -suite to exercise the Java API without modification. - -The Java RPC server relies on a Java version of rpcgen to -automatically generate appropriate Java classes from the RPC -interface specification (../db_server.x). We use jrpcgen, which -is part of the Remote Tea for Java project: - acplt.plt.rwth-aachen.de/ks/english/remotetea.html - -To rebuild the Java stubs from db_server.x, you will need to -download the full Remote Tea package, but if you just want to -compile the Java sources and run the Java RPC server, the runtime -component of Remote Tea is included in oncrpc.jar. Building -the Java RPC server is automatic when Berkeley DB is configured -with the both --enable-rpc and --enable-java. - -All of the Remote Tea project is licensed under the Library GNU -Public License, and we have made no modifications to their -released code. diff --git a/storage/bdb/rpc_server/java/RpcDb.java b/storage/bdb/rpc_server/java/RpcDb.java deleted file mode 100644 index bcdb861e3d4..00000000000 --- a/storage/bdb/rpc_server/java/RpcDb.java +++ /dev/null @@ -1,780 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: RpcDb.java,v 1.24 2004/11/05 00:42:40 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import com.sleepycat.db.internal.DbConstants; -import java.io.*; -import java.util.*; - -/** - * RPC wrapper around a db object for the Java RPC server. 
- */ -public class RpcDb extends Timer { - static final byte[] empty = new byte[0]; - DatabaseConfig config; - Database db; - RpcDbEnv rdbenv; - int refcount = 0; - String dbname, subdbname; - int type, setflags, openflags; - - public RpcDb(RpcDbEnv rdbenv) { - this.rdbenv = rdbenv; - } - - void dispose() { - if (db != null) { - try { - db.close(); - } catch (Throwable t) { - Util.handleException(t); - } - db = null; - } - } - - public void associate(Dispatcher server, - __db_associate_msg args, __db_associate_reply reply) { - try { - // The semantics of the new API are a little different. - // The secondary database will already be open, here, so we first - // have to close it and then call openSecondaryDatabase. - RpcDb secondary = server.getDatabase(args.sdbpcl_id); - try { - secondary.db.close(); - } finally { - secondary.db = null; - } - - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - - args.flags &= ~AssociateCallbacks.DB_RPC2ND_MASK; - SecondaryConfig secondaryConfig = new SecondaryConfig(); - // The secondary has already been opened once, so we don't - // need all of the settings here, only a few: - secondaryConfig.setReadOnly(secondary.config.getReadOnly()); - secondaryConfig.setTransactional(secondary.config.getTransactional()); - secondaryConfig.setKeyCreator(AssociateCallbacks.getCallback(args.flags)); - secondaryConfig.setAllowPopulate((args.flags & DbConstants.DB_CREATE) != 0); - secondary.db = rdbenv.dbenv.openSecondaryDatabase(txn, secondary.dbname, secondary.subdbname, db, secondaryConfig); - secondary.config = secondary.db.getConfig(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void close(Dispatcher server, - __db_close_msg args, __db_close_reply reply) { - if (refcount == 0 || --refcount > 0) { - reply.status = 0; - return; - } - - try { - server.delDatabase(this, false); - if (db != null) - db.close((args.flags & 
DbConstants.DB_NOSYNC) != 0); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - db = null; - } - } - - public void create(Dispatcher server, - __db_create_msg args, __db_create_reply reply) { - try { - config = new DatabaseConfig(); - config.setXACreate((args.flags & DbConstants.DB_XA_CREATE) != 0); - reply.dbcl_id = server.addDatabase(this); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void cursor(Dispatcher server, - __db_cursor_msg args, __db_cursor_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - - CursorConfig config = new CursorConfig(); - config.setDirtyRead((args.flags & DbConstants.DB_DIRTY_READ) != 0); - config.setDegree2((args.flags & DbConstants.DB_DEGREE_2) != 0); - config.setWriteCursor((args.flags & DbConstants.DB_WRITECURSOR) != 0); - - Cursor dbc = db.openCursor(txn, config); - RpcDbc rdbc = new RpcDbc(this, dbc, false); - rdbc.timer = (rtxn != null) ? rtxn.timer : this; - reply.dbcidcl_id = server.addCursor(rdbc); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void del(Dispatcher server, - __db_del_msg args, __db_del_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - - db.delete(txn, key /* args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get(Dispatcher server, - __db_get_msg args, __db_get_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? 
rtxn.txn : null; - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, - args.datadlen, args.datadoff, args.dataulen, args.dataflags, - args.flags & DbConstants.DB_MULTIPLE); - - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case 0: - status = db.get(txn, key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_CONSUME: - status = db.consume(txn, key, data, false); - break; - - case DbConstants.DB_CONSUME_WAIT: - status = db.consume(txn, key, data, true); - break; - - case DbConstants.DB_GET_BOTH: - status = db.getSearchBoth(txn, key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RECNO: - status = db.getSearchRecordNumber(txn, key, data, Util.getLockMode(args.flags)); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - - reply.keydata = Util.returnDatabaseEntry(key); - reply.datadata = Util.returnDatabaseEntry(data); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.keydata = reply.datadata = empty; - } - } - - public void join(Dispatcher server, - __db_join_msg args, __db_join_reply reply) { - try { - Cursor[] cursors = new Cursor[args.curs.length + 1]; - for (int i = 0; i < args.curs.length; i++) { - RpcDbc rdbc = server.getCursor(args.curs[i]); - if (rdbc == null) { - reply.status = DbConstants.DB_NOSERVER_ID; - return; - } - cursors[i] = rdbc.dbc; - } - cursors[args.curs.length] = null; - - JoinConfig config = new JoinConfig(); - config.setNoSort(args.flags == DbConstants.DB_JOIN_NOSORT); - JoinCursor jdbc = db.join(cursors, config); - - RpcDbc rjdbc = new RpcDbc(this, new JoinCursorAdapter(db, jdbc), true); - /* - * If our curslist has a parent txn, we need to use it too - * for the activity timeout. 
All cursors must be part of - * the same transaction, so just check the first. - */ - RpcDbc rdbc0 = server.getCursor(args.curs[0]); - if (rdbc0.timer != rdbc0) - rjdbc.timer = rdbc0.timer; - - /* - * All of the curslist cursors must point to the join - * cursor's timeout so that we do not timeout any of the - * curlist cursors while the join cursor is active. - */ - for (int i = 0; i < args.curs.length; i++) { - RpcDbc rdbc = server.getCursor(args.curs[i]); - rdbc.orig_timer = rdbc.timer; - rdbc.timer = rjdbc; - } - reply.dbcidcl_id = server.addCursor(rjdbc); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void key_range(Dispatcher server, - __db_key_range_msg args, __db_key_range_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - - KeyRange range = db.getKeyRange(txn, key /*, args.flags == 0 */); - reply.status = 0; - reply.less = range.less; - reply.equal = range.equal; - reply.greater = range.greater; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - private boolean findSharedDatabase(Dispatcher server, __db_open_reply reply) - throws DatabaseException { - RpcDb rdb = null; - boolean matchFound = false; - LocalIterator i = ((Server)server).db_list.iterator(); - - while (!matchFound && i.hasNext()) { - rdb = (RpcDb)i.next(); - if (rdb != null && rdb != this && rdb.rdbenv == rdbenv && - (type == DbConstants.DB_UNKNOWN || rdb.type == type) && - openflags == rdb.openflags && - setflags == rdb.setflags && - dbname != null && rdb.dbname != null && - dbname.equals(rdb.dbname) && - (subdbname == rdb.subdbname || - (subdbname != null && rdb.subdbname != null && - subdbname.equals(rdb.subdbname)))) - matchFound = true; - } - - if (matchFound) { - ++rdb.refcount; - reply.dbcl_id = 
((FreeList.FreeListIterator)i).current; - reply.type = Util.fromDatabaseType(rdb.config.getType()); - reply.lorder = rdb.config.getByteOrder(); - reply.status = 0; - - // Server.err.println("Sharing Database: " + reply.dbcl_id); - } - - return matchFound; - } - - public void get_name(Dispatcher server, - __db_get_name_msg args, __db_get_name_reply reply) { - reply.filename = dbname; - reply.dbname = subdbname; - reply.status = 0; - } - - public void get_open_flags(Dispatcher server, - __db_get_open_flags_msg args, __db_get_open_flags_reply reply) { - try { - reply.flags = 0; - if (config.getAllowCreate()) reply.flags |= DbConstants.DB_CREATE; - if (config.getExclusiveCreate()) reply.flags |= DbConstants.DB_EXCL; - if (config.getReadOnly()) reply.flags |= DbConstants.DB_RDONLY; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void open(Dispatcher server, - __db_open_msg args, __db_open_reply reply) { - try { - dbname = (args.name.length() > 0) ? args.name : null; - subdbname = (args.subdb.length() > 0) ? args.subdb : null; - type = args.type; - openflags = args.flags & Server.DB_SERVER_DBFLAGS; - - if (findSharedDatabase(server, reply)) { - server.delDatabase(this, true); - } else { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? 
rtxn.txn : null; - - // Server.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")"); - - config.setAllowCreate((args.flags & DbConstants.DB_CREATE) != 0); - config.setExclusiveCreate((args.flags & DbConstants.DB_EXCL) != 0); - config.setReadOnly((args.flags & DbConstants.DB_RDONLY) != 0); - config.setTransactional(txn != null || (args.flags & DbConstants.DB_AUTO_COMMIT) != 0); - config.setTruncate((args.flags & DbConstants.DB_TRUNCATE) != 0); - config.setType(Util.toDatabaseType(args.type)); - config.setMode(args.mode); - - db = rdbenv.dbenv.openDatabase(txn, dbname, subdbname, config); - ++refcount; - - // Refresh config in case we didn't know the full story before opening - config = db.getConfig(); - - reply.dbcl_id = args.dbpcl_id; - type = reply.type = Util.fromDatabaseType(config.getType()); - reply.lorder = config.getByteOrder(); - reply.status = 0; - } - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - - // System.err.println("Database.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id); - } - - public void pget(Dispatcher server, - __db_pget_msg args, __db_pget_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? 
rtxn.txn : null; - DatabaseEntry skey = Util.makeDatabaseEntry(args.skeydata, args.skeydlen, args.skeydoff, args.skeyulen, args.skeyflags); - DatabaseEntry pkey = Util.makeDatabaseEntry(args.pkeydata, args.pkeydlen, args.pkeydoff, args.pkeyulen, args.pkeyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); - - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case 0: - status = ((SecondaryDatabase)db).get(txn, skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_BOTH: - status = ((SecondaryDatabase)db).getSearchBoth(txn, skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RECNO: - status = ((SecondaryDatabase)db).getSearchRecordNumber(txn, skey, pkey, data, Util.getLockMode(args.flags)); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - - reply.skeydata = Util.returnDatabaseEntry(skey); - reply.pkeydata = Util.returnDatabaseEntry(pkey); - reply.datadata = Util.returnDatabaseEntry(data); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.skeydata = reply.pkeydata = reply.datadata = empty; - } - } - - public void put(Dispatcher server, - __db_put_msg args, __db_put_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? 
rtxn.txn : null; - - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); - - reply.keydata = empty; - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case 0: - status = db.put(txn, key, data); - break; - - case DbConstants.DB_APPEND: - status = db.append(txn, key, data); - reply.keydata = Util.returnDatabaseEntry(key); - break; - - case DbConstants.DB_NODUPDATA: - status = db.putNoDupData(txn, key, data); - break; - - case DbConstants.DB_NOOVERWRITE: - status = db.putNoOverwrite(txn, key, data); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.keydata = empty; - } - } - - public void remove(Dispatcher server, - __db_remove_msg args, __db_remove_reply reply) { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - Database.remove(args.name, args.subdb, config); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delDatabase(this, false); - } - } - - public void rename(Dispatcher server, - __db_rename_msg args, __db_rename_reply reply) { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - args.newname = (args.newname.length() > 0) ? 
args.newname : null; - Database.rename(args.name, args.subdb, args.newname, config); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delDatabase(this, false); - } - } - - public void set_bt_maxkey(Dispatcher server, - __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply) { - try { - // XXX: check what to do about: config.setBtreeMaxKey(args.maxkey); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_bt_minkey(Dispatcher server, - __db_get_bt_minkey_msg args, __db_get_bt_minkey_reply reply) { - try { - reply.minkey = config.getBtreeMinKey(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_bt_minkey(Dispatcher server, - __db_bt_minkey_msg args, __db_bt_minkey_reply reply) { - try { - config.setBtreeMinKey(args.minkey); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_encrypt_flags(Dispatcher server, - __db_get_encrypt_flags_msg args, __db_get_encrypt_flags_reply reply) { - try { - reply.flags = config.getEncrypted() ? 
DbConstants.DB_ENCRYPT_AES : 0; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_encrypt(Dispatcher server, - __db_encrypt_msg args, __db_encrypt_reply reply) { - try { - config.setEncrypted(args.passwd /*, args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_flags(Dispatcher server, - __db_get_flags_msg args, __db_get_flags_reply reply) { - try { - reply.flags = 0; - if (config.getChecksum()) reply.flags |= DbConstants.DB_CHKSUM; - if (config.getEncrypted()) reply.flags |= DbConstants.DB_ENCRYPT; - if (config.getBtreeRecordNumbers()) reply.flags |= DbConstants.DB_RECNUM; - if (config.getRenumbering()) reply.flags |= DbConstants.DB_RENUMBER; - if (config.getReverseSplitOff()) reply.flags |= DbConstants.DB_REVSPLITOFF; - if (config.getSortedDuplicates()) reply.flags |= DbConstants.DB_DUPSORT; - if (config.getSnapshot()) reply.flags |= DbConstants.DB_SNAPSHOT; - if (config.getUnsortedDuplicates()) reply.flags |= DbConstants.DB_DUP; - if (config.getTransactionNotDurable()) reply.flags |= DbConstants.DB_TXN_NOT_DURABLE; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_flags(Dispatcher server, - __db_flags_msg args, __db_flags_reply reply) { - try { - // Server.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")"); - config.setChecksum((args.flags & DbConstants.DB_CHKSUM) != 0); - config.setBtreeRecordNumbers((args.flags & DbConstants.DB_RECNUM) != 0); - config.setRenumbering((args.flags & DbConstants.DB_RENUMBER) != 0); - config.setReverseSplitOff((args.flags & DbConstants.DB_REVSPLITOFF) != 0); - config.setSortedDuplicates((args.flags & DbConstants.DB_DUPSORT) != 0); - config.setSnapshot((args.flags & DbConstants.DB_SNAPSHOT) != 0); - config.setUnsortedDuplicates((args.flags & DbConstants.DB_DUP) != 0); - 
config.setTransactionNotDurable((args.flags & DbConstants.DB_TXN_NOT_DURABLE) != 0); - - setflags |= args.flags; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_h_ffactor(Dispatcher server, - __db_get_h_ffactor_msg args, __db_get_h_ffactor_reply reply) { - try { - reply.ffactor = config.getHashFillFactor(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_h_ffactor(Dispatcher server, - __db_h_ffactor_msg args, __db_h_ffactor_reply reply) { - try { - config.setHashFillFactor(args.ffactor); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_h_nelem(Dispatcher server, - __db_get_h_nelem_msg args, __db_get_h_nelem_reply reply) { - try { - reply.nelem = config.getHashNumElements(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_h_nelem(Dispatcher server, - __db_h_nelem_msg args, __db_h_nelem_reply reply) { - try { - config.setHashNumElements(args.nelem); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_lorder(Dispatcher server, - __db_get_lorder_msg args, __db_get_lorder_reply reply) { - try { - reply.lorder = config.getByteOrder(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_lorder(Dispatcher server, - __db_lorder_msg args, __db_lorder_reply reply) { - try { - config.setByteOrder(args.lorder); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_pagesize(Dispatcher server, - __db_get_pagesize_msg args, __db_get_pagesize_reply reply) { - try { - reply.pagesize = config.getPageSize(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void 
set_pagesize(Dispatcher server, - __db_pagesize_msg args, __db_pagesize_reply reply) { - try { - config.setPageSize(args.pagesize); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_q_extentsize(Dispatcher server, - __db_get_extentsize_msg args, __db_get_extentsize_reply reply) { - try { - reply.extentsize = config.getQueueExtentSize(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_q_extentsize(Dispatcher server, - __db_extentsize_msg args, __db_extentsize_reply reply) { - try { - config.setQueueExtentSize(args.extentsize); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_re_delim(Dispatcher server, - __db_get_re_delim_msg args, __db_get_re_delim_reply reply) { - try { - reply.delim = config.getRecordDelimiter(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_re_delim(Dispatcher server, - __db_re_delim_msg args, __db_re_delim_reply reply) { - try { - config.setRecordDelimiter(args.delim); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_re_len(Dispatcher server, - __db_get_re_len_msg args, __db_get_re_len_reply reply) { - try { - reply.len = config.getRecordLength(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_re_len(Dispatcher server, - __db_re_len_msg args, __db_re_len_reply reply) { - try { - config.setRecordLength(args.len); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_re_pad(Dispatcher server, - __db_get_re_pad_msg args, __db_get_re_pad_reply reply) { - try { - reply.pad = config.getRecordPad(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - 
} - - public void set_re_pad(Dispatcher server, - __db_re_pad_msg args, __db_re_pad_reply reply) { - try { - config.setRecordPad(args.pad); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void stat(Dispatcher server, - __db_stat_msg args, __db_stat_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - StatsConfig config = new StatsConfig(); - config.setClear((args.flags & DbConstants.DB_STAT_CLEAR) != 0); - config.setFast((args.flags & DbConstants.DB_FAST_STAT) != 0); - DatabaseStats raw_stat = db.getStats(txn, config); - - if (raw_stat instanceof BtreeStats) { - BtreeStats bs = (BtreeStats)raw_stat; - int[] raw_stats = { - bs.getMagic(), bs.getVersion(), - bs.getMetaFlags(), bs.getNumKeys(), - bs.getNumData(), bs.getPageSize(), - bs.getMaxKey(), bs.getMinKey(), - bs.getReLen(), bs.getRePad(), - bs.getLevels(), bs.getIntPages(), - bs.getLeafPages(), bs.getDupPages(), - bs.getOverPages(), bs.getFree(), - bs.getIntPagesFree(), bs.getLeafPagesFree(), - bs.getDupPagesFree(), bs.getOverPagesFree() - }; - reply.stats = raw_stats; - } else if (raw_stat instanceof HashStats) { - HashStats hs = (HashStats)raw_stat; - int[] raw_stats = { - hs.getMagic(), hs.getVersion(), - hs.getMetaFlags(), hs.getNumKeys(), - hs.getNumData(), hs.getPageSize(), - hs.getFfactor(), hs.getBuckets(), - hs.getFree(), hs.getBFree(), - hs.getBigPages(), hs.getBigBFree(), - hs.getOverflows(), hs.getOvflFree(), - hs.getDup(), hs.getDupFree() - }; - reply.stats = raw_stats; - } else if (raw_stat instanceof QueueStats) { - QueueStats qs = (QueueStats)raw_stat; - int[] raw_stats = { - qs.getMagic(), qs.getVersion(), - qs.getMetaFlags(), qs.getNumKeys(), - qs.getNumData(), qs.getPageSize(), - qs.getExtentSize(), qs.getPages(), - qs.getReLen(), qs.getRePad(), - qs.getPagesFree(), qs.getFirstRecno(), - qs.getCurRecno() - }; - reply.stats = raw_stats; - } else - throw new 
DatabaseException("Invalid return type from db.stat()", DbConstants.DB_NOTFOUND); - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.stats = new int[0]; - } - } - - public void sync(Dispatcher server, - __db_sync_msg args, __db_sync_reply reply) { - try { - db.sync(/* args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void truncate(Dispatcher server, - __db_truncate_msg args, __db_truncate_reply reply) { - try { - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - reply.count = db.truncate(txn, true /*, args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.count = 0; - } - } -} diff --git a/storage/bdb/rpc_server/java/RpcDbEnv.java b/storage/bdb/rpc_server/java/RpcDbEnv.java deleted file mode 100644 index 71e134f7f35..00000000000 --- a/storage/bdb/rpc_server/java/RpcDbEnv.java +++ /dev/null @@ -1,369 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: RpcDbEnv.java,v 1.15 2004/04/21 01:09:11 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import com.sleepycat.db.internal.DbConstants; -import java.io.*; -import java.util.*; - -/** - * RPC wrapper around a dbenv for the Java RPC server. 
- */ -public class RpcDbEnv extends Timer { - EnvironmentConfig config; - Environment dbenv; - String home; - long idletime, timeout; - int openflags, onflags, offflags; - int refcount = 1; - - void dispose() { - if (dbenv != null) { - try { - dbenv.close(); - } catch (Throwable t) { - Util.handleException(t); - } - dbenv = null; - } - } - - public void close(Dispatcher server, - __env_close_msg args, __env_close_reply reply) { - if (--refcount != 0) { - reply.status = 0; - return; - } - - try { - server.delEnv(this, false); - if (dbenv != null) - dbenv.close(/* args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - dbenv = null; - } - } - - public void create(Dispatcher server, - __env_create_msg args, __env_create_reply reply) { - this.idletime = (args.timeout != 0) ? args.timeout : Server.idleto; - this.timeout = Server.defto; - try { - config = new EnvironmentConfig(); - config.setErrorStream(Server.errstream); - reply.envcl_id = server.addEnv(this); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void dbremove(Dispatcher server, - __env_dbremove_msg args, __env_dbremove_reply reply) { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - dbenv.removeDatabase(txn, args.name, args.subdb /*, args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void dbrename(Dispatcher server, - __env_dbrename_msg args, __env_dbrename_reply reply) { - try { - args.name = (args.name.length() > 0) ? args.name : null; - args.subdb = (args.subdb.length() > 0) ? args.subdb : null; - args.newname = (args.newname.length() > 0) ? 
args.newname : null; - - RpcDbTxn rtxn = server.getTxn(args.txnpcl_id); - Transaction txn = (rtxn != null) ? rtxn.txn : null; - dbenv.renameDatabase(txn, args.name, args.subdb, args.newname /*, args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - private boolean findSharedEnvironment(Dispatcher server, __env_open_reply reply) - throws DatabaseException { - RpcDbEnv rdbenv = null; - boolean matchFound = false; - LocalIterator i = ((Server)server).env_list.iterator(); - - while (!matchFound && i.hasNext()) { - rdbenv = (RpcDbEnv)i.next(); - if (rdbenv != null && rdbenv != this && - (home == rdbenv.home || - (home != null && home.equals(rdbenv.home))) && - openflags == rdbenv.openflags && - onflags == rdbenv.onflags && - offflags == rdbenv.offflags) - matchFound = true; - } - - if (matchFound) { - /* - * The only thing left to check is the timeout. - * Since the server timeout set by the client is a hint, for sharing - * we'll give them the benefit of the doubt and grant them the - * longer timeout. 
- */ - if (rdbenv.timeout < timeout) - rdbenv.timeout = timeout; - - ++rdbenv.refcount; - reply.envcl_id = ((FreeList.FreeListIterator)i).current; - reply.status = 0; - - Server.err.println("Sharing Environment: " + reply.envcl_id); - } - - return matchFound; - } - - public void get_home(Dispatcher server, - __env_get_home_msg args, __env_get_home_reply reply) { - try { - reply.home = dbenv.getHome().toString(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_open_flags(Dispatcher server, - __env_get_open_flags_msg args, __env_get_open_flags_reply reply) { - try { - reply.flags = 0; - if (config.getAllowCreate()) reply.flags |= DbConstants.DB_CREATE; - if (config.getInitializeCache()) reply.flags |= DbConstants.DB_INIT_MPOOL; - if (config.getInitializeCDB()) reply.flags |= DbConstants.DB_INIT_CDB; - if (config.getInitializeLocking()) reply.flags |= DbConstants.DB_INIT_LOCK; - if (config.getInitializeLogging()) reply.flags |= DbConstants.DB_INIT_LOG; - if (config.getInitializeReplication()) reply.flags |= DbConstants.DB_INIT_REP; - if (config.getJoinEnvironment()) reply.flags |= DbConstants.DB_JOINENV; - if (config.getLockDown()) reply.flags |= DbConstants.DB_LOCKDOWN; - if (config.getPrivate()) reply.flags |= DbConstants.DB_PRIVATE; - if (config.getReadOnly()) reply.flags |= DbConstants.DB_RDONLY; - if (config.getRunRecovery()) reply.flags |= DbConstants.DB_RECOVER; - if (config.getRunFatalRecovery()) reply.flags |= DbConstants.DB_RECOVER_FATAL; - if (config.getSystemMemory()) reply.flags |= DbConstants.DB_SYSTEM_MEM; - if (config.getTransactional()) reply.flags |= DbConstants.DB_INIT_TXN; - if (config.getUseEnvironment()) reply.flags |= DbConstants.DB_USE_ENVIRON; - if (config.getUseEnvironmentRoot()) reply.flags |= DbConstants.DB_USE_ENVIRON_ROOT; - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void open(Dispatcher server, - 
__env_open_msg args, __env_open_reply reply) { - try { - home = (args.home.length() > 0) ? args.home : null; - - /* - * If they are using locking do deadlock detection for - * them, internally. - */ - if ((args.flags & DbConstants.DB_INIT_LOCK) != 0) - config.setLockDetectMode(LockDetectMode.DEFAULT); - - // adjust flags for RPC - int newflags = (args.flags & ~Server.DB_SERVER_FLAGMASK); - openflags = (newflags & Server.DB_SERVER_ENVFLAGS); - - config.setAllowCreate((args.flags & DbConstants.DB_CREATE) != 0); - config.setInitializeCache((args.flags & DbConstants.DB_INIT_MPOOL) != 0); - config.setInitializeCDB((args.flags & DbConstants.DB_INIT_CDB) != 0); - config.setInitializeLocking((args.flags & DbConstants.DB_INIT_LOCK) != 0); - config.setInitializeLogging((args.flags & DbConstants.DB_INIT_LOG) != 0); - config.setInitializeReplication((args.flags & DbConstants.DB_INIT_REP) != 0); - config.setJoinEnvironment((args.flags & DbConstants.DB_JOINENV) != 0); - config.setLockDown((args.flags & DbConstants.DB_LOCKDOWN) != 0); - config.setPrivate((args.flags & DbConstants.DB_PRIVATE) != 0); - config.setReadOnly((args.flags & DbConstants.DB_RDONLY) != 0); - config.setRunRecovery((args.flags & DbConstants.DB_RECOVER) != 0); - config.setRunFatalRecovery((args.flags & DbConstants.DB_RECOVER_FATAL) != 0); - config.setSystemMemory((args.flags & DbConstants.DB_SYSTEM_MEM) != 0); - config.setTransactional((args.flags & DbConstants.DB_INIT_TXN) != 0); - config.setUseEnvironment((args.flags & DbConstants.DB_USE_ENVIRON) != 0); - config.setUseEnvironmentRoot((args.flags & DbConstants.DB_USE_ENVIRON_ROOT) != 0); - - if (findSharedEnvironment(server, reply)) - dbenv = null; - else if (Server.check_home(home)) { - dbenv = new Environment(new File(home), config); - // Get the configuration after opening -- it may have changed if we're joining an environment - config = dbenv.getConfig(); - reply.status = 0; - reply.envcl_id = args.dbenvcl_id; - } else - reply.status = 
DbConstants.DB_NOSERVER_HOME; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - - // System.err.println("Environment.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id); - } - - public void remove(Dispatcher server, - __env_remove_msg args, __env_remove_reply reply) { - Server.err.println("RpcDbEnv.remove(" + args.home + ")"); - try { - args.home = (args.home.length() > 0) ? args.home : null; - // TODO: check home? - - boolean force = (args.flags & DbConstants.DB_FORCE) != 0; - config.setUseEnvironment((args.flags & DbConstants.DB_USE_ENVIRON) != 0); - config.setUseEnvironmentRoot((args.flags & DbConstants.DB_USE_ENVIRON_ROOT) != 0); - - Environment.remove(new File(args.home), force, config); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delEnv(this, false); - } - } - - public void get_cachesize(Dispatcher server, - __env_get_cachesize_msg args, __env_get_cachesize_reply reply) { - try { - long cachesize = config.getCacheSize(); - final long GIGABYTE = 1073741824; - reply.gbytes = (int)(cachesize / GIGABYTE); - reply.bytes = (int)(cachesize % GIGABYTE); - reply.ncache = config.getCacheCount(); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_cachesize(Dispatcher server, - __env_cachesize_msg args, __env_cachesize_reply reply) { - try { - long bytes = (long)args.gbytes * 1024 * 1024 * 1024; - bytes += args.bytes; - config.setCacheSize(bytes); - config.setCacheCount(args.ncache); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_encrypt_flags(Dispatcher server, - __env_get_encrypt_flags_msg args, __env_get_encrypt_flags_reply reply) { - try { - reply.flags = config.getEncrypted() ? 
DbConstants.DB_ENCRYPT_AES : 0; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_encrypt(Dispatcher server, - __env_encrypt_msg args, __env_encrypt_reply reply) { - try { - config.setEncrypted(args.passwd); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get_flags(Dispatcher server, - __env_get_flags_msg args, __env_get_flags_reply reply) { - try { - reply.flags = 0; - if (config.getCDBLockAllDatabases()) reply.flags |= DbConstants.DB_CDB_ALLDB; - if (config.getDirectDatabaseIO()) reply.flags |= DbConstants.DB_DIRECT_DB; - if (config.getDirectLogIO()) reply.flags |= DbConstants.DB_DIRECT_LOG; - if (config.getInitializeRegions()) reply.flags |= DbConstants.DB_REGION_INIT; - if (config.getLogAutoRemove()) reply.flags |= DbConstants.DB_LOG_AUTOREMOVE; - if (config.getNoLocking()) reply.flags |= DbConstants.DB_NOLOCKING; - if (config.getNoMMap()) reply.flags |= DbConstants.DB_NOMMAP; - if (config.getNoPanic()) reply.flags |= DbConstants.DB_NOPANIC; - if (config.getOverwrite()) reply.flags |= DbConstants.DB_OVERWRITE; - if (config.getTxnNoSync()) reply.flags |= DbConstants.DB_TXN_NOSYNC; - if (config.getTxnNotDurable()) reply.flags |= DbConstants.DB_TXN_NOT_DURABLE; - if (config.getTxnWriteNoSync()) reply.flags |= DbConstants.DB_TXN_WRITE_NOSYNC; - if (config.getYieldCPU()) reply.flags |= DbConstants.DB_YIELDCPU; - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void set_flags(Dispatcher server, - __env_flags_msg args, __env_flags_reply reply) { - try { - boolean onoff = (args.onoff != 0); - if (onoff) - onflags |= args.flags; - else - offflags |= args.flags; - - if ((args.flags & DbConstants.DB_CDB_ALLDB) != 0) config.setCDBLockAllDatabases(onoff); - if ((args.flags & DbConstants.DB_DIRECT_DB) != 0) config.setDirectDatabaseIO(onoff); - if ((args.flags & DbConstants.DB_DIRECT_LOG) != 
0) config.setDirectLogIO(onoff); - if ((args.flags & DbConstants.DB_REGION_INIT) != 0) config.setInitializeRegions(onoff); - if ((args.flags & DbConstants.DB_LOG_AUTOREMOVE) != 0) config.setLogAutoRemove(onoff); - if ((args.flags & DbConstants.DB_NOLOCKING) != 0) config.setNoLocking(onoff); - if ((args.flags & DbConstants.DB_NOMMAP) != 0) config.setNoMMap(onoff); - if ((args.flags & DbConstants.DB_NOPANIC) != 0) config.setNoPanic(onoff); - if ((args.flags & DbConstants.DB_OVERWRITE) != 0) config.setOverwrite(onoff); - if ((args.flags & DbConstants.DB_TXN_NOSYNC) != 0) config.setTxnNoSync(onoff); - if ((args.flags & DbConstants.DB_TXN_NOT_DURABLE) != 0) config.setTxnNotDurable(onoff); - if ((args.flags & DbConstants.DB_TXN_WRITE_NOSYNC) != 0) config.setTxnWriteNoSync(onoff); - if ((args.flags & DbConstants.DB_YIELDCPU) != 0) config.setYieldCPU(onoff); - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - // txn_recover implementation - public void txn_recover(Dispatcher server, - __txn_recover_msg args, __txn_recover_reply reply) { - try { - PreparedTransaction[] prep_list = dbenv.recover(args.count, args.flags == DbConstants.DB_NEXT); - if (prep_list != null && prep_list.length > 0) { - int count = prep_list.length; - reply.retcount = count; - reply.txn = new int[count]; - reply.gid = new byte[count * DbConstants.DB_XIDDATASIZE]; - - for (int i = 0; i < count; i++) { - reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].getTransaction())); - System.arraycopy(prep_list[i].getGID(), 0, reply.gid, i * DbConstants.DB_XIDDATASIZE, DbConstants.DB_XIDDATASIZE); - } - } - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } -} diff --git a/storage/bdb/rpc_server/java/RpcDbTxn.java b/storage/bdb/rpc_server/java/RpcDbTxn.java deleted file mode 100644 index 85cdad0380a..00000000000 --- a/storage/bdb/rpc_server/java/RpcDbTxn.java +++ /dev/null @@ -1,132 +0,0 @@ -/*- - * See the 
file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: RpcDbTxn.java,v 1.9 2004/05/04 13:45:33 sue Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import com.sleepycat.db.internal.DbConstants; -import java.io.*; -import java.util.*; - -/** - * RPC wrapper around a txn object for the Java RPC server. - */ -public class RpcDbTxn extends Timer { - RpcDbEnv rdbenv; - Transaction txn; - - public RpcDbTxn(RpcDbEnv rdbenv, Transaction txn) { - this.rdbenv = rdbenv; - this.txn = txn; - } - - void dispose() { - if (txn != null) { - try { - txn.abort(); - } catch (DatabaseException e) { - e.printStackTrace(Server.err); - } - txn = null; - } - } - - public void abort(Dispatcher server, - __txn_abort_msg args, __txn_abort_reply reply) { - try { - txn.abort(); - txn = null; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delTxn(this, false); - } - } - - public void begin(Dispatcher server, - __txn_begin_msg args, __txn_begin_reply reply) { - try { - if (rdbenv == null) { - reply.status = DbConstants.DB_NOSERVER_ID; - return; - } - Environment dbenv = rdbenv.dbenv; - RpcDbTxn rparent = server.getTxn(args.parentcl_id); - Transaction parent = (rparent != null) ? 
rparent.txn : null; - - TransactionConfig config = new TransactionConfig(); - config.setDegree2((args.flags & DbConstants.DB_DEGREE_2) != 0); - config.setDirtyRead((args.flags & DbConstants.DB_DIRTY_READ) != 0); - config.setNoSync((args.flags & DbConstants.DB_TXN_NOSYNC) != 0); - config.setNoWait(true); - config.setSync((args.flags & DbConstants.DB_TXN_SYNC) != 0); - - txn = dbenv.beginTransaction(parent, config); - - if (rparent != null) - timer = rparent.timer; - reply.txnidcl_id = server.addTxn(this); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void commit(Dispatcher server, - __txn_commit_msg args, __txn_commit_reply reply) { - try { - switch(args.flags) { - case 0: - txn.commit(); - break; - - case DbConstants.DB_TXN_SYNC: - txn.commitSync(); - break; - - case DbConstants.DB_TXN_NOSYNC: - txn.commitSync(); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - txn = null; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delTxn(this, false); - } - } - - public void discard(Dispatcher server, - __txn_discard_msg args, __txn_discard_reply reply) { - try { - txn.discard(/* args.flags == 0 */); - txn = null; - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delTxn(this, false); - } - } - - public void prepare(Dispatcher server, - __txn_prepare_msg args, __txn_prepare_reply reply) { - try { - txn.prepare(args.gid); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } -} diff --git a/storage/bdb/rpc_server/java/RpcDbc.java b/storage/bdb/rpc_server/java/RpcDbc.java deleted file mode 100644 index 75f644c4b13..00000000000 --- a/storage/bdb/rpc_server/java/RpcDbc.java +++ /dev/null @@ -1,317 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. 
- * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: RpcDbc.java,v 1.13 2004/11/05 01:08:31 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -import com.sleepycat.db.*; -import com.sleepycat.db.internal.DbConstants; -import java.io.*; -import java.util.*; - -/** - * RPC wrapper around a dbc object for the Java RPC server. - */ -public class RpcDbc extends Timer { - static final byte[] empty = new byte[0]; - RpcDbEnv rdbenv; - RpcDb rdb; - Cursor dbc; - Timer orig_timer; - boolean isJoin; - - public RpcDbc(RpcDb rdb, Cursor dbc, boolean isJoin) { - this.rdb = rdb; - this.rdbenv = rdb.rdbenv; - this.dbc = dbc; - this.isJoin = isJoin; - } - - void dispose() { - if (dbc != null) { - try { - dbc.close(); - } catch (Throwable t) { - Util.handleException(t); - } - dbc = null; - } - } - - public void close(Dispatcher server, - __dbc_close_msg args, __dbc_close_reply reply) { - try { - dbc.close(); - dbc = null; - - if (isJoin) - for (LocalIterator i = ((Server)server).cursor_list.iterator(); i.hasNext();) { - RpcDbc rdbc = (RpcDbc)i.next(); - // Unjoin cursors that were joined to create this - if (rdbc != null && rdbc.timer == this) - rdbc.timer = rdbc.orig_timer; - } - - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } finally { - server.delCursor(this, false); - } - } - - public void count(Dispatcher server, - __dbc_count_msg args, __dbc_count_reply reply) { - try { - reply.dupcount = dbc.count(/* args.flags == 0 */); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void del(Dispatcher server, - __dbc_del_msg args, __dbc_del_reply reply) { - try { - reply.status = Util.getStatus(dbc.delete(/* args.flags == 0 */)); - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void dup(Dispatcher server, - __dbc_dup_msg args, __dbc_dup_reply reply) { - try { - Cursor newdbc = dbc.dup(args.flags == 
DbConstants.DB_POSITION); - RpcDbc rdbc = new RpcDbc(rdb, newdbc, false); - /* If this cursor has a parent txn, we need to use it too. */ - if (timer != this) - rdbc.timer = timer; - reply.dbcidcl_id = server.addCursor(rdbc); - reply.status = 0; - } catch (Throwable t) { - reply.status = Util.handleException(t); - } - } - - public void get(Dispatcher server, - __dbc_get_msg args, __dbc_get_reply reply) { - try { - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, - args.datadlen, args.datadoff, args.dataulen, args.dataflags, - args.flags & (DbConstants.DB_MULTIPLE | DbConstants.DB_MULTIPLE_KEY)); - - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case DbConstants.DB_CURRENT: - status = dbc.getCurrent(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_FIRST: - status = dbc.getFirst(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_LAST: - status = dbc.getLast(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_NEXT: - status = dbc.getNext(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_NEXT_DUP: - status = dbc.getNextDup(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_NEXT_NODUP: - status = dbc.getNextNoDup(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_PREV: - status = dbc.getPrev(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_PREV_NODUP: - status = dbc.getPrevNoDup(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_RECNO: - status = dbc.getRecordNumber(data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET: - status = dbc.getSearchKey(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RANGE: - status = dbc.getSearchKeyRange(key, data, 
Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_BOTH: - status = dbc.getSearchBoth(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_BOTH_RANGE: - status = dbc.getSearchBothRange(key, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RECNO: - status = dbc.getSearchRecordNumber(key, data, Util.getLockMode(args.flags)); - break; - - /* Join cursors */ - case 0: - status = ((JoinCursorAdapter)dbc).jc.getNext(key, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_JOIN_ITEM: - status = ((JoinCursorAdapter)dbc).jc.getNext(key, Util.getLockMode(args.flags)); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - reply.keydata = Util.returnDatabaseEntry(key); - reply.datadata = Util.returnDatabaseEntry(data); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.keydata = reply.datadata = empty; - } - } - - public void pget(Dispatcher server, - __dbc_pget_msg args, __dbc_pget_reply reply) { - try { - DatabaseEntry skey = Util.makeDatabaseEntry(args.skeydata, args.skeydlen, args.skeydoff, args.skeyulen, args.skeyflags); - DatabaseEntry pkey = Util.makeDatabaseEntry(args.pkeydata, args.pkeydlen, args.pkeydoff, args.pkeyulen, args.pkeyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); - - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case DbConstants.DB_CURRENT: - status = ((SecondaryCursor)dbc).getCurrent(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_FIRST: - status = ((SecondaryCursor)dbc).getFirst(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_LAST: - status = ((SecondaryCursor)dbc).getLast(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case 
DbConstants.DB_NEXT: - status = ((SecondaryCursor)dbc).getNext(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_NEXT_DUP: - status = ((SecondaryCursor)dbc).getNextDup(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_NEXT_NODUP: - status = ((SecondaryCursor)dbc).getNextNoDup(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_PREV: - status = ((SecondaryCursor)dbc).getPrev(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_PREV_NODUP: - status = ((SecondaryCursor)dbc).getPrevNoDup(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_RECNO: - status = ((SecondaryCursor)dbc).getRecordNumber(pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET: - status = ((SecondaryCursor)dbc).getSearchKey(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RANGE: - status = ((SecondaryCursor)dbc).getSearchKeyRange(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_BOTH: - status = ((SecondaryCursor)dbc).getSearchBoth(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_GET_BOTH_RANGE: - status = ((SecondaryCursor)dbc).getSearchBothRange(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - case DbConstants.DB_SET_RECNO: - status = ((SecondaryCursor)dbc).getSearchRecordNumber(skey, pkey, data, Util.getLockMode(args.flags)); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - reply.skeydata = Util.returnDatabaseEntry(skey); - reply.pkeydata = Util.returnDatabaseEntry(pkey); - reply.datadata = Util.returnDatabaseEntry(data); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.skeydata = reply.pkeydata = reply.datadata = empty; - } - } - - public void 
put(Dispatcher server, - __dbc_put_msg args, __dbc_put_reply reply) { - try { - DatabaseEntry key = Util.makeDatabaseEntry(args.keydata, args.keydlen, args.keydoff, args.keyulen, args.keyflags); - DatabaseEntry data = Util.makeDatabaseEntry(args.datadata, args.datadlen, args.datadoff, args.dataulen, args.dataflags); - - OperationStatus status; - switch(args.flags & ~Server.DB_MODIFIER_MASK) { - case 0: - status = dbc.put(key, data); - break; - - case DbConstants.DB_AFTER: - status = dbc.putAfter(key, data); - break; - - case DbConstants.DB_BEFORE: - status = dbc.putBefore(key, data); - break; - - case DbConstants.DB_NOOVERWRITE: - status = dbc.putNoOverwrite(key, data); - break; - - case DbConstants.DB_KEYFIRST: - status = dbc.putKeyFirst(key, data); - break; - - case DbConstants.DB_KEYLAST: - status = dbc.putKeyLast(key, data); - break; - - case DbConstants.DB_NODUPDATA: - status = dbc.putNoDupData(key, data); - break; - - case DbConstants.DB_CURRENT: - status = dbc.putCurrent(data); - break; - - default: - throw new UnsupportedOperationException("Unknown flag: " + (args.flags & ~Server.DB_MODIFIER_MASK)); - } - reply.status = Util.getStatus(status); - reply.keydata = Util.returnDatabaseEntry(key); - } catch (Throwable t) { - reply.status = Util.handleException(t); - reply.keydata = empty; - } - } -} diff --git a/storage/bdb/rpc_server/java/Timer.java b/storage/bdb/rpc_server/java/Timer.java deleted file mode 100644 index 469ad1ad4d4..00000000000 --- a/storage/bdb/rpc_server/java/Timer.java +++ /dev/null @@ -1,21 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2001-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: Timer.java,v 1.4 2004/04/06 20:43:42 mjc Exp $ - */ - -package com.sleepycat.db.rpcserver; - -/** - * Class to keep track of access times. This is slightly devious by having - * both the access_time and a reference to another Timer that can be - * used to group/share access times. 
This is done to keep the Java code - * close to the canonical C implementation of the RPC server. - */ -public class Timer { - Timer timer = this; - long last_access; -} diff --git a/storage/bdb/rpc_server/java/gen/DbServerStub.java b/storage/bdb/rpc_server/java/gen/DbServerStub.java deleted file mode 100644 index 90fc13a6d9c..00000000000 --- a/storage/bdb/rpc_server/java/gen/DbServerStub.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -import org.acplt.oncrpc.server.*; - -/** - */ -public abstract class DbServerStub extends OncRpcServerStub implements OncRpcDispatchable { - - public DbServerStub() - throws OncRpcException, IOException { - this(0); - } - - public DbServerStub(int port) - throws OncRpcException, IOException { - info = new OncRpcServerTransportRegistrationInfo [] { - new OncRpcServerTransportRegistrationInfo(db_server.DB_RPC_SERVERPROG, 4001), - }; - transports = new OncRpcServerTransport [] { - new OncRpcUdpServerTransport(this, port, info, 32768), - new OncRpcTcpServerTransport(this, port, info, 32768) - }; - } - - public void dispatchOncRpcCall(OncRpcCallInformation call, int program, int version, int procedure) - throws OncRpcException, IOException { - if ( version == 4001 ) { - switch ( procedure ) { - case 1: { - __env_cachesize_msg args$ = new __env_cachesize_msg(); - call.retrieveCall(args$); - __env_cachesize_reply result$ = __DB_env_cachesize_4001(args$); - call.reply(result$); - break; - } - case 2: { - __env_close_msg args$ = new __env_close_msg(); - call.retrieveCall(args$); - __env_close_reply result$ = __DB_env_close_4001(args$); - call.reply(result$); - break; - } - case 3: { - __env_create_msg args$ = new __env_create_msg(); - call.retrieveCall(args$); - 
__env_create_reply result$ = __DB_env_create_4001(args$); - call.reply(result$); - break; - } - case 4: { - __env_dbremove_msg args$ = new __env_dbremove_msg(); - call.retrieveCall(args$); - __env_dbremove_reply result$ = __DB_env_dbremove_4001(args$); - call.reply(result$); - break; - } - case 5: { - __env_dbrename_msg args$ = new __env_dbrename_msg(); - call.retrieveCall(args$); - __env_dbrename_reply result$ = __DB_env_dbrename_4001(args$); - call.reply(result$); - break; - } - case 6: { - __env_encrypt_msg args$ = new __env_encrypt_msg(); - call.retrieveCall(args$); - __env_encrypt_reply result$ = __DB_env_encrypt_4001(args$); - call.reply(result$); - break; - } - case 7: { - __env_flags_msg args$ = new __env_flags_msg(); - call.retrieveCall(args$); - __env_flags_reply result$ = __DB_env_flags_4001(args$); - call.reply(result$); - break; - } - case 8: { - __env_open_msg args$ = new __env_open_msg(); - call.retrieveCall(args$); - __env_open_reply result$ = __DB_env_open_4001(args$); - call.reply(result$); - break; - } - case 9: { - __env_remove_msg args$ = new __env_remove_msg(); - call.retrieveCall(args$); - __env_remove_reply result$ = __DB_env_remove_4001(args$); - call.reply(result$); - break; - } - case 10: { - __txn_abort_msg args$ = new __txn_abort_msg(); - call.retrieveCall(args$); - __txn_abort_reply result$ = __DB_txn_abort_4001(args$); - call.reply(result$); - break; - } - case 11: { - __txn_begin_msg args$ = new __txn_begin_msg(); - call.retrieveCall(args$); - __txn_begin_reply result$ = __DB_txn_begin_4001(args$); - call.reply(result$); - break; - } - case 12: { - __txn_commit_msg args$ = new __txn_commit_msg(); - call.retrieveCall(args$); - __txn_commit_reply result$ = __DB_txn_commit_4001(args$); - call.reply(result$); - break; - } - case 13: { - __txn_discard_msg args$ = new __txn_discard_msg(); - call.retrieveCall(args$); - __txn_discard_reply result$ = __DB_txn_discard_4001(args$); - call.reply(result$); - break; - } - case 14: { - 
__txn_prepare_msg args$ = new __txn_prepare_msg(); - call.retrieveCall(args$); - __txn_prepare_reply result$ = __DB_txn_prepare_4001(args$); - call.reply(result$); - break; - } - case 15: { - __txn_recover_msg args$ = new __txn_recover_msg(); - call.retrieveCall(args$); - __txn_recover_reply result$ = __DB_txn_recover_4001(args$); - call.reply(result$); - break; - } - case 16: { - __db_associate_msg args$ = new __db_associate_msg(); - call.retrieveCall(args$); - __db_associate_reply result$ = __DB_db_associate_4001(args$); - call.reply(result$); - break; - } - case 17: { - __db_bt_maxkey_msg args$ = new __db_bt_maxkey_msg(); - call.retrieveCall(args$); - __db_bt_maxkey_reply result$ = __DB_db_bt_maxkey_4001(args$); - call.reply(result$); - break; - } - case 18: { - __db_bt_minkey_msg args$ = new __db_bt_minkey_msg(); - call.retrieveCall(args$); - __db_bt_minkey_reply result$ = __DB_db_bt_minkey_4001(args$); - call.reply(result$); - break; - } - case 19: { - __db_close_msg args$ = new __db_close_msg(); - call.retrieveCall(args$); - __db_close_reply result$ = __DB_db_close_4001(args$); - call.reply(result$); - break; - } - case 20: { - __db_create_msg args$ = new __db_create_msg(); - call.retrieveCall(args$); - __db_create_reply result$ = __DB_db_create_4001(args$); - call.reply(result$); - break; - } - case 21: { - __db_del_msg args$ = new __db_del_msg(); - call.retrieveCall(args$); - __db_del_reply result$ = __DB_db_del_4001(args$); - call.reply(result$); - break; - } - case 22: { - __db_encrypt_msg args$ = new __db_encrypt_msg(); - call.retrieveCall(args$); - __db_encrypt_reply result$ = __DB_db_encrypt_4001(args$); - call.reply(result$); - break; - } - case 23: { - __db_extentsize_msg args$ = new __db_extentsize_msg(); - call.retrieveCall(args$); - __db_extentsize_reply result$ = __DB_db_extentsize_4001(args$); - call.reply(result$); - break; - } - case 24: { - __db_flags_msg args$ = new __db_flags_msg(); - call.retrieveCall(args$); - __db_flags_reply result$ = 
__DB_db_flags_4001(args$); - call.reply(result$); - break; - } - case 25: { - __db_get_msg args$ = new __db_get_msg(); - call.retrieveCall(args$); - __db_get_reply result$ = __DB_db_get_4001(args$); - call.reply(result$); - break; - } - case 26: { - __db_h_ffactor_msg args$ = new __db_h_ffactor_msg(); - call.retrieveCall(args$); - __db_h_ffactor_reply result$ = __DB_db_h_ffactor_4001(args$); - call.reply(result$); - break; - } - case 27: { - __db_h_nelem_msg args$ = new __db_h_nelem_msg(); - call.retrieveCall(args$); - __db_h_nelem_reply result$ = __DB_db_h_nelem_4001(args$); - call.reply(result$); - break; - } - case 28: { - __db_key_range_msg args$ = new __db_key_range_msg(); - call.retrieveCall(args$); - __db_key_range_reply result$ = __DB_db_key_range_4001(args$); - call.reply(result$); - break; - } - case 29: { - __db_lorder_msg args$ = new __db_lorder_msg(); - call.retrieveCall(args$); - __db_lorder_reply result$ = __DB_db_lorder_4001(args$); - call.reply(result$); - break; - } - case 30: { - __db_open_msg args$ = new __db_open_msg(); - call.retrieveCall(args$); - __db_open_reply result$ = __DB_db_open_4001(args$); - call.reply(result$); - break; - } - case 31: { - __db_pagesize_msg args$ = new __db_pagesize_msg(); - call.retrieveCall(args$); - __db_pagesize_reply result$ = __DB_db_pagesize_4001(args$); - call.reply(result$); - break; - } - case 32: { - __db_pget_msg args$ = new __db_pget_msg(); - call.retrieveCall(args$); - __db_pget_reply result$ = __DB_db_pget_4001(args$); - call.reply(result$); - break; - } - case 33: { - __db_put_msg args$ = new __db_put_msg(); - call.retrieveCall(args$); - __db_put_reply result$ = __DB_db_put_4001(args$); - call.reply(result$); - break; - } - case 34: { - __db_re_delim_msg args$ = new __db_re_delim_msg(); - call.retrieveCall(args$); - __db_re_delim_reply result$ = __DB_db_re_delim_4001(args$); - call.reply(result$); - break; - } - case 35: { - __db_re_len_msg args$ = new __db_re_len_msg(); - call.retrieveCall(args$); - 
__db_re_len_reply result$ = __DB_db_re_len_4001(args$); - call.reply(result$); - break; - } - case 36: { - __db_re_pad_msg args$ = new __db_re_pad_msg(); - call.retrieveCall(args$); - __db_re_pad_reply result$ = __DB_db_re_pad_4001(args$); - call.reply(result$); - break; - } - case 37: { - __db_remove_msg args$ = new __db_remove_msg(); - call.retrieveCall(args$); - __db_remove_reply result$ = __DB_db_remove_4001(args$); - call.reply(result$); - break; - } - case 38: { - __db_rename_msg args$ = new __db_rename_msg(); - call.retrieveCall(args$); - __db_rename_reply result$ = __DB_db_rename_4001(args$); - call.reply(result$); - break; - } - case 39: { - __db_stat_msg args$ = new __db_stat_msg(); - call.retrieveCall(args$); - __db_stat_reply result$ = __DB_db_stat_4001(args$); - call.reply(result$); - break; - } - case 40: { - __db_sync_msg args$ = new __db_sync_msg(); - call.retrieveCall(args$); - __db_sync_reply result$ = __DB_db_sync_4001(args$); - call.reply(result$); - break; - } - case 41: { - __db_truncate_msg args$ = new __db_truncate_msg(); - call.retrieveCall(args$); - __db_truncate_reply result$ = __DB_db_truncate_4001(args$); - call.reply(result$); - break; - } - case 42: { - __db_cursor_msg args$ = new __db_cursor_msg(); - call.retrieveCall(args$); - __db_cursor_reply result$ = __DB_db_cursor_4001(args$); - call.reply(result$); - break; - } - case 43: { - __db_join_msg args$ = new __db_join_msg(); - call.retrieveCall(args$); - __db_join_reply result$ = __DB_db_join_4001(args$); - call.reply(result$); - break; - } - case 44: { - __dbc_close_msg args$ = new __dbc_close_msg(); - call.retrieveCall(args$); - __dbc_close_reply result$ = __DB_dbc_close_4001(args$); - call.reply(result$); - break; - } - case 45: { - __dbc_count_msg args$ = new __dbc_count_msg(); - call.retrieveCall(args$); - __dbc_count_reply result$ = __DB_dbc_count_4001(args$); - call.reply(result$); - break; - } - case 46: { - __dbc_del_msg args$ = new __dbc_del_msg(); - 
call.retrieveCall(args$); - __dbc_del_reply result$ = __DB_dbc_del_4001(args$); - call.reply(result$); - break; - } - case 47: { - __dbc_dup_msg args$ = new __dbc_dup_msg(); - call.retrieveCall(args$); - __dbc_dup_reply result$ = __DB_dbc_dup_4001(args$); - call.reply(result$); - break; - } - case 48: { - __dbc_get_msg args$ = new __dbc_get_msg(); - call.retrieveCall(args$); - __dbc_get_reply result$ = __DB_dbc_get_4001(args$); - call.reply(result$); - break; - } - case 49: { - __dbc_pget_msg args$ = new __dbc_pget_msg(); - call.retrieveCall(args$); - __dbc_pget_reply result$ = __DB_dbc_pget_4001(args$); - call.reply(result$); - break; - } - case 50: { - __dbc_put_msg args$ = new __dbc_put_msg(); - call.retrieveCall(args$); - __dbc_put_reply result$ = __DB_dbc_put_4001(args$); - call.reply(result$); - break; - } - default: - call.failProcedureUnavailable(); - } - } else { - call.failProcedureUnavailable(); - } - } - - public abstract __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg arg1); - - public abstract __env_close_reply __DB_env_close_4001(__env_close_msg arg1); - - public abstract __env_create_reply __DB_env_create_4001(__env_create_msg arg1); - - public abstract __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg arg1); - - public abstract __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg arg1); - - public abstract __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg arg1); - - public abstract __env_flags_reply __DB_env_flags_4001(__env_flags_msg arg1); - - public abstract __env_open_reply __DB_env_open_4001(__env_open_msg arg1); - - public abstract __env_remove_reply __DB_env_remove_4001(__env_remove_msg arg1); - - public abstract __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg arg1); - - public abstract __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg arg1); - - public abstract __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg arg1); - - public abstract __txn_discard_reply 
__DB_txn_discard_4001(__txn_discard_msg arg1); - - public abstract __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg arg1); - - public abstract __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg arg1); - - public abstract __db_associate_reply __DB_db_associate_4001(__db_associate_msg arg1); - - public abstract __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg arg1); - - public abstract __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg arg1); - - public abstract __db_close_reply __DB_db_close_4001(__db_close_msg arg1); - - public abstract __db_create_reply __DB_db_create_4001(__db_create_msg arg1); - - public abstract __db_del_reply __DB_db_del_4001(__db_del_msg arg1); - - public abstract __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg arg1); - - public abstract __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg arg1); - - public abstract __db_flags_reply __DB_db_flags_4001(__db_flags_msg arg1); - - public abstract __db_get_reply __DB_db_get_4001(__db_get_msg arg1); - - public abstract __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg arg1); - - public abstract __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg arg1); - - public abstract __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg arg1); - - public abstract __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg arg1); - - public abstract __db_open_reply __DB_db_open_4001(__db_open_msg arg1); - - public abstract __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg arg1); - - public abstract __db_pget_reply __DB_db_pget_4001(__db_pget_msg arg1); - - public abstract __db_put_reply __DB_db_put_4001(__db_put_msg arg1); - - public abstract __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg arg1); - - public abstract __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg arg1); - - public abstract __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg arg1); - - public abstract __db_remove_reply 
__DB_db_remove_4001(__db_remove_msg arg1); - - public abstract __db_rename_reply __DB_db_rename_4001(__db_rename_msg arg1); - - public abstract __db_stat_reply __DB_db_stat_4001(__db_stat_msg arg1); - - public abstract __db_sync_reply __DB_db_sync_4001(__db_sync_msg arg1); - - public abstract __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg arg1); - - public abstract __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg arg1); - - public abstract __db_join_reply __DB_db_join_4001(__db_join_msg arg1); - - public abstract __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg arg1); - - public abstract __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg arg1); - - public abstract __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg arg1); - - public abstract __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg arg1); - - public abstract __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg arg1); - - public abstract __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg arg1); - - public abstract __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg arg1); - -} -// End of DbServerStub.java diff --git a/storage/bdb/rpc_server/java/gen/__db_associate_msg.java b/storage/bdb/rpc_server/java/gen/__db_associate_msg.java deleted file mode 100644 index 8977303b99a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_associate_msg.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 4/25/02 11:01 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_associate_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int sdbpcl_id; - public int flags; - - public __db_associate_msg() { - } - - public __db_associate_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream 
xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(sdbpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - sdbpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_associate_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_associate_reply.java b/storage/bdb/rpc_server/java/gen/__db_associate_reply.java deleted file mode 100644 index 476d0868b33..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_associate_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_associate_reply implements XdrAble { - public int status; - - public __db_associate_reply() { - } - - public __db_associate_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_associate_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java deleted file mode 100644 index 007ce16a974..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ 
-package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_bt_maxkey_msg implements XdrAble { - public int dbpcl_id; - public int maxkey; - - public __db_bt_maxkey_msg() { - } - - public __db_bt_maxkey_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(maxkey); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - maxkey = xdr.xdrDecodeInt(); - } - -} -// End of __db_bt_maxkey_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java deleted file mode 100644 index 855573271b3..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_bt_maxkey_reply implements XdrAble { - public int status; - - public __db_bt_maxkey_reply() { - } - - public __db_bt_maxkey_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_bt_maxkey_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java deleted file mode 100644 index c86ec382456..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_bt_minkey_msg implements XdrAble { - public int dbpcl_id; - public int minkey; - - public __db_bt_minkey_msg() { - } - - public __db_bt_minkey_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(minkey); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - minkey = xdr.xdrDecodeInt(); - } - -} -// End of __db_bt_minkey_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java deleted file mode 100644 index 4d944b6bf33..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_bt_minkey_reply implements XdrAble { - public int status; - - public __db_bt_minkey_reply() { - } - - public __db_bt_minkey_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - 
status = xdr.xdrDecodeInt(); - } - -} -// End of __db_bt_minkey_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_close_msg.java b/storage/bdb/rpc_server/java/gen/__db_close_msg.java deleted file mode 100644 index ce8d213701b..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_close_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_close_msg implements XdrAble { - public int dbpcl_id; - public int flags; - - public __db_close_msg() { - } - - public __db_close_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_close_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_close_reply.java b/storage/bdb/rpc_server/java/gen/__db_close_reply.java deleted file mode 100644 index a9380e9c053..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_close_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_close_reply implements XdrAble { - public int status; - - public __db_close_reply() { - } - - public __db_close_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - 
public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_close_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_create_msg.java b/storage/bdb/rpc_server/java/gen/__db_create_msg.java deleted file mode 100644 index d21ca50f807..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_create_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_create_msg implements XdrAble { - public int dbenvcl_id; - public int flags; - - public __db_create_msg() { - } - - public __db_create_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_create_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_create_reply.java b/storage/bdb/rpc_server/java/gen/__db_create_reply.java deleted file mode 100644 index e3dcbbab14e..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_create_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public 
class __db_create_reply implements XdrAble { - public int status; - public int dbcl_id; - - public __db_create_reply() { - } - - public __db_create_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dbcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dbcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __db_create_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java b/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java deleted file mode 100644 index 60e09db6ebb..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_cursor_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int flags; - - public __db_cursor_msg() { - } - - public __db_cursor_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_cursor_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java b/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java deleted file mode 100644 index bafd2817c67..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_cursor_reply implements XdrAble { - public int status; - public int dbcidcl_id; - - public __db_cursor_reply() { - } - - public __db_cursor_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dbcidcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dbcidcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __db_cursor_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_del_msg.java b/storage/bdb/rpc_server/java/gen/__db_del_msg.java deleted file mode 100644 index fdf47907dd6..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_del_msg.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_del_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int flags; - - public __db_del_msg() { - } - - public __db_del_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - 
xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_del_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_del_reply.java b/storage/bdb/rpc_server/java/gen/__db_del_reply.java deleted file mode 100644 index 8a55445944f..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_del_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_del_reply implements XdrAble { - public int status; - - public __db_del_reply() { - } - - public __db_del_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_del_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java deleted file mode 100644 index 46d9f8ee7e8..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by 
jrpcgen 0.95.1 on 2/13/02 1:05 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_encrypt_msg implements XdrAble { - public int dbpcl_id; - public String passwd; - public int flags; - - public __db_encrypt_msg() { - } - - public __db_encrypt_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeString(passwd); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - passwd = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_encrypt_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java deleted file mode 100644 index a97cc98c90b..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_encrypt_reply implements XdrAble { - public int status; - - public __db_encrypt_reply() { - } - - public __db_encrypt_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of 
__db_encrypt_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java deleted file mode 100644 index 41a51cff9c4..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_extentsize_msg implements XdrAble { - public int dbpcl_id; - public int extentsize; - - public __db_extentsize_msg() { - } - - public __db_extentsize_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(extentsize); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - extentsize = xdr.xdrDecodeInt(); - } - -} -// End of __db_extentsize_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java deleted file mode 100644 index 409625486c7..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_extentsize_reply implements XdrAble { - public int status; - - public __db_extentsize_reply() { - } - - public __db_extentsize_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { 
- xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_extentsize_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_flags_msg.java b/storage/bdb/rpc_server/java/gen/__db_flags_msg.java deleted file mode 100644 index d8752e2e4dd..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_flags_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_flags_msg implements XdrAble { - public int dbpcl_id; - public int flags; - - public __db_flags_msg() { - } - - public __db_flags_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_flags_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_flags_reply.java b/storage/bdb/rpc_server/java/gen/__db_flags_reply.java deleted file mode 100644 index c4ec253db83..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_flags_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import 
java.io.IOException; - -public class __db_flags_reply implements XdrAble { - public int status; - - public __db_flags_reply() { - } - - public __db_flags_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_flags_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_get_msg.java b/storage/bdb/rpc_server/java/gen/__db_get_msg.java deleted file mode 100644 index 3dfe8e9d86e..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_get_msg.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_get_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int datadlen; - public int datadoff; - public int dataulen; - public int dataflags; - public byte [] datadata; - public int flags; - - public __db_get_msg() { - } - - public __db_get_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - 
xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_get_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_get_reply.java b/storage/bdb/rpc_server/java/gen/__db_get_reply.java deleted file mode 100644 index 64ce525728a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_get_reply.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_get_reply implements XdrAble { - public int status; - public byte [] keydata; - public byte [] datadata; - - public __db_get_reply() { - } - - public __db_get_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeDynamicOpaque(datadata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __db_get_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java 
b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java deleted file mode 100644 index 8d2ed1b1c0b..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_h_ffactor_msg implements XdrAble { - public int dbpcl_id; - public int ffactor; - - public __db_h_ffactor_msg() { - } - - public __db_h_ffactor_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(ffactor); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - ffactor = xdr.xdrDecodeInt(); - } - -} -// End of __db_h_ffactor_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java deleted file mode 100644 index 1885ec50240..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_h_ffactor_reply implements XdrAble { - public int status; - - public __db_h_ffactor_reply() { - } - - public __db_h_ffactor_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - 
xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_h_ffactor_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java deleted file mode 100644 index 7d084351755..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_h_nelem_msg implements XdrAble { - public int dbpcl_id; - public int nelem; - - public __db_h_nelem_msg() { - } - - public __db_h_nelem_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(nelem); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - nelem = xdr.xdrDecodeInt(); - } - -} -// End of __db_h_nelem_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java deleted file mode 100644 index 20c5c774e69..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_h_nelem_reply implements XdrAble { - public int status; - - public 
__db_h_nelem_reply() { - } - - public __db_h_nelem_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_h_nelem_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_join_msg.java b/storage/bdb/rpc_server/java/gen/__db_join_msg.java deleted file mode 100644 index 88c72dbd6ba..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_join_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_join_msg implements XdrAble { - public int dbpcl_id; - public int [] curs; - public int flags; - - public __db_join_msg() { - } - - public __db_join_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeIntVector(curs); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - curs = xdr.xdrDecodeIntVector(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_join_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_join_reply.java b/storage/bdb/rpc_server/java/gen/__db_join_reply.java deleted file mode 100644 index 80980e23d6c..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_join_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is 
part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_join_reply implements XdrAble { - public int status; - public int dbcidcl_id; - - public __db_join_reply() { - } - - public __db_join_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dbcidcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dbcidcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __db_join_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java b/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java deleted file mode 100644 index 233077e0964..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_key_range_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int flags; - - public __db_key_range_msg() { - } - - public __db_key_range_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - 
xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_key_range_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java b/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java deleted file mode 100644 index 09244c13d1d..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_key_range_reply implements XdrAble { - public int status; - public double less; - public double equal; - public double greater; - - public __db_key_range_reply() { - } - - public __db_key_range_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDouble(less); - xdr.xdrEncodeDouble(equal); - xdr.xdrEncodeDouble(greater); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - less = xdr.xdrDecodeDouble(); - equal = xdr.xdrDecodeDouble(); - greater = xdr.xdrDecodeDouble(); - } - -} -// End of __db_key_range_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java b/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java deleted file mode 100644 
index 3399ad8daf0..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_lorder_msg implements XdrAble { - public int dbpcl_id; - public int lorder; - - public __db_lorder_msg() { - } - - public __db_lorder_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(lorder); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - lorder = xdr.xdrDecodeInt(); - } - -} -// End of __db_lorder_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java b/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java deleted file mode 100644 index cdcda4d4f43..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_lorder_reply implements XdrAble { - public int status; - - public __db_lorder_reply() { - } - - public __db_lorder_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - 
status = xdr.xdrDecodeInt(); - } - -} -// End of __db_lorder_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_open_msg.java b/storage/bdb/rpc_server/java/gen/__db_open_msg.java deleted file mode 100644 index 14dbd9e3b0c..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_open_msg.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_open_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public String name; - public String subdb; - public int type; - public int flags; - public int mode; - - public __db_open_msg() { - } - - public __db_open_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeString(name); - xdr.xdrEncodeString(subdb); - xdr.xdrEncodeInt(type); - xdr.xdrEncodeInt(flags); - xdr.xdrEncodeInt(mode); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - name = xdr.xdrDecodeString(); - subdb = xdr.xdrDecodeString(); - type = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - mode = xdr.xdrDecodeInt(); - } - -} -// End of __db_open_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_open_reply.java b/storage/bdb/rpc_server/java/gen/__db_open_reply.java deleted file mode 100644 index 9b36b44a626..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_open_reply.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 7/15/04 4:39 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package 
for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_open_reply implements XdrAble { - public int status; - public int dbcl_id; - public int type; - public int lorder; - - public __db_open_reply() { - } - - public __db_open_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dbcl_id); - xdr.xdrEncodeInt(type); - xdr.xdrEncodeInt(lorder); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dbcl_id = xdr.xdrDecodeInt(); - type = xdr.xdrDecodeInt(); - lorder = xdr.xdrDecodeInt(); - } - -} -// End of __db_open_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java deleted file mode 100644 index a452ea4e381..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_pagesize_msg implements XdrAble { - public int dbpcl_id; - public int pagesize; - - public __db_pagesize_msg() { - } - - public __db_pagesize_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(pagesize); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - 
pagesize = xdr.xdrDecodeInt(); - } - -} -// End of __db_pagesize_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java deleted file mode 100644 index 830b2078b34..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_pagesize_reply implements XdrAble { - public int status; - - public __db_pagesize_reply() { - } - - public __db_pagesize_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_pagesize_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_pget_msg.java b/storage/bdb/rpc_server/java/gen/__db_pget_msg.java deleted file mode 100644 index 11d27ca9e46..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_pget_msg.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_pget_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int skeydlen; - public int skeydoff; - public int skeyulen; - public int skeyflags; - public byte [] skeydata; - public int pkeydlen; - public int pkeydoff; - public int pkeyulen; - 
public int pkeyflags; - public byte [] pkeydata; - public int datadlen; - public int datadoff; - public int dataulen; - public int dataflags; - public byte [] datadata; - public int flags; - - public __db_pget_msg() { - } - - public __db_pget_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(skeydlen); - xdr.xdrEncodeInt(skeydoff); - xdr.xdrEncodeInt(skeyulen); - xdr.xdrEncodeInt(skeyflags); - xdr.xdrEncodeDynamicOpaque(skeydata); - xdr.xdrEncodeInt(pkeydlen); - xdr.xdrEncodeInt(pkeydoff); - xdr.xdrEncodeInt(pkeyulen); - xdr.xdrEncodeInt(pkeyflags); - xdr.xdrEncodeDynamicOpaque(pkeydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - skeydlen = xdr.xdrDecodeInt(); - skeydoff = xdr.xdrDecodeInt(); - skeyulen = xdr.xdrDecodeInt(); - skeyflags = xdr.xdrDecodeInt(); - skeydata = xdr.xdrDecodeDynamicOpaque(); - pkeydlen = xdr.xdrDecodeInt(); - pkeydoff = xdr.xdrDecodeInt(); - pkeyulen = xdr.xdrDecodeInt(); - pkeyflags = xdr.xdrDecodeInt(); - pkeydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_pget_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_pget_reply.java b/storage/bdb/rpc_server/java/gen/__db_pget_reply.java deleted file mode 100644 index 86c9c2111b9..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_pget_reply.java +++ /dev/null 
@@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_pget_reply implements XdrAble { - public int status; - public byte [] skeydata; - public byte [] pkeydata; - public byte [] datadata; - - public __db_pget_reply() { - } - - public __db_pget_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(skeydata); - xdr.xdrEncodeDynamicOpaque(pkeydata); - xdr.xdrEncodeDynamicOpaque(datadata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - skeydata = xdr.xdrDecodeDynamicOpaque(); - pkeydata = xdr.xdrDecodeDynamicOpaque(); - datadata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __db_pget_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_put_msg.java b/storage/bdb/rpc_server/java/gen/__db_put_msg.java deleted file mode 100644 index b6159cff3a8..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_put_msg.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_put_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int datadlen; - public int datadoff; - public int dataulen; - public int dataflags; - public 
byte [] datadata; - public int flags; - - public __db_put_msg() { - } - - public __db_put_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_put_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_put_reply.java b/storage/bdb/rpc_server/java/gen/__db_put_reply.java deleted file mode 100644 index fc89ae1c3bd..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_put_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_put_reply implements XdrAble { - public int status; - public byte [] keydata; - - public __db_put_reply() { - } - - public __db_put_reply(XdrDecodingStream xdr) - throws 
OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(keydata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __db_put_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java deleted file mode 100644 index c386bddd256..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_delim_msg implements XdrAble { - public int dbpcl_id; - public int delim; - - public __db_re_delim_msg() { - } - - public __db_re_delim_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(delim); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - delim = xdr.xdrDecodeInt(); - } - -} -// End of __db_re_delim_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java deleted file mode 100644 index aa8a797f53d..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See 
http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_delim_reply implements XdrAble { - public int status; - - public __db_re_delim_reply() { - } - - public __db_re_delim_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_re_delim_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java deleted file mode 100644 index 664de5c899c..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_len_msg implements XdrAble { - public int dbpcl_id; - public int len; - - public __db_re_len_msg() { - } - - public __db_re_len_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(len); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - len = xdr.xdrDecodeInt(); - } - -} -// End of __db_re_len_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java deleted file mode 100644 index dda27c8c123..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_len_reply implements XdrAble { - public int status; - - public __db_re_len_reply() { - } - - public __db_re_len_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_re_len_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java deleted file mode 100644 index 2c1290b6e74..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_pad_msg implements XdrAble { - public int dbpcl_id; - public int pad; - - public __db_re_pad_msg() { - } - - public __db_re_pad_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(pad); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - pad = xdr.xdrDecodeInt(); - } - -} -// End 
of __db_re_pad_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java deleted file mode 100644 index f0aaa9a3a70..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_re_pad_reply implements XdrAble { - public int status; - - public __db_re_pad_reply() { - } - - public __db_re_pad_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_re_pad_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_remove_msg.java b/storage/bdb/rpc_server/java/gen/__db_remove_msg.java deleted file mode 100644 index dfa9066a7ec..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_remove_msg.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_remove_msg implements XdrAble { - public int dbpcl_id; - public String name; - public String subdb; - public int flags; - - public __db_remove_msg() { - } - - public __db_remove_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws 
OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeString(name); - xdr.xdrEncodeString(subdb); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - name = xdr.xdrDecodeString(); - subdb = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_remove_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_remove_reply.java b/storage/bdb/rpc_server/java/gen/__db_remove_reply.java deleted file mode 100644 index a2b86c04985..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_remove_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_remove_reply implements XdrAble { - public int status; - - public __db_remove_reply() { - } - - public __db_remove_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_remove_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_rename_msg.java b/storage/bdb/rpc_server/java/gen/__db_rename_msg.java deleted file mode 100644 index 12b434e3375..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_rename_msg.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import 
org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_rename_msg implements XdrAble { - public int dbpcl_id; - public String name; - public String subdb; - public String newname; - public int flags; - - public __db_rename_msg() { - } - - public __db_rename_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeString(name); - xdr.xdrEncodeString(subdb); - xdr.xdrEncodeString(newname); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - name = xdr.xdrDecodeString(); - subdb = xdr.xdrDecodeString(); - newname = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_rename_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_rename_reply.java b/storage/bdb/rpc_server/java/gen/__db_rename_reply.java deleted file mode 100644 index 4e4a22be570..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_rename_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_rename_reply implements XdrAble { - public int status; - - public __db_rename_reply() { - } - - public __db_rename_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_rename_reply.java diff --git 
a/storage/bdb/rpc_server/java/gen/__db_stat_msg.java b/storage/bdb/rpc_server/java/gen/__db_stat_msg.java deleted file mode 100644 index 419ee14a66e..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_stat_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 7/15/04 4:39 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_stat_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int flags; - - public __db_stat_msg() { - } - - public __db_stat_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_stat_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_stat_reply.java b/storage/bdb/rpc_server/java/gen/__db_stat_reply.java deleted file mode 100644 index 8df1460149a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_stat_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_stat_reply implements XdrAble { - public int status; - public int [] stats; - - public __db_stat_reply() { - } - - public __db_stat_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - 
xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeIntVector(stats); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - stats = xdr.xdrDecodeIntVector(); - } - -} -// End of __db_stat_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_sync_msg.java b/storage/bdb/rpc_server/java/gen/__db_sync_msg.java deleted file mode 100644 index c6594670fc6..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_sync_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_sync_msg implements XdrAble { - public int dbpcl_id; - public int flags; - - public __db_sync_msg() { - } - - public __db_sync_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_sync_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_sync_reply.java b/storage/bdb/rpc_server/java/gen/__db_sync_reply.java deleted file mode 100644 index d0a8bc8b196..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_sync_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; 
-import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_sync_reply implements XdrAble { - public int status; - - public __db_sync_reply() { - } - - public __db_sync_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __db_sync_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java b/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java deleted file mode 100644 index 38810d65660..00000000000 --- a/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_truncate_msg implements XdrAble { - public int dbpcl_id; - public int txnpcl_id; - public int flags; - - public __db_truncate_msg() { - } - - public __db_truncate_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbpcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbpcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __db_truncate_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java b/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java deleted file mode 100644 index c4f68869007..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __db_truncate_reply implements XdrAble { - public int status; - public int count; - - public __db_truncate_reply() { - } - - public __db_truncate_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(count); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - count = xdr.xdrDecodeInt(); - } - -} -// End of __db_truncate_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java deleted file mode 100644 index eb1ca7f7e17..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_close_msg implements XdrAble { - public int dbccl_id; - - public __dbc_close_msg() { - } - - public __dbc_close_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); 
- } - -} -// End of __dbc_close_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java deleted file mode 100644 index 47459aace36..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_close_reply implements XdrAble { - public int status; - - public __dbc_close_reply() { - } - - public __dbc_close_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_close_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java deleted file mode 100644 index 5f554e18a1b..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_count_msg implements XdrAble { - public int dbccl_id; - public int flags; - - public __dbc_count_msg() { - } - - public __dbc_count_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException 
{ - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_count_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java deleted file mode 100644 index 4daecdd2296..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_count_reply implements XdrAble { - public int status; - public int dupcount; - - public __dbc_count_reply() { - } - - public __dbc_count_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dupcount); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dupcount = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_count_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java deleted file mode 100644 index bc4bd05f573..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_del_msg 
implements XdrAble { - public int dbccl_id; - public int flags; - - public __dbc_del_msg() { - } - - public __dbc_del_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_del_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java deleted file mode 100644 index e55ac9ffaf6..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_del_reply implements XdrAble { - public int status; - - public __dbc_del_reply() { - } - - public __dbc_del_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_del_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java deleted file mode 100644 index 9a3894e6158..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for 
Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_dup_msg implements XdrAble { - public int dbccl_id; - public int flags; - - public __dbc_dup_msg() { - } - - public __dbc_dup_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_dup_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java deleted file mode 100644 index 6b942f1a61a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_dup_reply implements XdrAble { - public int status; - public int dbcidcl_id; - - public __dbc_dup_reply() { - } - - public __dbc_dup_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(dbcidcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - dbcidcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_dup_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java 
b/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java deleted file mode 100644 index 672ace43fdd..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_get_msg implements XdrAble { - public int dbccl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int datadlen; - public int datadoff; - public int dataulen; - public int dataflags; - public byte [] datadata; - public int flags; - - public __dbc_get_msg() { - } - - public __dbc_get_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_get_msg.java diff --git 
a/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java deleted file mode 100644 index 8671fec6335..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_get_reply implements XdrAble { - public int status; - public byte [] keydata; - public byte [] datadata; - - public __dbc_get_reply() { - } - - public __dbc_get_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeDynamicOpaque(datadata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __dbc_get_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java deleted file mode 100644 index 8ca3c6171a1..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_pget_msg implements XdrAble { - public int dbccl_id; - public int skeydlen; - public int skeydoff; - public int skeyulen; - public int skeyflags; - public byte 
[] skeydata; - public int pkeydlen; - public int pkeydoff; - public int pkeyulen; - public int pkeyflags; - public byte [] pkeydata; - public int datadlen; - public int datadoff; - public int dataulen; - public int dataflags; - public byte [] datadata; - public int flags; - - public __dbc_pget_msg() { - } - - public __dbc_pget_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(skeydlen); - xdr.xdrEncodeInt(skeydoff); - xdr.xdrEncodeInt(skeyulen); - xdr.xdrEncodeInt(skeyflags); - xdr.xdrEncodeDynamicOpaque(skeydata); - xdr.xdrEncodeInt(pkeydlen); - xdr.xdrEncodeInt(pkeydoff); - xdr.xdrEncodeInt(pkeyulen); - xdr.xdrEncodeInt(pkeyflags); - xdr.xdrEncodeDynamicOpaque(pkeydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - skeydlen = xdr.xdrDecodeInt(); - skeydoff = xdr.xdrDecodeInt(); - skeyulen = xdr.xdrDecodeInt(); - skeyflags = xdr.xdrDecodeInt(); - skeydata = xdr.xdrDecodeDynamicOpaque(); - pkeydlen = xdr.xdrDecodeInt(); - pkeydoff = xdr.xdrDecodeInt(); - pkeyulen = xdr.xdrDecodeInt(); - pkeyflags = xdr.xdrDecodeInt(); - pkeydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_pget_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java deleted file mode 100644 index 16cc795878d..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_pget_reply implements XdrAble { - public int status; - public byte [] skeydata; - public byte [] pkeydata; - public byte [] datadata; - - public __dbc_pget_reply() { - } - - public __dbc_pget_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(skeydata); - xdr.xdrEncodeDynamicOpaque(pkeydata); - xdr.xdrEncodeDynamicOpaque(datadata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - skeydata = xdr.xdrDecodeDynamicOpaque(); - pkeydata = xdr.xdrDecodeDynamicOpaque(); - datadata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __dbc_pget_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java deleted file mode 100644 index 98d12423dc5..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_put_msg implements XdrAble { - public int dbccl_id; - public int keydlen; - public int keydoff; - public int keyulen; - public int keyflags; - public byte [] keydata; - public int datadlen; - public int datadoff; - 
public int dataulen; - public int dataflags; - public byte [] datadata; - public int flags; - - public __dbc_put_msg() { - } - - public __dbc_put_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbccl_id); - xdr.xdrEncodeInt(keydlen); - xdr.xdrEncodeInt(keydoff); - xdr.xdrEncodeInt(keyulen); - xdr.xdrEncodeInt(keyflags); - xdr.xdrEncodeDynamicOpaque(keydata); - xdr.xdrEncodeInt(datadlen); - xdr.xdrEncodeInt(datadoff); - xdr.xdrEncodeInt(dataulen); - xdr.xdrEncodeInt(dataflags); - xdr.xdrEncodeDynamicOpaque(datadata); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbccl_id = xdr.xdrDecodeInt(); - keydlen = xdr.xdrDecodeInt(); - keydoff = xdr.xdrDecodeInt(); - keyulen = xdr.xdrDecodeInt(); - keyflags = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - datadlen = xdr.xdrDecodeInt(); - datadoff = xdr.xdrDecodeInt(); - dataulen = xdr.xdrDecodeInt(); - dataflags = xdr.xdrDecodeInt(); - datadata = xdr.xdrDecodeDynamicOpaque(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __dbc_put_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java deleted file mode 100644 index 385f9f783fb..00000000000 --- a/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __dbc_put_reply implements XdrAble { - public int status; - public byte [] keydata; - - public __dbc_put_reply() { - } - - public __dbc_put_reply(XdrDecodingStream xdr) - throws 
OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeDynamicOpaque(keydata); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - keydata = xdr.xdrDecodeDynamicOpaque(); - } - -} -// End of __dbc_put_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java deleted file mode 100644 index d1fce1ffa35..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_cachesize_msg implements XdrAble { - public int dbenvcl_id; - public int gbytes; - public int bytes; - public int ncache; - - public __env_cachesize_msg() { - } - - public __env_cachesize_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(gbytes); - xdr.xdrEncodeInt(bytes); - xdr.xdrEncodeInt(ncache); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - gbytes = xdr.xdrDecodeInt(); - bytes = xdr.xdrDecodeInt(); - ncache = xdr.xdrDecodeInt(); - } - -} -// End of __env_cachesize_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java deleted file mode 100644 index 193f8355d71..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_cachesize_reply implements XdrAble { - public int status; - - public __env_cachesize_reply() { - } - - public __env_cachesize_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_cachesize_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_close_msg.java b/storage/bdb/rpc_server/java/gen/__env_close_msg.java deleted file mode 100644 index 5e657bacfa5..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_close_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_close_msg implements XdrAble { - public int dbenvcl_id; - public int flags; - - public __env_close_msg() { - } - - public __env_close_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - flags = 
xdr.xdrDecodeInt(); - } - -} -// End of __env_close_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_close_reply.java b/storage/bdb/rpc_server/java/gen/__env_close_reply.java deleted file mode 100644 index 11e61f7c8c3..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_close_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_close_reply implements XdrAble { - public int status; - - public __env_close_reply() { - } - - public __env_close_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_close_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_create_msg.java b/storage/bdb/rpc_server/java/gen/__env_create_msg.java deleted file mode 100644 index dbe546ae23a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_create_msg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_create_msg implements XdrAble { - public int timeout; - - public __env_create_msg() { - } - - public __env_create_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, 
IOException { - xdr.xdrEncodeInt(timeout); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - timeout = xdr.xdrDecodeInt(); - } - -} -// End of __env_create_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_create_reply.java b/storage/bdb/rpc_server/java/gen/__env_create_reply.java deleted file mode 100644 index 5427fc4bc1e..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_create_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_create_reply implements XdrAble { - public int status; - public int envcl_id; - - public __env_create_reply() { - } - - public __env_create_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(envcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - envcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __env_create_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java deleted file mode 100644 index 9730a92c590..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_dbremove_msg implements XdrAble { - 
public int dbenvcl_id; - public int txnpcl_id; - public String name; - public String subdb; - public int flags; - - public __env_dbremove_msg() { - } - - public __env_dbremove_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeString(name); - xdr.xdrEncodeString(subdb); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - name = xdr.xdrDecodeString(); - subdb = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __env_dbremove_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java deleted file mode 100644 index 75cc5a940cc..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_dbremove_reply implements XdrAble { - public int status; - - public __env_dbremove_reply() { - } - - public __env_dbremove_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_dbremove_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java 
b/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java deleted file mode 100644 index 0bbda262b64..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_dbrename_msg implements XdrAble { - public int dbenvcl_id; - public int txnpcl_id; - public String name; - public String subdb; - public String newname; - public int flags; - - public __env_dbrename_msg() { - } - - public __env_dbrename_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeString(name); - xdr.xdrEncodeString(subdb); - xdr.xdrEncodeString(newname); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - txnpcl_id = xdr.xdrDecodeInt(); - name = xdr.xdrDecodeString(); - subdb = xdr.xdrDecodeString(); - newname = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __env_dbrename_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java deleted file mode 100644 index 0cc8882305d..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import 
java.io.IOException; - -public class __env_dbrename_reply implements XdrAble { - public int status; - - public __env_dbrename_reply() { - } - - public __env_dbrename_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_dbrename_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java deleted file mode 100644 index 84e9a36d372..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_encrypt_msg implements XdrAble { - public int dbenvcl_id; - public String passwd; - public int flags; - - public __env_encrypt_msg() { - } - - public __env_encrypt_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeString(passwd); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - passwd = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __env_encrypt_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java deleted file mode 100644 index e202a3089d0..00000000000 --- 
a/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_encrypt_reply implements XdrAble { - public int status; - - public __env_encrypt_reply() { - } - - public __env_encrypt_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_encrypt_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_flags_msg.java b/storage/bdb/rpc_server/java/gen/__env_flags_msg.java deleted file mode 100644 index 25cd5f85f6d..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_flags_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_flags_msg implements XdrAble { - public int dbenvcl_id; - public int flags; - public int onoff; - - public __env_flags_msg() { - } - - public __env_flags_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(flags); - xdr.xdrEncodeInt(onoff); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id 
= xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - onoff = xdr.xdrDecodeInt(); - } - -} -// End of __env_flags_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_flags_reply.java b/storage/bdb/rpc_server/java/gen/__env_flags_reply.java deleted file mode 100644 index d348a9224ea..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_flags_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_flags_reply implements XdrAble { - public int status; - - public __env_flags_reply() { - } - - public __env_flags_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_flags_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_open_msg.java b/storage/bdb/rpc_server/java/gen/__env_open_msg.java deleted file mode 100644 index e4649b41f9e..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_open_msg.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_open_msg implements XdrAble { - public int dbenvcl_id; - public String home; - public int flags; - public int mode; - - public __env_open_msg() { - } - - public __env_open_msg(XdrDecodingStream xdr) - throws OncRpcException, 
IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeString(home); - xdr.xdrEncodeInt(flags); - xdr.xdrEncodeInt(mode); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - home = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - mode = xdr.xdrDecodeInt(); - } - -} -// End of __env_open_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_open_reply.java b/storage/bdb/rpc_server/java/gen/__env_open_reply.java deleted file mode 100644 index 1994afb4cf2..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_open_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_open_reply implements XdrAble { - public int status; - public int envcl_id; - - public __env_open_reply() { - } - - public __env_open_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(envcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - envcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __env_open_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__env_remove_msg.java b/storage/bdb/rpc_server/java/gen/__env_remove_msg.java deleted file mode 100644 index b32d758f0f5..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_remove_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * 
jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_remove_msg implements XdrAble { - public int dbenvcl_id; - public String home; - public int flags; - - public __env_remove_msg() { - } - - public __env_remove_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeString(home); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - home = xdr.xdrDecodeString(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __env_remove_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__env_remove_reply.java b/storage/bdb/rpc_server/java/gen/__env_remove_reply.java deleted file mode 100644 index 19e4d52f662..00000000000 --- a/storage/bdb/rpc_server/java/gen/__env_remove_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __env_remove_reply implements XdrAble { - public int status; - - public __env_remove_reply() { - } - - public __env_remove_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __env_remove_reply.java diff --git 
a/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java b/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java deleted file mode 100644 index ff44c534e46..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_abort_msg implements XdrAble { - public int txnpcl_id; - - public __txn_abort_msg() { - } - - public __txn_abort_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(txnpcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - txnpcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __txn_abort_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java b/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java deleted file mode 100644 index 58f275c1a8f..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_abort_reply implements XdrAble { - public int status; - - public __txn_abort_reply() { - } - - public __txn_abort_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void 
xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __txn_abort_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java b/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java deleted file mode 100644 index 877031e8d3a..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_begin_msg implements XdrAble { - public int dbenvcl_id; - public int parentcl_id; - public int flags; - - public __txn_begin_msg() { - } - - public __txn_begin_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(parentcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - parentcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __txn_begin_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java b/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java deleted file mode 100644 index 65a0c4016c2..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_begin_reply implements 
XdrAble { - public int status; - public int txnidcl_id; - - public __txn_begin_reply() { - } - - public __txn_begin_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeInt(txnidcl_id); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - txnidcl_id = xdr.xdrDecodeInt(); - } - -} -// End of __txn_begin_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java b/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java deleted file mode 100644 index 4b988d0c282..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_commit_msg implements XdrAble { - public int txnpcl_id; - public int flags; - - public __txn_commit_msg() { - } - - public __txn_commit_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - txnpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __txn_commit_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java b/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java deleted file mode 100644 index b26937b82dd..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * 
Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_commit_reply implements XdrAble { - public int status; - - public __txn_commit_reply() { - } - - public __txn_commit_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __txn_commit_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java b/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java deleted file mode 100644 index 87f5d4f77a7..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_discard_msg implements XdrAble { - public int txnpcl_id; - public int flags; - - public __txn_discard_msg() { - } - - public __txn_discard_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - txnpcl_id = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __txn_discard_msg.java diff --git 
a/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java b/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java deleted file mode 100644 index 9792211afcc..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_discard_reply implements XdrAble { - public int status; - - public __txn_discard_reply() { - } - - public __txn_discard_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __txn_discard_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java deleted file mode 100644 index 6e09f2c7771..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_prepare_msg implements XdrAble { - public int txnpcl_id; - public byte [] gid; - - public __txn_prepare_msg() { - } - - public __txn_prepare_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - 
xdr.xdrEncodeInt(txnpcl_id); - xdr.xdrEncodeOpaque(gid, 128); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - txnpcl_id = xdr.xdrDecodeInt(); - gid = xdr.xdrDecodeOpaque(128); - } - -} -// End of __txn_prepare_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java deleted file mode 100644 index d7590117952..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_prepare_reply implements XdrAble { - public int status; - - public __txn_prepare_reply() { - } - - public __txn_prepare_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - } - -} -// End of __txn_prepare_reply.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java b/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java deleted file mode 100644 index 65153334403..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_recover_msg implements XdrAble { - public int dbenvcl_id; - 
public int count; - public int flags; - - public __txn_recover_msg() { - } - - public __txn_recover_msg(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(dbenvcl_id); - xdr.xdrEncodeInt(count); - xdr.xdrEncodeInt(flags); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - dbenvcl_id = xdr.xdrDecodeInt(); - count = xdr.xdrDecodeInt(); - flags = xdr.xdrDecodeInt(); - } - -} -// End of __txn_recover_msg.java diff --git a/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java b/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java deleted file mode 100644 index 0161ec949da..00000000000 --- a/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -public class __txn_recover_reply implements XdrAble { - public int status; - public int [] txn; - public byte [] gid; - public int retcount; - - public __txn_recover_reply() { - } - - public __txn_recover_reply(XdrDecodingStream xdr) - throws OncRpcException, IOException { - xdrDecode(xdr); - } - - public void xdrEncode(XdrEncodingStream xdr) - throws OncRpcException, IOException { - xdr.xdrEncodeInt(status); - xdr.xdrEncodeIntVector(txn); - xdr.xdrEncodeDynamicOpaque(gid); - xdr.xdrEncodeInt(retcount); - } - - public void xdrDecode(XdrDecodingStream xdr) - throws OncRpcException, IOException { - status = xdr.xdrDecodeInt(); - txn = xdr.xdrDecodeIntVector(); - gid = xdr.xdrDecodeDynamicOpaque(); - retcount = xdr.xdrDecodeInt(); - } - -} -// End of __txn_recover_reply.java diff --git 
a/storage/bdb/rpc_server/java/gen/db_server.java b/storage/bdb/rpc_server/java/gen/db_server.java deleted file mode 100644 index dccc3d8ad16..00000000000 --- a/storage/bdb/rpc_server/java/gen/db_server.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Automatically generated by jrpcgen 0.95.1 on 2/11/04 1:28 PM - * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java - * See http://acplt.org/ks/remotetea.html for details - */ -package com.sleepycat.db.rpcserver; -import org.acplt.oncrpc.*; -import java.io.IOException; - -/** - * A collection of constants used by the "db_server" ONC/RPC program. - */ -public interface db_server { - public static final int __DB_db_get_re_delim_4003 = 49; - public static final int __DB_db_close_4003 = 25; - public static final int __DB_db_flags_4003 = 33; - public static final int __DB_dbc_dup_4003 = 65; - public static final int __DB_db_get_open_flags_4003 = 36; - public static final int __DB_db_encrypt_4003 = 29; - public static final int __DB_env_remove_4003 = 14; - public static final int __DB_env_dbrename_4003 = 6; - public static final int __DB_dbc_pget_4003 = 67; - public static final int __DB_env_get_cachesize_4003 = 1; - public static final int __DB_env_cachesize_4003 = 2; - public static final int __DB_db_get_lorder_4003 = 42; - public static final int __DB_db_lorder_4003 = 43; - public static final int __DB_db_key_range_4003 = 41; - public static final int __DB_env_get_open_flags_4003 = 12; - public static final int __DB_db_bt_minkey_4003 = 24; - public static final int __DB_db_sync_4003 = 58; - public static final int __DB_dbc_close_4003 = 62; - public static final int __DB_db_join_4003 = 61; - public static final int __DB_db_pagesize_4003 = 46; - public static final int DB_RPC_SERVERVERS = 4003; - public static final int __DB_db_open_4003 = 44; - public static final int __DB_db_get_extentsize_4003 = 30; - public static final int __DB_dbc_get_4003 = 66; - public static final int __DB_db_cursor_4003 = 60; - public static 
final int __DB_txn_commit_4003 = 17; - public static final int __DB_dbc_del_4003 = 64; - public static final int __DB_env_create_4003 = 4; - public static final int __DB_env_open_4003 = 13; - public static final int __DB_txn_prepare_4003 = 19; - public static final int __DB_db_get_re_pad_4003 = 54; - public static final int __DB_db_pget_4003 = 47; - public static final int __DB_db_stat_4003 = 57; - public static final int __DB_db_h_nelem_4003 = 40; - public static final int __DB_db_remove_4003 = 55; - public static final int __DB_db_get_flags_4003 = 32; - public static final int __DB_db_re_delim_4003 = 50; - public static final int __DB_db_re_pad_4003 = 53; - public static final int __DB_env_get_flags_4003 = 9; - public static final int __DB_txn_abort_4003 = 15; - public static final int __DB_env_get_encrypt_flags_4003 = 7; - public static final int __DB_db_get_encrypt_flags_4003 = 28; - public static final int __DB_db_get_h_ffactor_4003 = 37; - public static final int __DB_txn_recover_4003 = 20; - public static final int __DB_db_get_4003 = 34; - public static final int __DB_db_extentsize_4003 = 31; - public static final int __DB_db_get_h_nelem_4003 = 39; - public static final int __DB_dbc_put_4003 = 68; - public static final int DB_RPC_SERVERPROG = 351457; - public static final int __DB_db_get_re_len_4003 = 51; - public static final int __DB_db_truncate_4003 = 59; - public static final int __DB_db_del_4003 = 27; - public static final int __DB_db_bt_maxkey_4003 = 22; - public static final int __DB_env_dbremove_4003 = 5; - public static final int __DB_db_get_pagesize_4003 = 45; - public static final int __DB_db_get_name_4003 = 35; - public static final int __DB_txn_discard_4003 = 18; - public static final int __DB_db_re_len_4003 = 52; - public static final int __DB_env_close_4003 = 3; - public static final int __DB_env_flags_4003 = 10; - public static final int __DB_db_rename_4003 = 56; - public static final int __DB_db_get_bt_minkey_4003 = 23; - public static final 
int __DB_db_associate_4003 = 21; - public static final int __DB_txn_begin_4003 = 16; - public static final int __DB_env_encrypt_4003 = 8; - public static final int __DB_db_h_ffactor_4003 = 38; - public static final int __DB_db_put_4003 = 48; - public static final int __DB_db_create_4003 = 26; - public static final int __DB_env_get_home_4003 = 11; - public static final int __DB_dbc_count_4003 = 63; -} -// End of db_server.java diff --git a/storage/bdb/rpc_server/java/jrpcgen.jar b/storage/bdb/rpc_server/java/jrpcgen.jar deleted file mode 100644 index 338825b848d..00000000000 Binary files a/storage/bdb/rpc_server/java/jrpcgen.jar and /dev/null differ diff --git a/storage/bdb/rpc_server/java/oncrpc.jar b/storage/bdb/rpc_server/java/oncrpc.jar deleted file mode 100644 index e0f5cfa6966..00000000000 Binary files a/storage/bdb/rpc_server/java/oncrpc.jar and /dev/null differ diff --git a/storage/bdb/rpc_server/java/s_jrpcgen b/storage/bdb/rpc_server/java/s_jrpcgen deleted file mode 100644 index b585f431f0b..00000000000 --- a/storage/bdb/rpc_server/java/s_jrpcgen +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s ServerStubs ../db_server.x diff --git a/storage/bdb/rpc_server/rpc.src b/storage/bdb/rpc_server/rpc.src deleted file mode 100644 index fd6af9f96b1..00000000000 --- a/storage/bdb/rpc_server/rpc.src +++ /dev/null @@ -1,994 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc.src,v 1.90 2004/07/15 19:54:11 sue Exp $ -# -# Syntax: -# BEGIN function_name {CODE | RETCODE | NOFUNC} -# CODE: generate XDR and client code, return status -# Used for functions that just return a status and nothing else. -# RETCODE:generate XDR and client code, call return function -# (generate template return function) -# Used for functions that returns data. 
-# NOFUNC: generate a client "unsupported function" with right args -# Used for unsupported functions. -# -# ARG {IGNORE | STRING | INT | DBT | LIST | ID | CONST} C-type varname -# IGNORE: not passed to server -# STRING: string passed to server -# DBT: DBT arg passed to server -# LIST: list passed to server (NULL-terminated list of something) -# INT: integer passed to server -# ID: cl_id from arg passed to server -# GID: global id passed to server -# CONST: do not generate COMPQUIET (for NOFUNC only) -# FUNCPROT prototype -# FUNCARG functiontype -# These two *MUST* go together and FUNCPROT *MUST* be first. These -# are for the tricky user-supplied functions to some methods. They -# are not supported in RPC, so will be ignored, but the complicated -# syntax of their argument requires we have a special flag for them -# that contains the verbatim text to use in the prototype and the -# c-type, respectively. The FUNCARG must include the function, and -# must call it 'funcN', where N is the count of functions. Almost -# always it must be func0. A *very* few methods have more than one -# user-supplied functions, in those cases, it must be func0, func1, etc. -# -# All messages automatically return "status" and return that from -# the call to the function. RET's are additional things the server -# may return. RET is like ARG but does not need the IGNORE option. -# ARETs are RETs which are returned in arguments by the client. -# {RET | ARET} {STRING | INT | DBT | LIST | ID} varname [GID | INT | ID] -# STRING: string from server -# DBT: DBT arg from server -# LIST: list from server (NULL-terminated list) -# Must have list type of GID, ID or INT specified -# INT: integer from server -# ID: id from server stored in cl_id -# END function end. 
- -# -# Environment functions -# -BEGIN env_alloc NOFUNC -ARG ID DB_ENV * dbenv -FUNCPROT void *(*)(size_t) -FUNCARG void *(*func0) __P((size_t)) -FUNCPROT void *(*)(void *, size_t) -FUNCARG void *(*func1) __P((void *, size_t)) -FUNCPROT void (*)(void *) -FUNCARG void (*func2) __P((void *)) -END -BEGIN set_app_dispatch NOFUNC -ARG ID DB_ENV * dbenv -FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops) -FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops)) -END -BEGIN env_get_cachesize CODE -ARG ID DB_ENV * dbenv -ARET INT u_int32_t gbytes -ARET INT u_int32_t bytes -ARET INT int ncache -END -BEGIN env_cachesize CODE -ARG ID DB_ENV * dbenv -ARG INT u_int32_t gbytes -ARG INT u_int32_t bytes -ARG INT int ncache -END -BEGIN env_close CODE -ARG ID DB_ENV * dbenv -ARG INT u_int32_t flags -END -BEGIN env_create RETCODE -ARG IGNORE DB_ENV * dbenv -ARG INT long timeout -RET ID long env -END -BEGIN get_data_dirs NOFUNC -ARG ID DB_ENV * dbenv -ARG STRING const char *** dirpp -END -BEGIN set_data_dir NOFUNC -ARG ID DB_ENV * dbenv -ARG STRING const char * dir -END -BEGIN env_dbremove CODE -ARG ID DB_ENV * dbenv -ARG ID DB_TXN * txnp -ARG STRING const char * name -ARG STRING const char * subdb -ARG INT u_int32_t flags -END -BEGIN env_dbrename CODE -ARG ID DB_ENV * dbenv -ARG ID DB_TXN * txnp -ARG STRING const char * name -ARG STRING const char * subdb -ARG STRING const char * newname -ARG INT u_int32_t flags -END -BEGIN env_get_encrypt_flags CODE -ARG ID DB_ENV * dbenv -ARET INT u_int32_t flags -END -BEGIN env_encrypt CODE -ARG ID DB_ENV * dbenv -ARG STRING const char * passwd -ARG INT u_int32_t flags -END -BEGIN env_set_feedback NOFUNC -ARG ID DB_ENV * dbenv -FUNCPROT void (*)(DB_ENV *, int, int) -FUNCARG void (*func0) __P((DB_ENV *, int, int)) -END -BEGIN env_get_flags CODE -ARG ID DB_ENV * dbenv -ARET INT u_int32_t flags -END -BEGIN env_flags CODE -ARG ID DB_ENV * dbenv -ARG INT u_int32_t flags -ARG INT int onoff -END -BEGIN get_lg_bsize NOFUNC -ARG ID DB_ENV * 
dbenv -ARET INT u_int32_t bsize -END -BEGIN set_lg_bsize NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t bsize -END -BEGIN get_lg_dir NOFUNC -ARG ID DB_ENV * dbenv -ARET STRING const char * dir -END -BEGIN set_lg_dir NOFUNC -ARG ID DB_ENV * dbenv -ARG STRING const char * dir -END -BEGIN get_lg_max NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_lg_max NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_lg_regionmax NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_lg_regionmax NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_lk_conflicts NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE const u_int8_t ** conflicts -ARG IGNORE int * modes -END -BEGIN set_lk_conflict NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int8_t * conflicts -ARG INT int modes -END -BEGIN get_lk_detect NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t detect -END -BEGIN set_lk_detect NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t detect -END -BEGIN set_lk_max NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_lk_max_locks NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_lk_max_locks NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_lk_max_lockers NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_lk_max_lockers NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_lk_max_objects NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_lk_max_objects NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_mp_max_openfd NOFUNC -ARG ID DB_ENV * dbenv -ARET INT int nopen -END -BEGIN set_mp_max_openfd NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int nopen -END -BEGIN get_mp_max_write NOFUNC -ARG ID DB_ENV * dbenv -ARET INT int nwrite -ARET INT int nsleep -END -BEGIN set_mp_max_write NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int nwrite -ARG INT int nsleep -END -BEGIN get_mp_mmapsize NOFUNC -ARG ID DB_ENV * dbenv -ARET INT 
size_t mmapsize -END -BEGIN set_mp_mmapsize NOFUNC -ARG ID DB_ENV * dbenv -ARG INT size_t mmapsize -END -BEGIN env_get_home CODE -ARG ID DB_ENV * dbenv -ARET STRING const char * home -END -BEGIN env_get_open_flags CODE -ARG ID DB_ENV * dbenv -ARET INT u_int32_t flags -END -BEGIN env_open RETCODE -ARG ID DB_ENV * dbenv -ARG STRING const char * home -ARG INT u_int32_t flags -ARG INT int mode -RET ID long env -END -BEGIN env_paniccall NOFUNC -ARG ID DB_ENV * dbenv -FUNCPROT void (*)(DB_ENV *, int) -FUNCARG void (*func0) __P((DB_ENV *, int)) -END -BEGIN env_remove RETCODE -ARG ID DB_ENV * dbenv -ARG STRING const char * home -ARG INT u_int32_t flags -END -BEGIN get_shm_key NOFUNC -ARG ID DB_ENV * dbenv -ARET INT long shm_key -END -BEGIN set_shm_key NOFUNC -ARG ID DB_ENV * dbenv -ARG INT long shm_key -END -BEGIN get_tas_spins NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t tas_spins -END -BEGIN set_tas_spins NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t tas_spins -END -BEGIN get_timeout NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t timeout -ARG INT u_int32_t flags -END -BEGIN set_timeout NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t timeout -ARG INT u_int32_t flags -END -BEGIN get_tmp_dir NOFUNC -ARG ID DB_ENV * dbenv -ARET STRING const char * dir -END -BEGIN set_tmp_dir NOFUNC -ARG ID DB_ENV * dbenv -ARG STRING const char * dir -END -BEGIN get_tx_max NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t max -END -BEGIN set_tx_max NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t max -END -BEGIN get_tx_timestamp NOFUNC -ARG ID DB_ENV * dbenv -ARET INT time_t max -END -BEGIN set_tx_timestamp NOFUNC -ARG ID DB_ENV * dbenv -ARG INT time_t * max -END -BEGIN get_verbose NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t which -ARET INT int onoff -END -BEGIN set_verbose NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t which -ARG INT int onoff -END -# -# Transaction functions -# -BEGIN txn_abort RETCODE -ARG ID DB_TXN * txnp -END -BEGIN txn_begin RETCODE -ARG ID DB_ENV * 
dbenv -ARG ID DB_TXN * parent -ARG IGNORE DB_TXN ** txnpp -ARG INT u_int32_t flags -RET ID long txnid -END -BEGIN txn_checkpoint NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t kbyte -ARG INT u_int32_t min -ARG INT u_int32_t flags -END -BEGIN txn_commit RETCODE -ARG ID DB_TXN * txnp -ARG INT u_int32_t flags -END -BEGIN txn_discard RETCODE -ARG ID DB_TXN * txnp -ARG INT u_int32_t flags -END -BEGIN txn_prepare CODE -ARG ID DB_TXN * txnp -ARG GID u_int8_t * gid -END -BEGIN txn_recover RETCODE -ARG ID DB_ENV * dbenv -ARG IGNORE DB_PREPLIST * preplist -ARG INT long count -ARG IGNORE long * retp -ARG INT u_int32_t flags -RET LIST DB_TXN * txn ID -RET LIST u_int8_t * gid GID -RET INT long retcount -END -BEGIN txn_stat NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_TXN_STAT ** statp -ARG INT u_int32_t flags -END -BEGIN txn_timeout NOFUNC -ARG ID DB_TXN * txnp -ARG INT u_int32_t timeout -ARG INT u_int32_t flags -END -# -# Replication functions -# -BEGIN rep_elect NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int nsites -ARG INT int nvotes -ARG INT int pri -ARG INT u_int32_t timeout -ARG IGNORE int * idp -ARG INT u_int32_t flags -END -BEGIN rep_flush NOFUNC -ARG ID DB_ENV * dbenv -END -BEGIN rep_process_message NOFUNC -ARG ID DB_ENV * dbenv -ARG DBT DBT * rec -ARG DBT DBT * control -ARG IGNORE int * idp -ARG IGNORE DB_LSN * ret_lsnp -END -BEGIN rep_get_limit NOFUNC -ARG ID DB_ENV * dbenv -ARET INT u_int32_t mbytes -ARET INT u_int32_t bytes -END -BEGIN rep_set_limit NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t mbytes -ARG INT u_int32_t bytes -END -BEGIN rep_set_request NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t min -ARG INT u_int32_t max -END -BEGIN rep_set_rep_transport NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int id -FUNCPROT int (*)(DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t) -FUNCARG int (*func0) __P((DB_ENV *, const DBT *, const DBT *, const DB_LSN *, int, u_int32_t)) -END -BEGIN rep_start NOFUNC -ARG ID DB_ENV * dbenv -ARG DBT DBT * cdata -ARG 
INT u_int32_t flags -END -BEGIN rep_stat NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_REP_STAT ** statp -ARG INT u_int32_t flags -END - -# -# Database functions -# -BEGIN db_alloc NOFUNC -ARG ID DB * dbp -FUNCPROT void *(*)(size_t) -FUNCARG void *(*func0) __P((size_t)) -FUNCPROT void *(*)(void *, size_t) -FUNCARG void *(*func1) __P((void *, size_t)) -FUNCPROT void (*)(void *) -FUNCARG void (*func2) __P((void *)) -END -BEGIN db_associate CODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG ID DB * sdbp -FUNCPROT int (*)(DB *, const DBT *, const DBT *, DBT *) -FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *, DBT *)) -ARG INT u_int32_t flags -END -BEGIN db_bt_compare NOFUNC -ARG ID DB * dbp -FUNCPROT int (*)(DB *, const DBT *, const DBT *) -FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *)) -END -BEGIN db_bt_maxkey CODE -ARG ID DB * dbp -ARG INT u_int32_t maxkey -END -BEGIN db_get_bt_minkey CODE -ARG ID DB * dbp -ARET INT u_int32_t minkey -END -BEGIN db_bt_minkey CODE -ARG ID DB * dbp -ARG INT u_int32_t minkey -END -BEGIN db_bt_prefix NOFUNC -ARG ID DB * dbp -FUNCPROT size_t(*)(DB *, const DBT *, const DBT *) -FUNCARG size_t (*func0) __P((DB *, const DBT *, const DBT *)) -END -BEGIN db_set_append_recno NOFUNC -ARG ID DB * dbp -FUNCPROT int (*)(DB *, DBT *, db_recno_t) -FUNCARG int (*func0) __P((DB *, DBT *, db_recno_t)) -END -BEGIN db_get_cachesize NOFUNC -ARG ID DB * dbp -ARET INT u_int32_t gbytes -ARET INT u_int32_t bytes -ARET INT int ncache -END -BEGIN db_cachesize NOFUNC -ARG ID DB * dbp -ARG INT u_int32_t gbytes -ARG INT u_int32_t bytes -ARG INT int ncache -END -BEGIN db_close RETCODE -ARG ID DB * dbp -ARG INT u_int32_t flags -END -BEGIN db_create RETCODE -ARG IGNORE DB * dbp -ARG ID DB_ENV * dbenv -ARG INT u_int32_t flags -RET ID long db -END -BEGIN db_del CODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG DBT DBT * key -ARG INT u_int32_t flags -END -BEGIN db_dup_compare NOFUNC -ARG ID DB * dbp -FUNCPROT int (*)(DB *, const DBT *, const DBT *) 
-FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *)) -END -BEGIN db_get_encrypt_flags CODE -ARG ID DB * dbp -ARET INT u_int32_t flags -END -BEGIN db_encrypt CODE -ARG ID DB * dbp -ARG STRING const char * passwd -ARG INT u_int32_t flags -END -BEGIN db_get_extentsize CODE -ARG ID DB * dbp -ARET INT u_int32_t extentsize -END -BEGIN db_extentsize CODE -ARG ID DB * dbp -ARG INT u_int32_t extentsize -END -BEGIN db_fd NOFUNC -ARG ID DB * dbp -ARG IGNORE int * fdp -END -BEGIN db_feedback NOFUNC -ARG ID DB * dbp -FUNCPROT void (*)(DB *, int, int) -FUNCARG void (*func0) __P((DB *, int, int)) -END -BEGIN db_get_flags CODE -ARG ID DB * dbp -ARET INT u_int32_t flags -END -BEGIN db_flags CODE -ARG ID DB * dbp -ARG INT u_int32_t flags -END -BEGIN db_get RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG DBT DBT * key -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * key -RET DBT DBT * data -END -BEGIN db_get_name CODE -ARG ID DB * dbp -ARET STRING const char * filename -ARET STRING const char * dbname -END -BEGIN db_get_open_flags CODE -ARG ID DB * dbp -ARET INT u_int32_t flags -END -BEGIN db_get_h_ffactor CODE -ARG ID DB * dbp -ARET INT u_int32_t ffactor -END -BEGIN db_h_ffactor CODE -ARG ID DB * dbp -ARG INT u_int32_t ffactor -END -BEGIN db_h_hash NOFUNC -ARG ID DB * dbp -FUNCPROT u_int32_t(*)(DB *, const void *, u_int32_t) -FUNCARG u_int32_t (*func0) __P((DB *, const void *, u_int32_t)) -END -BEGIN db_get_h_nelem CODE -ARG ID DB * dbp -ARET INT u_int32_t nelem -END -BEGIN db_h_nelem CODE -ARG ID DB * dbp -ARG INT u_int32_t nelem -END -BEGIN db_key_range RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG DBT DBT * key -ARG IGNORE DB_KEY_RANGE * range -ARG INT u_int32_t flags -RET DBL double less -RET DBL double equal -RET DBL double greater -END -BEGIN db_get_lorder CODE -ARG ID DB * dbp -ARET INT int lorder -END -BEGIN db_lorder CODE -ARG ID DB * dbp -ARG INT int lorder -END -# XXX -# The line: -# RET INT u_int32_t dbflags -# should go away when a get_flags 
method exists. It is -# needed now because Tcl looks at dbp->flags. -# -BEGIN db_open RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG STRING const char * name -ARG STRING const char * subdb -ARG INT DBTYPE type -ARG INT u_int32_t flags -ARG INT int mode -RET ID long db -RET INT DBTYPE type -RET INT int lorder -END -BEGIN db_get_pagesize CODE -ARG ID DB * dbp -ARET INT u_int32_t pagesize -END -BEGIN db_pagesize CODE -ARG ID DB * dbp -ARG INT u_int32_t pagesize -END -BEGIN db_panic NOFUNC -ARG ID DB * dbp -FUNCPROT void (*)(DB_ENV *, int) -FUNCARG void (*func0) __P((DB_ENV *, int)) -END -BEGIN db_pget RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG DBT DBT * skey -ARG DBT DBT * pkey -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * skey -RET DBT DBT * pkey -RET DBT DBT * data -END -BEGIN db_put RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG DBT DBT * key -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * key -END -BEGIN db_get_re_delim CODE -ARG ID DB * dbp -ARET INT int delim -END -BEGIN db_re_delim CODE -ARG ID DB * dbp -ARG INT int delim -END -BEGIN db_get_re_len CODE -ARG ID DB * dbp -ARET INT u_int32_t len -END -BEGIN db_re_len CODE -ARG ID DB * dbp -ARG INT u_int32_t len -END -BEGIN db_re_pad CODE -ARG ID DB * dbp -ARG INT int pad -END -BEGIN db_get_re_pad CODE -ARG ID DB * dbp -ARET INT int pad -END -BEGIN db_get_re_source NOFUNC -ARG ID DB * dbp -ARET STRING const char * re_source -END -BEGIN db_re_source NOFUNC -ARG ID DB * dbp -ARG STRING const char * re_source -END -BEGIN db_remove RETCODE -ARG ID DB * dbp -ARG STRING const char * name -ARG STRING const char * subdb -ARG INT u_int32_t flags -END -BEGIN db_rename RETCODE -ARG ID DB * dbp -ARG STRING const char * name -ARG STRING const char * subdb -ARG STRING const char * newname -ARG INT u_int32_t flags -END -BEGIN db_stat RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG IGNORE void * sp -ARG INT u_int32_t flags -RET LIST u_int32_t * stats INT -END -BEGIN db_sync CODE 
-ARG ID DB * dbp -ARG INT u_int32_t flags -END -BEGIN db_truncate RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG IGNORE u_int32_t * countp -ARG INT u_int32_t flags -RET INT u_int32_t count -END -BEGIN db_upgrade NOFUNC -ARG ID DB * dbp -ARG STRING const char * fname -ARG INT u_int32_t flags -END -BEGIN db_verify NOFUNC -ARG ID DB * dbp -ARG STRING const char * fname -ARG STRING const char * subdb -ARG IGNORE FILE * outfile -ARG INT u_int32_t flags -END -# -# Cursor functions -# -BEGIN db_cursor RETCODE -ARG ID DB * dbp -ARG ID DB_TXN * txnp -ARG IGNORE DBC ** dbcpp -ARG INT u_int32_t flags -RET ID long dbcid -END -BEGIN db_join RETCODE -ARG ID DB * dbp -ARG LIST DBC ** curs ID -ARG IGNORE DBC ** dbcp -ARG INT u_int32_t flags -RET ID long dbcid -END -BEGIN dbc_close RETCODE -ARG ID DBC * dbc -END -BEGIN dbc_count RETCODE -ARG ID DBC * dbc -ARG IGNORE db_recno_t * countp -ARG INT u_int32_t flags -RET INT db_recno_t dupcount -END -BEGIN dbc_del CODE -ARG ID DBC * dbc -ARG INT u_int32_t flags -END -BEGIN dbc_dup RETCODE -ARG ID DBC * dbc -ARG IGNORE DBC ** dbcp -ARG INT u_int32_t flags -RET ID long dbcid -END -BEGIN dbc_get RETCODE -ARG ID DBC * dbc -ARG DBT DBT * key -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * key -RET DBT DBT * data -END -BEGIN dbc_pget RETCODE -ARG ID DBC * dbc -ARG DBT DBT * skey -ARG DBT DBT * pkey -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * skey -RET DBT DBT * pkey -RET DBT DBT * data -END -BEGIN dbc_put RETCODE -ARG ID DBC * dbc -ARG DBT DBT * key -ARG DBT DBT * data -ARG INT u_int32_t flags -RET DBT DBT * key -END - -# -# Unsupported environment subsystems -# -# -# Locking subsystem -# -BEGIN lock_detect NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t flags -ARG INT u_int32_t atype -ARG IGNORE int * aborted -END -BEGIN lock_get NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t locker -ARG INT u_int32_t flags -ARG CONST const DBT * obj -ARG INT db_lockmode_t mode -ARG IGNORE DB_LOCK * lock -END -BEGIN lock_id 
NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t * idp -END -BEGIN lock_id_free NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t id -END -BEGIN lock_put NOFUNC -ARG ID DB_ENV * dbenv -ARG ID DB_LOCK * lock -END -BEGIN lock_stat NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_LOCK_STAT ** statp -ARG INT u_int32_t flags -END -BEGIN lock_vec NOFUNC -ARG ID DB_ENV * dbenv -ARG INT u_int32_t locker -ARG INT u_int32_t flags -ARG IGNORE DB_LOCKREQ * list -ARG INT int nlist -ARG IGNORE DB_LOCKREQ ** elistp -END -# -# Logging subsystem -# -BEGIN log_archive NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE char *** listp -ARG INT u_int32_t flags -END -BEGIN log_cursor NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_LOGC ** logcp -ARG INT u_int32_t flags -END -# -# Don't do log_compare. It doesn't have an env we can get at, -# and it doesn't manipulate DB internal information. -# -BEGIN log_file NOFUNC -ARG ID DB_ENV * dbenv -ARG CONST const DB_LSN * lsn -ARG STRING char * namep -ARG INT size_t len -END -BEGIN log_flush NOFUNC -ARG ID DB_ENV * dbenv -ARG CONST const DB_LSN * lsn -END -BEGIN log_put NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_LSN * lsn -ARG DBT const DBT * data -ARG INT u_int32_t flags -END -BEGIN log_stat NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_LOG_STAT ** statp -ARG INT u_int32_t flags -END - -# -# DB_MPOOL methods. 
-# -BEGIN memp_register NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int ftype -FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *) -FUNCARG int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *)) -FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *) -FUNCARG int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *)) -END -BEGIN memp_stat NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_MPOOL_STAT ** gstatp -ARG IGNORE DB_MPOOL_FSTAT *** fstatp -ARG INT u_int32_t flags -END -BEGIN memp_sync NOFUNC -ARG ID DB_ENV * dbenv -ARG IGNORE DB_LSN * lsn -END -BEGIN memp_trickle NOFUNC -ARG ID DB_ENV * dbenv -ARG INT int pct -ARG IGNORE int * nwrotep -END - -# -# DB_MPOOLFILE methods. -# -BEGIN memp_fget NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t * pgnoaddr -ARG IGNORE u_int32_t flags -ARG IGNORE void * addrp -END -BEGIN memp_fopen NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE const char * path -ARG IGNORE u_int32_t flags -ARG IGNORE int mode -ARG IGNORE size_t pagesize -END -BEGIN memp_fput NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE void * pgaddr -ARG IGNORE u_int32_t flags -END -BEGIN memp_fset NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE void * pgaddr -ARG IGNORE u_int32_t flags -END -BEGIN memp_get_clear_len NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t * clear_lenp -END -BEGIN memp_set_clear_len NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t clear_len -END -BEGIN memp_get_fileid NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int8_t * fileid -END -BEGIN memp_set_fileid NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int8_t * fileid -END -BEGIN memp_get_flags NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t * flagsp -END -BEGIN memp_set_flags NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t flags -ARG IGNORE int onoff -END -BEGIN memp_get_ftype NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE int * ftype -END -BEGIN memp_set_ftype NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE int ftype -END -BEGIN 
memp_get_lsn_offset NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE int32_t * lsn_offsetp -END -BEGIN memp_set_lsn_offset NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE int32_t lsn_offset -END -BEGIN memp_get_maxsize NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t * gbytesp -ARG IGNORE u_int32_t * bytesp -END -BEGIN memp_set_maxsize NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE u_int32_t gbytes -ARG IGNORE u_int32_t bytes -END -BEGIN memp_get_pgcookie NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE DBT * pgcookie -END -BEGIN memp_set_pgcookie NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE DBT * pgcookie -END -BEGIN memp_get_priority NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE DB_CACHE_PRIORITY * priorityp -END -BEGIN memp_set_priority NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -ARG IGNORE DB_CACHE_PRIORITY priority -END -BEGIN memp_fsync NOFUNC -ARG ID DB_MPOOLFILE * dbmfp -END diff --git a/storage/bdb/sequence/seq_stat.c b/storage/bdb/sequence/seq_stat.c index af38c1a553b..aeea1dda28a 100644 --- a/storage/bdb/sequence/seq_stat.c +++ b/storage/bdb/sequence/seq_stat.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2004 + * Copyright (c) 2004-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: seq_stat.c,v 1.19 2004/09/28 17:28:15 bostic Exp $ + * $Id: seq_stat.c,v 12.6 2005/10/07 20:21:39 ubell Exp $ */ #include "db_config.h" @@ -18,9 +18,9 @@ #endif #include "db_int.h" -#include "dbinc_auto/sequence_ext.h" #include "dbinc/db_page.h" #include "dbinc/db_am.h" +#include "dbinc_auto/sequence_ext.h" #ifdef HAVE_STATISTICS static int __seq_print_all __P((DB_SEQUENCE *, u_int32_t)); @@ -40,13 +40,15 @@ __seq_stat(seq, spp, flags) { DB *dbp; DB_ENV *dbenv; + DB_THREAD_INFO *ip; DB_SEQ_RECORD record; DB_SEQUENCE_STAT *sp; DBT data; - int ret; + int handle_check, ret, t_ret; dbp = seq->seq_dbp; dbenv = dbp->dbenv; + switch (flags) { case DB_STAT_CLEAR: case DB_STAT_ALL: @@ -56,32 +58,41 @@ __seq_stat(seq, spp, flags) return (__db_ferr(dbenv, "DB_SEQUENCE->stat", 0)); } + ENV_ENTER(dbenv, ip); + + /* Check for replication block. */ + handle_check = IS_ENV_REPLICATED(dbenv); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { + handle_check = 0; + goto err; + } + /* Allocate and clear the structure. 
*/ if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0) - return (ret); + goto err; memset(sp, 0, sizeof(*sp)); - if (seq->seq_mutexp != NULL) { - sp->st_wait = seq->seq_mutexp->mutex_set_wait; - sp->st_nowait = seq->seq_mutexp->mutex_set_nowait; + if (seq->mtx_seq != MUTEX_INVALID) { + __mutex_set_wait_info( + dbenv, seq->mtx_seq, &sp->st_wait, &sp->st_nowait); if (LF_ISSET(DB_STAT_CLEAR)) - MUTEX_CLEAR(seq->seq_mutexp); + __mutex_clear(dbenv, seq->mtx_seq); } memset(&data, 0, sizeof(data)); data.data = &record; data.ulen = sizeof(record); data.flags = DB_DBT_USERMEM; -retry: if ((ret = dbp->get(dbp, NULL, &seq->seq_key, &data, 0)) != 0) { +retry: if ((ret = __db_get(dbp, NULL, &seq->seq_key, &data, 0)) != 0) { if (ret == DB_BUFFER_SMALL && data.size > sizeof(seq->seq_record)) { if ((ret = __os_malloc(dbenv, data.size, &data.data)) != 0) - return (ret); + goto err; data.ulen = data.size; goto retry; } - return (ret); + goto err; } if (data.data != &record) @@ -97,7 +108,12 @@ retry: if ((ret = dbp->get(dbp, NULL, &seq->seq_key, &data, 0)) != 0) { *spp = sp; if (data.data != &record) __os_free(dbenv, data.data); - return (0); + + /* Release replication block. */ +err: if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + ENV_LEAVE(dbenv, ip); + return (ret); } /* @@ -111,16 +127,36 @@ __seq_stat_print(seq, flags) DB_SEQUENCE *seq; u_int32_t flags; { - int ret; + DB *dbp; + DB_ENV *dbenv; + DB_THREAD_INFO *ip; + int handle_check, ret, t_ret; + + dbp = seq->seq_dbp; + dbenv = dbp->dbenv; + + ENV_ENTER(dbenv, ip); + + /* Check for replication block. */ + handle_check = IS_ENV_REPLICATED(dbenv); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 0)) != 0) { + handle_check = 0; + goto err; + } if ((ret = __seq_print_stats(seq, flags)) != 0) - return (ret); + goto err; if (LF_ISSET(DB_STAT_ALL) && (ret = __seq_print_all(seq, flags)) != 0) - return (ret); + goto err; - return (0); + /* Release replication block. 
*/ +err: if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + + ENV_LEAVE(dbenv, ip); + return (ret); } @@ -166,7 +202,7 @@ __seq_print_stats(seq, flags) (u_long)sp->st_wait, DB_PCT(sp->st_wait, sp->st_wait + sp->st_nowait), NULL); STAT_FMT("The current sequence value", - INT64_FMT, int64_t, sp->st_current); + INT64_FMT, int64_t, sp->st_current); STAT_FMT("The cached sequence value", INT64_FMT, int64_t, sp->st_value); STAT_FMT("The last cached sequence value", diff --git a/storage/bdb/sequence/sequence.c b/storage/bdb/sequence/sequence.c index 26f741951a3..925b7f5b1be 100644 --- a/storage/bdb/sequence/sequence.c +++ b/storage/bdb/sequence/sequence.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2004 + * Copyright (c) 2004-2005 * Sleepycat Software. All rights reserved. * - * $Id: sequence.c,v 1.26 2004/10/25 17:59:28 bostic Exp $ + * $Id: sequence.c,v 12.28 2005/10/24 19:22:00 bostic Exp $ */ #include "db_config.h" @@ -12,18 +12,30 @@ #ifndef NO_SYSTEM_INCLUDES #include -#include +#ifdef HAVE_RPC +#include +#endif + #include #endif +#ifdef HAVE_RPC +#include "db_server.h" +#endif + #include "db_int.h" -#include "dbinc/db_shash.h" #include "dbinc/db_page.h" -#include "dbinc/db_swap.h" +#include "dbinc/db_shash.h" #include "dbinc/db_am.h" +#include "dbinc/lock.h" #include "dbinc/mp.h" +#include "dbinc/txn.h" #include "dbinc_auto/sequence_ext.h" +#ifdef HAVE_RPC +#include "dbinc_auto/rpc_client_ext.h" +#endif + #ifdef HAVE_SEQUENCE #define SEQ_ILLEGAL_AFTER_OPEN(seq, name) \ if (seq->seq_key.data != NULL) \ @@ -33,16 +45,28 @@ if (seq->seq_key.data == NULL) \ return (__db_mi_open((seq)->seq_dbp->dbenv, name, 0)); -#define SEQ_SWAP(rp) \ - do { \ - M_32_SWAP((rp)->seq_version); \ - M_32_SWAP((rp)->flags); \ - M_64_SWAP((rp)->seq_value); \ - M_64_SWAP((rp)->seq_max); \ - M_64_SWAP((rp)->seq_min); \ +#define SEQ_IS_OPEN(seq) ((seq)->seq_key.data != NULL) + +/* + * Sequences must be 
architecture independent but they are stored as user + * data in databases so the code here must handle the byte ordering. We + * store them in little-endian byte ordering. If we are on a big-endian + * machine we swap in and out when we read from the database. seq->seq_rp + * always points to the record in native ordering. + * + * Version 1 always stored things in native format so if we detect this we + * upgrade on the fly and write the record back at open time. + */ +#define SEQ_SWAP(rp) \ + do { \ + M_32_SWAP((rp)->seq_version); \ + M_32_SWAP((rp)->flags); \ + M_64_SWAP((rp)->seq_value); \ + M_64_SWAP((rp)->seq_max); \ + M_64_SWAP((rp)->seq_min); \ } while (0) -#define SEQ_SWAP_IN(seq) \ +#define SEQ_SWAP_IN(seq) \ do { \ if (__db_isbigendian()) { \ memcpy(&seq->seq_record, seq->seq_data.data, \ @@ -50,8 +74,8 @@ SEQ_SWAP(&seq->seq_record); \ } \ } while (0) - -#define SEQ_SWAP_OUT(seq) \ + +#define SEQ_SWAP_OUT(seq) \ do { \ if (__db_isbigendian()) { \ memcpy(seq->seq_data.data, \ @@ -59,22 +83,22 @@ SEQ_SWAP((DB_SEQ_RECORD*)seq->seq_data.data); \ } \ } while (0) - +static int __seq_chk_cachesize __P((DB_ENV *, int32_t, db_seq_t, db_seq_t)); static int __seq_close __P((DB_SEQUENCE *, u_int32_t)); -static int __seq_get __P((DB_SEQUENCE *, - DB_TXN *, int32_t, db_seq_t *, u_int32_t)); +static int __seq_get + __P((DB_SEQUENCE *, DB_TXN *, int32_t, db_seq_t *, u_int32_t)); static int __seq_get_cachesize __P((DB_SEQUENCE *, int32_t *)); +static int __seq_get_db __P((DB_SEQUENCE *, DB **)); static int __seq_get_flags __P((DB_SEQUENCE *, u_int32_t *)); static int __seq_get_key __P((DB_SEQUENCE *, DBT *)); static int __seq_get_range __P((DB_SEQUENCE *, db_seq_t *, db_seq_t *)); -static int __seq_set_range __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); -static int __seq_get_db __P((DB_SEQUENCE *, DB **)); static int __seq_initial_value __P((DB_SEQUENCE *, db_seq_t)); -static int __seq_open __P((DB_SEQUENCE *, DB_TXN *, DBT *, u_int32_t)); +static int __seq_open_pp 
__P((DB_SEQUENCE *, DB_TXN *, DBT *, u_int32_t)); static int __seq_remove __P((DB_SEQUENCE *, DB_TXN *, u_int32_t)); static int __seq_set_cachesize __P((DB_SEQUENCE *, int32_t)); static int __seq_set_flags __P((DB_SEQUENCE *, u_int32_t)); +static int __seq_set_range __P((DB_SEQUENCE *, db_seq_t, db_seq_t)); static int __seq_update __P((DB_SEQUENCE *, DB_TXN *, int32_t, u_int32_t)); /* @@ -95,6 +119,12 @@ db_sequence_create(seqp, dbp, flags) dbenv = dbp->dbenv; + DB_ILLEGAL_BEFORE_OPEN(dbp, "db_sequence_create"); +#ifdef HAVE_RPC + if (RPC_ON(dbenv)) + return (__dbcl_dbenv_illegal(dbenv)); +#endif + /* Check for invalid function flags. */ switch (flags) { case 0: @@ -103,8 +133,6 @@ db_sequence_create(seqp, dbp, flags) return (__db_ferr(dbenv, "db_sequence_create", 0)); } - DB_ILLEGAL_BEFORE_OPEN(dbp, "db_sequence_create"); - /* Allocate the sequence. */ if ((ret = __os_calloc(dbenv, 1, sizeof(*seq), &seq)) != 0) return (ret); @@ -119,7 +147,7 @@ db_sequence_create(seqp, dbp, flags) seq->get_key = __seq_get_key; seq->get_range = __seq_get_range; seq->initial_value = __seq_initial_value; - seq->open = __seq_open; + seq->open = __seq_open_pp; seq->remove = __seq_remove; seq->set_flags = __seq_set_flags; seq->set_range = __seq_set_range; @@ -137,7 +165,7 @@ db_sequence_create(seqp, dbp, flags) * */ static int -__seq_open(seq, txn, keyp, flags) +__seq_open_pp(seq, txn, keyp, flags) DB_SEQUENCE *seq; DB_TXN *txn; DBT *keyp; @@ -145,39 +173,56 @@ __seq_open(seq, txn, keyp, flags) { DB *dbp; DB_ENV *dbenv; - DB_MPOOL *dbmp; DB_SEQ_RECORD *rp; + DB_THREAD_INFO *ip; u_int32_t tflags; - int txn_local, ret; -#define SEQ_OPEN_FLAGS (DB_AUTO_COMMIT | DB_CREATE | DB_EXCL | DB_THREAD) + int handle_check, txn_local, ret, t_ret; +#define SEQ_OPEN_FLAGS (DB_CREATE | DB_EXCL | DB_THREAD) dbp = seq->seq_dbp; dbenv = dbp->dbenv; txn_local = 0; + STRIP_AUTO_COMMIT(flags); SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->open"); - if (keyp->size == 0) { - __db_err(dbenv, "Zero length sequence 
key specified"); - return (EINVAL); + + ENV_ENTER(dbenv, ip); + + /* Check for replication block. */ + handle_check = IS_ENV_REPLICATED(dbenv); + if (handle_check && + (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { + handle_check = 0; + goto err; } - if (LF_ISSET(~SEQ_OPEN_FLAGS)) - return (__db_ferr(dbenv, "DB_SEQUENCE->open", 0)); + if ((ret = __db_fchk(dbenv, + "DB_SEQUENCE->open", flags, SEQ_OPEN_FLAGS)) != 0) + goto err; - if ((ret = dbp->get_flags(dbp, &tflags)) != 0) - return (ret); + if (keyp->size == 0) { + __db_err(dbenv, "Zero length sequence key specified"); + goto err; + } + + if ((ret = __db_get_flags(dbp, &tflags)) != 0) + goto err; if (FLD_ISSET(tflags, DB_DUP)) { __db_err(dbenv, "Sequences not supported in databases configured for duplicate data"); - return (EINVAL); + goto err; } if (LF_ISSET(DB_THREAD)) { - dbmp = dbenv->mp_handle; - if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, - &seq->seq_mutexp, MUTEX_ALLOC | MUTEX_THREAD)) != 0) - return (ret); + if (RPC_ON(dbenv)) { + __db_err(dbenv, + "DB_SEQUENCE->open: DB_THREAD not supported with RPC"); + goto err; + } + if ((ret = __mutex_alloc(dbenv, + MTX_SEQUENCE, DB_MUTEX_THREAD, &seq->mtx_seq)) != 0) + goto err; } memset(&seq->seq_data, 0, sizeof(DBT)); @@ -196,14 +241,12 @@ __seq_open(seq, txn, keyp, flags) memset(&seq->seq_key, 0, sizeof(DBT)); if ((ret = __os_malloc(dbenv, keyp->size, &seq->seq_key.data)) != 0) - return (ret); + goto err; memcpy(seq->seq_key.data, keyp->data, keyp->size); seq->seq_key.size = seq->seq_key.ulen = keyp->size; seq->seq_key.flags = DB_DBT_USERMEM; - - -retry: if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { +retry: if ((ret = __db_get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { if (ret == DB_BUFFER_SMALL && seq->seq_data.size > sizeof(seq->seq_record)) { seq->seq_data.flags = DB_DBT_REALLOC; @@ -216,8 +259,6 @@ retry: if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { ret = 0; rp = &seq->seq_record; - 
tflags = DB_NOOVERWRITE; - tflags |= LF_ISSET(DB_AUTO_COMMIT); if (!F_ISSET(rp, DB_SEQ_RANGE_SET)) { rp->seq_max = INT64_MAX; rp->seq_min = INT64_MIN; @@ -235,8 +276,8 @@ retry: if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { goto err; } else { SEQ_SWAP_OUT(seq); - if ((ret = dbp->put(dbp, txn, - &seq->seq_key, &seq->seq_data, tflags)) != 0) { + if ((ret = __db_put(dbp, txn, &seq->seq_key, + &seq->seq_data, DB_NOOVERWRITE)) != 0) { __db_err(dbenv, "Sequence create failed"); goto err; } @@ -255,25 +296,23 @@ retry: if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { /* * The first release was stored in native mode. - * Check the verison number before swapping. + * Check the version number before swapping. */ rp = seq->seq_data.data; if (rp->seq_version == DB_SEQUENCE_OLDVER) { oldver: rp->seq_version = DB_SEQUENCE_VERSION; if (__db_isbigendian()) { - if (IS_AUTO_COMMIT(dbp, txn, flags)) { + if (IS_DB_AUTO_COMMIT(dbp, txn)) { if ((ret = - __db_txn_auto_init(dbenv, &txn)) != 0) - return (ret); + __txn_begin(dbenv, NULL, &txn, 0)) != 0) + goto err; txn_local = 1; - LF_CLR(DB_AUTO_COMMIT); goto retry; - } else - txn_local = 0; + } memcpy(&seq->seq_record, rp, sizeof(seq->seq_record)); SEQ_SWAP_OUT(seq); } - if ((ret = dbp->put(dbp, + if ((ret = __db_put(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) goto err; } @@ -300,7 +339,7 @@ oldver: rp->seq_version = DB_SEQUENCE_VERSION; } M_32_SWAP(rp->seq_version); __db_err(dbenv, - "Unknown sequence version: %d", rp->seq_version); + "Unsupported sequence version: %d", rp->seq_version); goto err; } @@ -310,11 +349,26 @@ oldver: rp->seq_version = DB_SEQUENCE_VERSION; else seq->seq_last_value++; -err: if (ret != 0) { + /* + * It's an error to specify a cache larger than the range of sequences. 
+ */ + if (seq->seq_cache_size != 0 && (ret = __seq_chk_cachesize( + dbenv, seq->seq_cache_size, rp->seq_max, rp->seq_min)) != 0) + goto err; + +err: if (txn_local && + (t_ret = __db_txn_auto_resolve(dbenv, txn, 0, ret)) && ret == 0) + ret = t_ret; + if (ret != 0) { __os_free(dbenv, seq->seq_key.data); seq->seq_key.data = NULL; } - return (txn_local ? __db_txn_auto_resolve(dbenv, txn, 0, ret) : ret); + /* Release replication block. */ + if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + + ENV_LEAVE(dbenv, ip); + return (ret); } /* @@ -343,11 +397,23 @@ __seq_set_cachesize(seq, cachesize) DB_SEQUENCE *seq; int32_t cachesize; { + DB_ENV *dbenv; + int ret; + + dbenv = seq->seq_dbp->dbenv; + if (cachesize < 0) { - __db_err(seq->seq_dbp->dbenv, - "Illegal cache size: %d", cachesize); + __db_err(dbenv, "Cache size must be >= 0"); return (EINVAL); } + + /* + * It's an error to specify a cache larger than the range of sequences. + */ + if (SEQ_IS_OPEN(seq) && (ret = __seq_chk_cachesize(dbenv, + cachesize, seq->seq_rp->seq_max, seq->seq_rp->seq_min)) != 0) + return (ret); + seq->seq_cache_size = cachesize; return (0); } @@ -385,11 +451,12 @@ __seq_set_flags(seq, flags) dbenv = seq->seq_dbp->dbenv; rp = seq->seq_rp; + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->set_flags"); - if (LF_ISSET(~SEQ_SET_FLAGS)) - return (__db_ferr(dbenv, "DB_SEQUENCE->set_flags", 0)); - + if ((ret = __db_fchk( + dbenv, "DB_SEQUENCE->set_flags", flags, SEQ_SET_FLAGS)) != 0) + return (ret); if ((ret = __db_fcchk(dbenv, "DB_SEQUENCE->set_flags", flags, DB_SEQ_DEC, DB_SEQ_INC)) != 0) return (ret); @@ -403,7 +470,7 @@ __seq_set_flags(seq, flags) /* * __seq_initial_value -- - * DB_SEQUENCE->init_value. + * DB_SEQUENCE->initial_value. 
* */ static int @@ -415,7 +482,7 @@ __seq_initial_value(seq, value) DB_SEQ_RECORD *rp; dbenv = seq->seq_dbp->dbenv; - SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->init_value"); + SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->initial_value"); rp = seq->seq_rp; if (F_ISSET(rp, DB_SEQ_RANGE_SET) && @@ -441,7 +508,6 @@ __seq_get_range(seq, minp, maxp) { SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get_range"); - F_SET(seq->seq_rp, DB_SEQ_RANGE_SET); *minp = seq->seq_rp->seq_min; *maxp = seq->seq_rp->seq_max; return (0); @@ -463,7 +529,8 @@ __seq_set_range(seq, min, max) SEQ_ILLEGAL_AFTER_OPEN(seq, "DB_SEQUENCE->set_range"); if (min >= max) { - __db_err(dbenv, "Illegal sequence range"); + __db_err(dbenv, + "Minimum sequence value must be less than maximum sequence value"); return (EINVAL); } @@ -485,16 +552,28 @@ __seq_update(seq, txn, delta, flags) DB_ENV *dbenv; DB_SEQ_RECORD *rp; int32_t adjust; - int ret; + int ret, txn_local; dbp = seq->seq_dbp; dbenv = dbp->dbenv; - if (LF_ISSET(DB_AUTO_COMMIT) && - (ret = __db_txn_auto_init(dbenv, &txn)) != 0) - return (ret); -retry: - if ((ret = dbp->get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { + /* + * Create a local transaction as necessary, check for consistent + * transaction usage, and, if we have no transaction but do have + * locking on, acquire a locker id for the handle lock acquisition. + */ + if (IS_DB_AUTO_COMMIT(dbp, txn)) { + if ((ret = __txn_begin(dbenv, NULL, &txn, 0)) != 0) + return (ret); + txn_local = 1; + } else + txn_local = 0; + + /* Check for consistent transaction usage. 
*/ + if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0) + goto err; + +retry: if ((ret = __db_get(dbp, txn, &seq->seq_key, &seq->seq_data, 0)) != 0) { if (ret == DB_BUFFER_SMALL && seq->seq_data.size > sizeof(seq->seq_record)) { seq->seq_data.flags = DB_DBT_REALLOC; @@ -509,6 +588,9 @@ retry: SEQ_SWAP_IN(seq); rp = seq->seq_rp; + if (F_ISSET(rp, DB_SEQ_WRAPPED)) + goto overflow; + if (seq->seq_data.size < sizeof(seq->seq_record)) { __db_err(dbenv, "Bad sequence record format"); ret = EINVAL; @@ -522,17 +604,18 @@ retry: * * The sequence minimum and maximum values can be INT64_MIN and * INT64_MAX, so we need to do the test carefully to cope with - * arithmetic overflow. That means we need to check whether the value - * is in a range, we can't get away with a single comparison. - * - * For example, if seq_value == -1 and seq_max == INT64_MAX, the first - * test below will be true, since -1 - (INT64_MAX + 1) == INT64_MAX. - * The second part of the test makes sure that seq_value is close - * enough to the maximum to really cause wrapping. + * arithmetic overflow. The first part of the test below checks + * whether we will hit the end of the 64-bit range. The second part + * checks whether we hit the end of the sequence. */ - if (F_ISSET(rp, DB_SEQ_INC)) { - if (rp->seq_value - ((rp->seq_max - adjust) + 2) >= 0 && - (rp->seq_max + 1) - rp->seq_value >= 0) { +again: if (F_ISSET(rp, DB_SEQ_INC)) { + if (rp->seq_value + adjust - 1 < rp->seq_value || + rp->seq_value + adjust - 1 > rp->seq_max) { + /* Don't wrap just to fill the cache. */ + if (adjust > delta) { + adjust = delta; + goto again; + } if (F_ISSET(rp, DB_SEQ_WRAP)) rp->seq_value = rp->seq_min; else { @@ -541,20 +624,33 @@ overflow: __db_err(dbenv, "Sequence overflow"); goto err; } } + /* See if we are at the end of the 64 bit range. 
*/ + if (!F_ISSET(rp, DB_SEQ_WRAP) && + rp->seq_value + adjust < rp->seq_value) + F_SET(rp, DB_SEQ_WRAPPED); } else { - if (rp->seq_value - (rp->seq_min - 1) >= 0 && - (rp->seq_min + adjust - 2) - rp->seq_value >= 0) { + if ((rp->seq_value - adjust) + 1 > rp->seq_value || + (rp->seq_value - adjust) + 1 < rp->seq_min) { + /* Don't wrap just to fill the cache. */ + if (adjust > delta) { + adjust = delta; + goto again; + } if (F_ISSET(rp, DB_SEQ_WRAP)) rp->seq_value = rp->seq_max; else goto overflow; } + /* See if we are at the end of the 64 bit range. */ + if (!F_ISSET(rp, DB_SEQ_WRAP) && + rp->seq_value - adjust > rp->seq_value) + F_SET(rp, DB_SEQ_WRAPPED); adjust = -adjust; } rp->seq_value += adjust; SEQ_SWAP_OUT(seq); - ret = dbp->put(dbp, txn, &seq->seq_key, &seq->seq_data, 0); + ret = __db_put(dbp, txn, &seq->seq_key, &seq->seq_data, 0); rp->seq_value -= adjust; if (ret != 0) { __db_err(dbenv, "Sequence update failed"); @@ -566,11 +662,8 @@ overflow: __db_err(dbenv, "Sequence overflow"); else seq->seq_last_value++; -err: if (LF_ISSET(DB_AUTO_COMMIT)) - ret = __db_txn_auto_resolve(dbenv, - txn, LF_ISSET(DB_TXN_NOSYNC), ret); - return (ret); - +err: return (txn_local ? __db_txn_auto_resolve( + dbenv, txn, LF_ISSET(DB_TXN_NOSYNC), ret) : ret); } static int @@ -584,20 +677,36 @@ __seq_get(seq, txn, delta, retp, flags) DB *dbp; DB_ENV *dbenv; DB_SEQ_RECORD *rp; - int ret; + DB_THREAD_INFO *ip; + int handle_check, ret, t_ret; dbp = seq->seq_dbp; dbenv = dbp->dbenv; rp = seq->seq_rp; ret = 0; + STRIP_AUTO_COMMIT(flags); SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->get"); if (delta <= 0) { __db_err(dbenv, "Sequence delta must be greater than 0"); return (EINVAL); } - MUTEX_THREAD_LOCK(dbenv, seq->seq_mutexp); + + if (seq->seq_cache_size != 0 && txn != NULL) { + __db_err(dbenv, + "Sequence with non-zero cache may not specify transaction handle"); + return (EINVAL); + } + + ENV_ENTER(dbenv, ip); + + /* Check for replication block. 
*/ + handle_check = IS_ENV_REPLICATED(dbenv); + if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) + return (ret); + + MUTEX_LOCK(dbenv, seq->mtx_seq); if (rp->seq_min + delta > rp->seq_max) { __db_err(dbenv, "Sequence overflow"); @@ -623,8 +732,13 @@ __seq_get(seq, txn, delta, retp, flags) rp->seq_value -= delta; } -err: MUTEX_THREAD_UNLOCK(dbenv, seq->seq_mutexp); +err: MUTEX_UNLOCK(dbenv, seq->mtx_seq); + /* Release replication block. */ + if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + + ENV_LEAVE(dbenv, ip); return (ret); } @@ -673,26 +787,27 @@ __seq_close(seq, flags) u_int32_t flags; { DB_ENV *dbenv; - DB_MPOOL *dbmp; - int ret; + int ret, t_ret; ret = 0; dbenv = seq->seq_dbp->dbenv; if (flags != 0) ret = __db_ferr(dbenv, "DB_SEQUENCE->close", 0); - if (seq->seq_mutexp != NULL) { - dbmp = dbenv->mp_handle; - __db_mutex_free(dbenv, dbmp->reginfo, seq->seq_mutexp); - } + + if ((t_ret = __mutex_free(dbenv, &seq->mtx_seq)) != 0 && ret == 0) + ret = t_ret; + if (seq->seq_key.data != NULL) __os_free(dbenv, seq->seq_key.data); if (seq->seq_data.data != NULL && seq->seq_data.data != &seq->seq_record) __os_ufree(dbenv, seq->seq_data.data); seq->seq_key.data = NULL; + memset(seq, CLEAR_BYTE, sizeof(*seq)); __os_free(dbenv, seq); + return (ret); } @@ -708,28 +823,62 @@ __seq_remove(seq, txn, flags) { DB *dbp; DB_ENV *dbenv; - int ret, t_ret; + DB_THREAD_INFO *ip; + int handle_check, ret, t_ret; dbp = seq->seq_dbp; dbenv = dbp->dbenv; SEQ_ILLEGAL_BEFORE_OPEN(seq, "DB_SEQUENCE->remove"); + ENV_ENTER(dbenv, ip); - if (LF_ISSET(DB_AUTO_COMMIT) && - (ret = __db_txn_auto_init(dbenv, &txn)) != 0) + /* Check for replication block. 
*/ + handle_check = IS_ENV_REPLICATED(dbenv); + if (handle_check && + (ret = __db_rep_enter(dbp, 1, 0, txn != NULL)) != 0) { + handle_check = 0; goto err; + } + if (flags != 0) + ret = __db_ferr(dbenv, "DB_SEQUENCE->remove", 0); - ret = dbp->del(dbp, txn, &seq->seq_key, 0); + ret = __db_del(dbp, txn, &seq->seq_key, 0); - if (LF_ISSET(DB_AUTO_COMMIT)) - ret = __db_txn_auto_resolve(dbenv, - txn, LF_ISSET(DB_TXN_NOSYNC), ret); - -err: if ((t_ret = __seq_close(seq, 0)) != 0 && ret == 0) + if ((t_ret = __seq_close(seq, 0)) != 0 && ret == 0) ret = t_ret; + + /* Release replication block. */ + if (handle_check && (t_ret = __env_db_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; +err: ENV_LEAVE(dbenv, ip); return (ret); } +/* + * __seq_chk_cachesize -- + * Validate the cache size vs. the range. + */ +static int +__seq_chk_cachesize(dbenv, cachesize, max, min) + DB_ENV *dbenv; + int32_t cachesize; + db_seq_t max, min; +{ + /* + * It's an error to specify caches larger than the sequence range. + * + * The min and max of the range can be either positive or negative, + * the difference will fit in an unsigned variable of the same type. + * Assume a 2's complement machine, and simply subtract. + */ + if ((u_int32_t)cachesize > (u_int64_t)max - (u_int64_t)min) { + __db_err(dbenv, + "Number of items to be cached is larger than the sequence range"); + return (EINVAL); + } + return (0); +} + #else /* !HAVE_SEQUENCE */ int diff --git a/storage/bdb/tcl/docs/db.html b/storage/bdb/tcl/docs/db.html deleted file mode 100644 index db8382b759f..00000000000 --- a/storage/bdb/tcl/docs/db.html +++ /dev/null @@ -1,263 +0,0 @@ - - - - - - - - - -

-Database Commands

-The database commands provide a fairly straightforward mapping to the -DB method functions. - -

-> berkdb open -

- -
[-btcompare proc]
-Sets the Btree comparison function to the Tcl procedure named -proc using the -DB->set_bt_compare -method. - -
[-btree|-hash|-recno|-queue|-unknown]
- -Select the database type:
-DB_BTREE, DB_HASH, DB_RECNO, DB_QUEUE or DB_UNKNOWN. - - -
[-cachesize {gbytes bytes ncaches}]
-Sets the size of the database cache to the size specified by -gbytes and bytes, broken up into ncaches number of -caches using the -DB->set_cachesize -method. - -
[-create]
-Selects the DB_CREATE flag to create underlying files. - -
[-delim delim]
-Sets the delimiting byte for variable length records to delim -using the -DB->set_re_delim -method. - -
[-dup]
-Selects the DB_DUP flag to permit duplicates in the database. - -
[-dupcompare proc]
-Sets the duplicate data comparison function to the Tcl procedure named -proc using the -DB->set_dup_compare -method. - -
[-dupsort]
-Selects the DB_DUPSORT flag to support sorted duplicates. - -
[-env env]
-The database environment. - -
[-errfile filename]
-Specifies the error file to use for this environment to filename -by calling -DB->set_errfile. -If the file already exists then we will append to the end of the file. - -
[-excl]
-Selects the DB_EXCL flag to exclusively create underlying files. - -
[-extent size]
-Sets the size of a Queue database extent to the given size using -the -DB->set_q_extentsize -method. - -
[-ffactor density]
-Sets the hash table key density to the given density using the -DB->set_h_ffactor -method. - -
[-hashproc proc]
-Sets a user-defined hash function to the Tcl procedure named proc -using the -DB->set_h_hash method. - -
[-len len]
-Sets the length of fixed-length records to len using the -DB->set_re_len -method. - -
[-lorder order]
-Sets the byte order for integers stored in the database meta-data to -the given order using the -DB->set_lorder -method. - -
[-minkey minkey]
-Sets the minimum number of keys per Btree page to minkey using -the -DB->set_bt_minkey -method. - -
[-mode mode]
-Specifies the mode for created files. - -
[-nelem size]
-Sets the hash table size estimate to the given size using the -DB->set_h_nelem -method. - -
[-nommap]
-Selects the DB_NOMMAP flag to forbid mmaping of files. - -
[-pad pad]
-Sets the pad character used for fixed length records to pad using -the -DB->set_re_pad method. - -
[-pagesize pagesize]
-Sets the size of the database page to pagesize using the -DB->set_pagesize -method. - -
[-rdonly]
-Selects the DB_RDONLY flag for opening in read-only mode. - -
[-recnum]
-Selects the DB_RECNUM flag to support record numbers in Btrees. - -
[-renumber]
-Selects the DB_RENUMBER flag to support mutable record numbers. - -
[-revsplitoff]
-Selects the DB_REVSPLITOFF flag to suppress reverse splitting of pages -on deletion. - -
[-snapshot]
-Selects the DB_SNAPSHOT flag to support database snapshots. - -
[-source file]
-Sets the backing source file name to file using the -DB->set_re_source -method. - -
[-truncate]
-Selects the DB_TRUNCATE flag to truncate the database. - -
[--]
-Terminate the list of options and use remaining arguments as the file -or subdb names (thus allowing the use of filenames beginning with a dash -'-'). - -
[filename [subdbname]]
-The names of the database and sub-database. -
- -
-> berkdb upgrade [-dupsort] [-env env] [--] [filename] -

This command will invoke the DB->upgrade -function.  If the command is given the -env option, then we -will accordingly upgrade the database filename within the context of that -environment. The -dupsort option selects the DB_DUPSORT flag for -upgrading. The use of -- terminates the list of options, thus allowing -filenames beginning with a dash. -

- -


-> berkdb verify [-env env] [--] [filename] -

This command will invoke the DB->verify -function.  If the command is given the -env option, then we -will accordingly verify the database filename within the context of that -environment.  The use of -- terminates the list of options, -thus allowing filenames beginning with a dash. -

- -


> db del -

There are no undocumented options. - -


-> db join [-nosort] db0.c0 db1.c0 ... -

This command will invoke the db_join -function.  After it successfully joins a database, we bind it to a -new Tcl command of the form dbN.cX, where X is an integer -starting at 0 (e.g. db2.c0, db3.c0, etc).  We use the Tcl_CreateObjCommand()  -to create the top level database function.  It is through this cursor -handle that the user can access the joined data items. -

The options are: -

    -
  • --nosort - This flag causes DB not to sort the cursors based on the -number of data items they reference.  It results in the DB_JOIN_NOSORT -flag being set.
  • -
- -

-This command will invoke the -db_create function. If -the command is given the -env option, then we will accordingly -creating the database within the context of that environment. After it -successfully gets a handle to a database, we bind it to a new Tcl -command of the form dbX, where X is an integer starting -at 0 (e.g. db0, db1, etc). - -

-We use the Tcl_CreateObjCommand() to create the top level -database function. It is through this handle that the user can access -all of the commands described in the -Database Commands section. Internally, the database handle -is sent as the ClientData portion of the new command set so that -all future database calls access the appropriate handle. - -

-After parsing all of the optional arguments affecting the setup of the -database and making the appropriate calls to DB to manipulate those -values, we open the database for the user. It translates to the -DB->open method call after -parsing all of the various optional arguments. We automatically set the -DB_THREAD flag. The arguments are: - -


-> db get_join [-nosort] {db key} {db key} ... -

This command performs a join operation on the keys specified and returns -a list of the joined {key data} pairs. -

The options are: -

    -
  • --nosort This flag causes DB not to sort the cursors based on the -number of data items they reference.  It results in the DB_JOIN_NOSORT -flag being set.
  • -
- -
-> db keyrange [-txn id] key -

This command returns the range for the given key.  It returns -a list of 3 double elements of the form {less equal greater} -where less is the percentage of keys less than the given -key, equal is the percentage equal to the given key and greater -is the percentage greater than the given key.  If the -txn option -is specified it performs this operation under transaction protection. - -


> db put -

The undocumented options are: -

-
-nodupdata
-This flag causes DB not to insert the key/data pair if it already -exists, that is, both the key and data items are already in the -database. The -nodupdata flag may only be specified if the underlying -database has been configured to support sorted duplicates. -
- -
> dbc put -

The undocumented options are: -

-
-nodupdata
-This flag causes DB not to insert the key/data pair if it already -exists, that is, both the key and data items are already in the -database. The -nodupdata flag may only be specified if the underlying -database has been configured to support sorted duplicates. -
- - - diff --git a/storage/bdb/tcl/docs/env.html b/storage/bdb/tcl/docs/env.html deleted file mode 100644 index 3203a02b8d9..00000000000 --- a/storage/bdb/tcl/docs/env.html +++ /dev/null @@ -1,350 +0,0 @@ - - - - - - - - - -

-Environment Commands

-Environments provide a structure for creating a consistent environment -for processes using one or more of the features of Berkeley DB.  Unlike -some of the database commands, the environment commands are very low level. -
-
-

The user may create and open a new DB environment  by invoking: -

> berkdb env -
    [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]] -
    [-create] [-home directory] [-mode mode] -
    [-data_dir directory] [-log_dir directory] -[-tmp_dir directory] -
    [-nommap] [-private] [-recover] [-recover_fatal] -[-system_mem] [-errfile filename] -
    [-use_environ] [-use_environ_root] [-verbose -{which on|off}] -
    [-region_init] -
    [-cachesize {gbytes bytes ncaches}] -
    [-mmapsize size] -
    [-log_max max] -
    [-log_buffer size] -
    [-lock_conflict {nmodes {matrix}}] -
    [-lock_detect default|oldest|random|youngest] -
    [-lock_max max] -
    [-lock_max_locks max] -
    [-lock_max_lockers max] -
    [-lock_max_objects max] -
    [-lock_timeout timeout] -
    [-overwrite] -
    [-txn_max max] -
    [-txn_timeout timeout] -
    [-client_timeout seconds] -
    [-server_timeout seconds] -
    [-server hostname] -
    [-rep_master] [-rep_client] -
    [-rep_transport { machineid sendproc }] -
  -

This command opens up an environment.   We automatically set -the DB_THREAD and the DB_INIT_MPOOL flags.  The arguments are: -

    -
  • --cdb selects the DB_INIT_CDB flag for Concurrent Data Store
  • - -
  • --cdb_alldb selects the DB_CDB_ALLDB flag for Concurrent Data Store
  • - -
  • --lock selects the DB_INIT_LOCK flag for the locking subsystem
  • - -
  • --log selects the DB_INIT_LOG flag for the logging subsystem
  • - -
  • --txn selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags -for the transaction subsystem.  If nosync is specified, then -it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits
  • - -
  • --create selects the DB_CREATE flag to create underlying files
  • - -
  • --home directory selects the home directory of the environment
  • - -
  • --data_dir directory selects the data file directory of the -environment by calling DBENV->set_data_dir.
  • - -
  • --log_dir directory selects the log file directory of the -environment  by calling DBENV->set_lg_dir.
  • - -
  • --tmp_dir directory selects the temporary file directory of -the environment  by calling DBENV->set_tmp_dir.
  • - -
  • --mode mode sets the permissions of created files to mode
  • - -
  • --nommap selects the DB_NOMMAP flag to disallow using mmap'ed files
  • - -
  • --private selects the DB_PRIVATE flag for a private environment
  • - -
  • --recover selects the DB_RECOVER flag for recovery
  • - -
  • --recover_fatal selects the DB_RECOVER_FATAL flag for catastrophic -recovery
  • - -
  • --system_mem selects the DB_SYSTEM_MEM flag to use system memory
  • - -
  • --errfile specifies the error file to use for this environment to -filename -by calling DBENV->set_errfile. -If -the file already exists then we will append to the end of the file
  • - -
  • --use_environ selects the DB_USE_ENVIRON flag to affect file naming
  • - -
  • --use_environ_root selects the DB_USE_ENVIRON_ROOT flag to have the -root environment affect file naming
  • - -
  • --verbose produces verbose error output for the given which subsystem, -using the DBENV->set_verbose -method.   See the description of verbose -below for valid which values
  • - -
  • --region_init specifies that the user wants to page fault the region -in on startup using the DBENV->set_region_init -method call
  • - -
  • --cachesize sets the size of the database cache to the size  -specified by gbytes and bytes, broken up into -ncaches -number of caches using the DBENV->set_cachesize -method
  • - -
  • --mmapsize sets the size of the database page to size using -the DBENV->set_mp_mmapsize -method
  • - -
  • --log_max sets the maximum size of the log file to max -using the DBENV->set_lg_max -call
  • - -
  • --log_regionmax sets the size of the log region to max -using the DBENV->set_lg_regionmax -call
  • - -
  • --log_buffer sets the size of the log file in bytes to size -using the DBENV->set_lg_bsize -call
  • - -
  • --lock_conflict sets the number of lock modes to nmodes -and sets the locking policy for those modes to the conflict_matrix -given using the DBENV->set_lk_conflict -method call
  • - -
  • --lock_detect sets the deadlock detection policy to the given policy -using the DBENV->set_lk_detect -method call.  The policy choices are:
  • - -
      -
    • -default selects the DB_LOCK_DEFAULT policy for default detection
    • - -
    • -oldest selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock
    • - -
    • -random selects DB_LOCK_RANDOM to abort a random locker on a deadlock
    • - -
    • -youngest selects DB_LOCK_YOUNGEST to abort the youngest locker on -a deadlock
    • -
    - -
  • --lock_max sets the maximum size of the lock table to max using -the DBENV->set_lk_max -method call
  • - -
  • --lock_max_locks sets the maximum number of locks to max using -the DBENV->set_lk_max_locks -method call
  • - -
  • --lock_max_lockers sets the maximum number of locking entities to -max -using the DBENV->set_lk_max_lockers -method call
  • - -
  • --lock_max_objects sets the maximum number of simultaneously locked -objects to max using the DBENV->set_lk_max_objects -method call
  • - -
  • --lock_timeout sets the timeout for locks in the environment
  • - -
  • --overwrite sets DB_OVERWRITE flag
  • - -
  • --txn_max sets the maximum size of the transaction table to max -using the DBENV->set_txn_max -method call
  • - -
  • --txn_timeout sets the timeout for transactions in the environment
  • - -
  • --client_timeout sets the timeout value for the client waiting for -a reply from the server for RPC operations to seconds.
  • - -
  • --server_timeout sets the timeout value for the server to determine -an idle client is gone to seconds.
  • - -
  • --server specifies the hostname of the server -to connect to in the DBENV->set_server -call.
  • - -
  • --rep_client sets the newly created environment to be a -replication client, using the -DBENV->rep_client call.
  • - -
  • --rep_master sets the newly created environment to be a -replication master, using the -DBENV->rep_master call.
  • - -
  • --rep_transport specifies the replication transport function, -using the -DBENV->set_rep_transport -call. This site's machine ID is set to machineid and -the send function, a Tcl proc, is set to sendproc.
  • - -
- -This command will invoke the db_env_create -function.  After it successfully gets a handle to an environment, -we bind it to a new Tcl command of the form envX, where X -is an integer starting at  0 (e.g. env0, env1, etc).  -We use the Tcl_CreateObjCommand() to create the top level environment -command function.  It is through this handle that the user can access -all the commands described in the Environment -Commands section.  Internally, the handle we get back from DB -will be stored as the ClientData portion of the new command set -so that all future environment calls will have that handle readily available.  -Then we call the DBENV->open -method call and possibly some number of setup calls as described above. -

-


-
> <env> verbose which -on|off -

This command controls the use of debugging output for the environment.  -This command directly translates to a call to the DBENV->set_verbose -method call.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message.  The user specifies -which -subsystem to control, and indicates whether debug messages should be turned -on -or off for that subsystem.  The value of which -must be one of the following: -

    -
  • -deadlock - Chooses the deadlocking code by using the DB_VERB_DEADLOCK -value
  • - -
  • -recovery - Chooses the recovery code by using the DB_VERB_RECOVERY -value
  • - -
  • -wait - Chooses the waitsfor code by using the DB_VERB_WAITSFOR value
  • -
- -
-

> <env> close -

This command closes an environment and deletes the handle.  This -command directly translates to a call to the DBENV->close -method call.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. -

Additionally, since the handle is no longer valid, we will call Tcl_DeleteCommand() -so -that further uses of the handle will be dealt with properly by Tcl itself. -

Also, the close command will automatically abort any transactions -and close any mpool memory files.  As such -we must maintain a list of open transaction and mpool handles so that we -can call Tcl_DeleteCommand on those as well. -

-


- -> berkdb envremove
-[-data_dir directory]
-[-force]
-[-home directory]
-[-log_dir directory]
-[-overwrite]
-[-tmp_dir directory]
-[-use_environ]
-[-use_environ_root]
- -

This command removes the environment if it is not in use and deletes -the handle.  This command directly translates to a call to the DBENV->remove -method call.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message.  The arguments are: -

    -
  • --force selects the DB_FORCE flag to remove even if other processes -have the environment open
  • - -
  • --home directory specifies the home directory of the environment
  • - -
  • --data_dir directory selects the data file directory of the -environment by calling DBENV->set_data_dir.
  • - -
  • --log_dir directory selects the log file directory of the -environment  by calling DBENV->set_lg_dir.
  • - -
  • --overwrite sets DB_OVERWRITE flag
  • - -
  • --tmp_dir directory selects the temporary file directory of -the environment  by calling DBENV->set_tmp_dir.
  • - -
  • --use_environ selects the DB_USE_ENVIRON flag to affect file naming
  • - -
  • --use_environ_root selects the DB_USE_ENVIRON_ROOT flag to affect -file naming
  • -
- - - diff --git a/storage/bdb/tcl/docs/historic.html b/storage/bdb/tcl/docs/historic.html deleted file mode 100644 index f5a43e14de6..00000000000 --- a/storage/bdb/tcl/docs/historic.html +++ /dev/null @@ -1,169 +0,0 @@ - - - - - - - - - -

-Compatibility Commands

-The compatibility commands for old Dbm and Ndbm are described in the dbm -manpage. -

> berkdb dbminit filename -

This command will invoke the dbminit function.   Filename -is used as the name of the database. -

-


> berkdb dbmclose -

This command will invoke the dbmclose function. -

-


> berkdb fetch key -

This command will invoke the fetch function.   It will return -the data associated with the given key or a Tcl error. -

-


> berkdb store key data -

This command will invoke the store function.   It will store -the key/data pair.  It will return a 0 on success or -throw a Tcl error. -

-


> berkdb delete key -

This command will invoke the delete function.   It will delete -the key from the database.  It will return a 0 on success -or throw a Tcl error. -

-


> berkdb firstkey -

This command will invoke the firstkey function.   It will -return the first key in the database or a Tcl error. -

-


> berkdb nextkey key -

This command will invoke the nextkey function.   It will return -the next key after the given key or a Tcl error. -

-


> berkdb hcreate nelem -

This command will invoke the hcreate function with nelem -elements.  It will return a 0 on success or a Tcl error. -

-


> berkdb hsearch key data action -

This command will invoke the hsearch function with key -and data.  The action must be either find -or enter.  If it is find, it will return the resultant -data.  If it is enter, it will return a 0 on success or a Tcl -error. -

-


> berkdb hdestroy -

This command will invoke the hdestroy function.  It will return -a 0. -


> berkdb ndbm_open [-create] [-rdonly] [-truncate] -[-mode -mode] [--] filename -

This command will invoke the dbm_open function.    After -it successfully gets a handle to a database, we bind it to a new Tcl command -of the form ndbmX, where X is an integer starting at 0 (e.g. -ndbm0, -ndbm1, etc).  We use the Tcl_CreateObjCommand()  to -create the top level database function.  It is through this handle -that the user can access all of the commands described below.  Internally, -the database handle is sent as the ClientData portion of the new -command set so that all future database calls access the appropriate handle. -

The arguments are: -

    -
  • --- - Terminate the list of options and use remaining arguments as -the file or subdb names (thus allowing the use of filenames beginning with -a dash '-')
  • - -
  • --create selects the O_CREAT flag  to create underlying files
  • - -
  • --rdonly selects the O_RDONLY flag for opening in read-only mode
  • - -
  • --truncate selects the O_TRUNC flag to truncate the database
  • - -
  • --mode mode specifies the mode for created files
  • - -
  • -filename indicates the name of the database
  • -
- -


-


-
> <ndbm> close -

This command closes the database and renders the handle invalid.   -This command directly translates to the dbm_close function call.  -It returns either a 0 (for success),  or it throws a Tcl error with -a system message. -

Additionally, since the handle is no longer valid, we will call Tcl_DeleteCommand() -so -that further uses of the handle will be dealt with properly by Tcl itself.  -


-
> <ndbm> clearerr -

This command clears errors in the database.   This command -directly translates to the dbm_clearerr function call.  It returns -either a 0 (for success),  or it throws a Tcl error with a system -message. -

-


-
> <ndbm> delete key -

This command deletes the key from the database.   -This command directly translates to the dbm_delete function call.  -It returns either a 0 (for success),  or it throws a Tcl error with -a system message. -

-


-
> <ndbm> dirfno -

This command directly translates to the dbm_dirfno function call.  -It returns either results,  or it throws a Tcl error with a system -message. -

-


-
> <ndbm> error -

This command returns the last error.   This command directly -translates to the dbm_error function call.  It returns an error string. -

-


-
> <ndbm> fetch key -

This command gets the given key from the database.   -This command directly translates to the dbm_fetch function call.  -It returns either the data,  or it throws a Tcl error with a system -message. -

-


-
> <ndbm> firstkey -

This command returns the first key in the database.   This -command directly translates to the dbm_firstkey function call.  It -returns either the key,  or it throws a Tcl error with a system message. -

-


-
> <ndbm> nextkey -

This command returns the next key in the database.   This -command directly translates to the dbm_nextkey function call.  It -returns either the key,  or it throws a Tcl error with a system message. -

-


-
> <ndbm> pagfno -

This command directly translates to the dbm_pagfno function call.  -It returns either results,  or it throws a Tcl error with a system -message. -
-


-
> <ndbm> rdonly -

This command changes the database to readonly.   This command -directly translates to the dbm_rdonly function call.  It returns either -a 0 (for success),  or it throws a Tcl error with a system message. -

-


-
> <ndbm> store key data insert|replace -

This command puts the given key and data -pair into the database.   This command directly translates to -the dbm_store function call.  It will either insert or replace -the data based on the action given in the third argument.  It returns -either a 0 (for success),  or it throws a Tcl error with a system -message. -
-


- - diff --git a/storage/bdb/tcl/docs/index.html b/storage/bdb/tcl/docs/index.html deleted file mode 100644 index 4f4e1e90c91..00000000000 --- a/storage/bdb/tcl/docs/index.html +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - -
-

-Complete Tcl Interface for Berkeley DB

- - - - - - diff --git a/storage/bdb/tcl/docs/library.html b/storage/bdb/tcl/docs/library.html deleted file mode 100644 index 217213ed8c2..00000000000 --- a/storage/bdb/tcl/docs/library.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - -
-

-Convenience Commands

-The convenience commands are provided for ease of use with the DB test -suite. -

> berkdb rand -

This command will invoke the rand function and return the random number. -

-


> berkdb random_int low high -

This command will invoke the rand function and return a number between -low -and high. -

-


-

> berkdb srand seed -

This command will invoke the srand function with the given seed -and return 0. -

-


diff --git a/storage/bdb/tcl/docs/lock.html b/storage/bdb/tcl/docs/lock.html deleted file mode 100644 index 75e0bb2de6d..00000000000 --- a/storage/bdb/tcl/docs/lock.html +++ /dev/null @@ -1,207 +0,0 @@ - - - - - - - - - -

-Locking Commands

-Most locking commands work with the environment handle.  However, -when a user gets a lock we create a new lock handle that they then use -with in a similar manner to all the other handles to release the lock.  -We present the general locking functions first, and then those that manipulate -locks. -

> <env> lock_detect [default|oldest|youngest|random] -

This command runs the deadlock detector.  It directly translates -to the lock_detect DB call.  -It returns either a 0 (for success), a DB error message or it throws a -Tcl error with a system message.  The first argument sets the policy -for deadlock as follows: -

    -
  • -default selects the DB_LOCK_DEFAULT policy for default detection -(default if not specified)
  • - -
  • -oldest selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock
  • - -
  • -random selects DB_LOCK_RANDOM to abort a random locker on a deadlock
  • - -
  • -youngest selects DB_LOCK_YOUNGEST to abort the youngest locker on -a deadlock
  • -
- -
-
> <env> lock_stat -

This command returns a list of name/value pairs where the names correspond -to the C-structure field names of DB_LOCK_STAT and the values are the data -returned.  This command is a direct translation of the lock_stat -DB call. -


-
> <env> lock_id -

This command returns a unique locker ID value.  It directly translates -to the lock_id DB call. -
-


-
> <env> lock_id_free  locker -

This command frees the locker allocated by the lock_id call. It directly -translates to the  lock_id_free -DB -call. -


-
> <env> lock_id_set  current -max -

This  is a diagnostic command to set the locker id that will get -allocated next and the maximum id that -
will trigger the id reclaim algorithm. -


-
> <env> lock_get [-nowait]lockmode -locker obj -

This command gets a lock. It will invoke the lock_get -function.  After it successfully gets a handle to a lock, we bind -it to a new Tcl command of the form $env.lockX, where X is -an integer starting at  0 (e.g. $env.lock0, $env.lock1, etc).  -We use the Tcl_CreateObjCommand() to create the top level locking -command function.  It is through this handle that the user can release -the lock.  Internally, the handle we get back from DB will be stored -as the ClientData portion of the new command set so that future -locking calls will have that handle readily available. -

The arguments are: -

    -
  • -locker specifies the locker ID returned from the lock_id -command
  • - -
  • -obj specifies an object to lock
  • - -
  • -the lock mode is specified as one of the following:
  • - -
      -
    • -ng specifies DB_LOCK_NG for not granted (always 0)
    • - -
    • -read specifies DB_LOCK_READ for a read (shared) lock
    • - -
    • -write specifies DB_LOCK_WRITE for an exclusive write lock
    • - -
    • -iwrite specifies DB_LOCK_IWRITE for intent for exclusive write lock
    • - -
    • -iread specifies DB_LOCK_IREAD for intent for shared read lock
    • - -
    • -iwr specifies DB_LOCK_IWR for intent for read and write lock
    • -
    - -
  • --nowait selects the DB_LOCK_NOWAIT to indicate that we do not want -to wait on the lock
  • -
- -
-
> <lock> put -

This command releases the lock referenced by the command.  It is -a direct translation of the lock_put -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message.  Additionally, since -the handle is no longer valid, we will call -Tcl_DeleteCommand() -so -that further uses of the handle will be dealt with properly by Tcl itself. -
-


-
> <env> lock_vec [-nowait] locker -{get|put|put_all|put_obj -[obj] [lockmode] [lock]} ... -

This command performs a series of lock calls.  It is a direct translation -of the lock_vec function.  -This command will return a list of the return values from each operation -specified in the argument list.  For the 'put' operations the entry -in the return value list is either a 0 (for success) or an error.  -For the 'get' operation, the entry is the lock widget handle, $env.lockN -(as described above in <env> lock_get) -or an error.  If an error occurs, the return list will contain the -return values for all the successful operations up the erroneous one and -the error code for that operation.  Subsequent operations will be -ignored. -

As for the other operations, if we are doing a 'get' we will create -the commands and if we are doing a 'put' we will have to delete the commands.  -Additionally, we will have to do this after the call to the DB lock_vec -and iterate over the results, creating and/or deleting Tcl commands.  -It is possible that we may return a lock widget from a get operation that -is considered invalid, if, for instance, there was a put_all operation -performed later in the vector of operations.  The arguments are: -

    -
  • -locker specifies the locker ID returned from the lock_id -command
  • - -
  • --nowait selects the DB_LOCK_NOWAIT to indicate that we do not want -to wait on the lock
  • - -
  • -the lock vectors are tuple consisting of {an operation, lock object, lock -mode, lock handle} where what is required is based on the operation desired:
  • - -
      -
    • -get specifies DB_LOCK_GET to get a lock.  Requires a tuple {get -objmode} -where -mode -is:
    • - -
        -
      • -ng specifies DB_LOCK_NG for not granted (always 0)
      • - -
      • -read specifies DB_LOCK_READ for a read (shared) lock
      • - -
      • -write specifies DB_LOCK_WRITE for an exclusive write lock
      • - -
      • -iwrite specifies DB_LOCK_IWRITE for intent for exclusive write lock
      • - -
      • -iread specifies DB_LOCK_IREAD for intent for shared read lock
      • - -
      • -iwr specifies DB_LOCK_IWR for intent for read and write lock
      • -
      - -
    • -put specifies DB_LOCK_PUT to release a lock.  -Requires a tuple {put lock}
    • - -
    • -put_all specifies DB_LOCK_PUT_ALL to release all locks held by locker.  -Requires a tuple {put_all}
    • - -
    • -put_obj specifies DB_LOCK_PUT_OBJ to release all locks held by locker -associated with the given obj.  Requires a tuple {put_obj -obj}
    • -
    -
- -
-
> <env> lock_timeout timeout -

This command sets the lock timeout for all future locks in this environment.  -The timeout is in microseconds. -
  -
  - - diff --git a/storage/bdb/tcl/docs/log.html b/storage/bdb/tcl/docs/log.html deleted file mode 100644 index 5fdd132d5da..00000000000 --- a/storage/bdb/tcl/docs/log.html +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - -

-Logging Commands

-Logging commands work from the environment handle to control the use of -the log files.  Log files are opened when the environment is opened -and closed when the environment is closed.  In all of the commands -in the logging subsystem that take or return a log sequence number, it -is of the form: -
{fileid offset} -
where the fileid is an identifier of the log file, as -returned from the log_get call. -

> <env> log_archive [-arch_abs] [-arch_data] [-arch_log] -

This command returns  a list of log files that are no longer in -use.  It is a direct call to the log_archive -function. The arguments are: -

    -
  • --arch_abs selects DB_ARCH_ABS to return all pathnames as absolute -pathnames
  • - -
  • --arch_data selects DB_ARCH_DATA to return a list of database files
  • - -
  • --arch_log selects DB_ARCH_LOG to return a list of log files
  • -
- -
-
> <env> log_compare lsn1 lsn2 -

This command compares two log sequence numbers, given as lsn1 -and lsn2.  It is a direct call to the log_compare -function.  It will return a -1, 0, 1 to indicate if lsn1 -is less than, equal to or greater than lsn2 respectively. -
-


-
> <env> log_file lsn -

This command returns  the file name associated with the given lsn.  -It is a direct call to the log_file -function. -
-


-
> <env> log_flush [lsn] -

This command  flushes the log up to the specified lsn -or flushes all records if none is given.  It is a direct call to the -log_flush -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. -
-


-
> <env> log_get [-checkpoint] -[-current] [-first] [-last] [-next] [-prev] [-set lsn] -

This command retrieves a record from the log according to the lsn -given and returns it and the data.  It is a direct call to the log_get -function.  It is a way of implementing a manner of log iteration similar -to cursors.   -The information we return is similar to database information.  We -return a list where the first item is the LSN (which is a list itself) -and the second item is the data.  So it looks like, fully expanded, -{{fileid -offset} -data}.  -In the case where DB_NOTFOUND is returned, we return an empty list {}.  -All other errors return a Tcl error.  The arguments are: -

    -
  • --checkpoint selects the DB_CHECKPOINT flag to return the LSN/data -pair of the last record written through log_put -with DB_CHECKPOINT specified
  • - -
  • --current selects the DB_CURRENT flag to return the current record
  • - -
  • --first selects the DB_FIRST flag to return the first record in the -log.
  • - -
  • --last selects the DB_LAST flag to return the last record in the -log.
  • - -
  • --next selects the DB_NEXT flag to return the next record in the -log.
  • - -
  • --prev selects the DB_PREV flag to return the  previous record -in the log.
  • - -
  • --set selects the DB_SET flag to return the record specified by the -given lsn
  • -
- -
-
> <env> log_put [-checkpoint] -[-flush] record -

This command stores a record into the log and returns -the LSN of the log record.  It is a direct call to the log_put -function.  It returns either an LSN or it throws a Tcl error with -a system message.  The arguments are: -

    -
  • --checkpoint selects the DB_CHECKPOINT flag
  • - -
  • --flush selects the DB_FLUSH flag to flush the log to disk.
  • -
- -
-
> <env> log_stat -

This command returns  the statistics associated with the logging -subsystem.  It is a direct call to the log_stat -function.  It returns a list of name/value pairs of the DB_LOG_STAT -structure. - - diff --git a/storage/bdb/tcl/docs/mpool.html b/storage/bdb/tcl/docs/mpool.html deleted file mode 100644 index 83c1f452c3c..00000000000 --- a/storage/bdb/tcl/docs/mpool.html +++ /dev/null @@ -1,190 +0,0 @@ - - - - - - - - - -

-Memory Pool Commands

-Memory pools are used in a manner similar to the other subsystems.  -We create a handle to the pool and  then use it for a variety of operations.  -Some of the memory pool commands use the environment instead. Those are -presented first. -

> <env> mpool_stat -

This command returns  the statistics associated with the memory -pool subsystem.  It is a direct call to the memp_stat -function.  It returns a list of name/value pairs of the DB_MPOOL_STAT -structure. -
-


-
> <env> mpool_sync lsn -

This command flushes the memory pool for all pages with a log sequence -number less than lsn.  It is a direct call to the memp_sync  -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. -
-


-
> <env> mpool_trickle percent -

This command tells DB to ensure that at least percent -percent of the pages are clean by writing out enough dirty pages to -achieve that percentage.  It is a direct call to the memp_trickle -function.  The command will return the number of pages actually written.  -It returns either the number of pages on success, or it throws a Tcl error -with a system message. -
-


-

> <env> mpool [-create] [-nommap] [-rdonly] [-mode mode] --pagesize size [file] -

This command creates a new memory pool.  It invokes the memp_fopen -function.  After it successfully gets a handle to a memory pool, we -bind it to a new Tcl command of the form $env.mpX, where -X is an integer starting at  0 (e.g. $env.mp0, $env.mp1, etc).  -We use the Tcl_CreateObjCommand() to create the top level memory -pool functions.  It is through this handle that the user can manipulate -the pool.  Internally, the handle we get back from DB will be stored -as the ClientData portion of the new command set so that future -memory pool calls will have that handle readily available.  Additionally, -we need to maintain this handle in relation to the environment so that -if the user calls <env> close without closing -the memory pool we can properly clean up.  The arguments are: -

    -
  • -file is the name of the file to open
  • - -
  • --create selects the DB_CREATE flag to create underlying file
  • - -
  • --mode mode sets the permissions of created file to mode
  • - -
  • --nommap selects the DB_NOMMAP flag to disallow using mmap'ed files
  • - -
  • --pagesize sets the underlying file page size to size
  • - -
  • --rdonly selects the DB_RDONLY flag for read only access
  • -
- -
-
> <mp> close -

This command closes the memory pool.  It is a direct call to the -memp_close -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. -

Additionally, since the handle is no longer valid, we will call -Tcl_DeleteCommand() -so -that further uses of the handle will be dealt with properly by Tcl itself.  -We must also remove the reference to this handle from the environment.  -We will go through the list of pinned pages that were acquired by the get -command and -put them back. -


-
> <mp> fsync -

This command flushes all of the file's dirty pages to disk.  It -is a direct call to the memp_fsync -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. -


-
> <mp> get [-create] [-last] [-new] -[pgno] -

This command gets the  pgno page from the memory -pool.  It invokes the memp_fget -function and possibly the memp_fset -function if any options are chosen to set the page characteristics.  -After it successfully gets a handle to a page,  we bind it to and -return a new Tcl command of the form $env.mpN.pX, where X -is an integer starting at  0 (e.g. $env.mp0.p0, $env.mp1.p0, etc).  -We use the Tcl_CreateObjCommand() to create the top level page functions.  -It is through this handle that the user can manipulate the page.  -Internally, the handle we get back from DB will be stored as the ClientData -portion of the new command set.  We need to store this handle in  -relation to the memory pool handle so that if the memory pool is closed, -we will put back the pages (setting the discard -flag) and delete that set of commands. -

The arguments are: -

    -
  • --create selects the DB_MPOOL_CREATE flag  to create the page -if it does not exist.
  • - -
  • --last selects the DB_MPOOL_LAST flag to return the last page in -the file
  • - -
  • --new selects the DB_MPOOL_NEW flag to create a new page
  • -
- -
-
> <pg> pgnum -

This command returns the page number associated with this memory pool -page.  Primarily it will be used after an <mp> -get call. -
-


> <pg> pgsize -

This command returns the page size associated with this memory pool -page.  Primarily it will be used after an <mp> -get call. -
-


> <pg> set [-clean] [-dirty] [-discard] -

This command sets the characteristics of the page.  It is a direct -call to the memp_fset function.  -It returns either a 0 (for success), a DB error message or it throws a -Tcl error with a system message.  The arguments are: -

    -
  • --clean selects the DB_MPOOL_CLEAN flag to indicate this is a clean -page
  • - -
  • --dirty selects the DB_MPOOL_DIRTY flag to indicate this page should -be flushed before eviction
  • - -
  • --discard selects the DB_MPOOL_DISCARD flag to indicate this page -is unimportant
  • -
- -
-
> <pg> put [-clean] [-dirty] [-discard] -

This command will put back the page to the memory pool.  It is -a direct call to the memp_fput -function.  It returns either a 0 (for success), a DB error message -or it throws a Tcl error with a system message. Additionally, since the -handle is no longer valid, we will call -Tcl_DeleteCommand() -so that -further uses of the handle will be dealt with properly by Tcl itself.  -We must also remove the reference to this handle from the memory pool. -

The arguments are: -

    -
  • --clean selects the DB_MPOOL_CLEAN flag to indicate this is a clean -page
  • - -
  • --dirty selects the DB_MPOOL_DIRTY flag to indicate this page should -be flushed before eviction
  • - -
  • --discard selects the DB_MPOOL_DISCARD flag to indicate this page -is unimportant
  • -
- -
-
> <pg> init val|string -

This command initializes the page to the val given or -places the string given at the beginning of the page.  -It returns a 0 for success or it throws a Tcl error with an error message. -

-


-
> <pg> is_setto val|string -

This command verifies the page contains the val given -or checks that the string given is at the beginning of the page.  -It returns a 1 if the page is correctly set to the value and a 0 otherwise. diff --git a/storage/bdb/tcl/docs/rep.html b/storage/bdb/tcl/docs/rep.html deleted file mode 100644 index d50b62375e6..00000000000 --- a/storage/bdb/tcl/docs/rep.html +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - Replication commands - - - -

-Replication Commands

-Replication commands are invoked from the environment handle, after -it has been opened with the appropriate flags defined -here.
-
-

> <env> rep_process_message machid control -rec -

This command processes a single incoming replication message.  It -is a direct translation of the rep_process_message -function.  -It returns either a 0 (for success), a DB error message or it throws a -Tcl error with a system message.  The arguments are: -

    -
  • -machid is the machine ID of the machine that sent this -message.
  • - -
  • -control is a binary string containing the exact contents of the -control argument to the sendproc function -that was passed this message on another site.
  • - -
  • -rec is a binary string containing the exact contents of the -rec argument to the sendproc function -that was passed this message on another site.
  • -
- -
-
> <env> rep_elect nsites pri wait -sleep -

This command causes a replication election.  It is a direct translation -of the rep_elect function.  -Its arguments, all integers, correspond exactly to that C function's -parameters. -It will return a list containing two integers, which contain, -respectively, the integer values returned in the C function's -midp and selfp parameters. - - diff --git a/storage/bdb/tcl/docs/test.html b/storage/bdb/tcl/docs/test.html deleted file mode 100644 index a011401838a..00000000000 --- a/storage/bdb/tcl/docs/test.html +++ /dev/null @@ -1,150 +0,0 @@ - - - - - - - - - -

-Debugging and Testing

-We have imported the debugging system from the old test suite into the -new interface to aid in debugging problems.  There are several variables -that are available both in gdb as globals to the C code, and variables -in Tcl that the user can set.  These variables are linked together -so that changes in one venue are reflected in the other.  The names -of the variables have been modified a bit to reduce the likelihood -
of namespace trampling.  We have added a double underscore to -all the names. -

The variables are all initialized to zero (0) thus resulting in debugging -being turned off.  The purpose of the debugging, fundamentally, is -to allow the user to set a breakpoint prior to making a DB call.  -This breakpoint is set in the __db_loadme() function.  The -user may selectively turn on various debugging areas each controlled by -a separate variable (note they all have two (2) underscores prepended to -the name): -

    -
  • -__debug_on - Turns on the debugging system.  This must be on -for any debugging to occur
  • - -
  • -__debug_print - Turns on printing a debug count statement on each -call
  • - -
  • -__debug_test - Hits the breakpoint in __db_loadme on the -specific iteration
  • - -
  • -__debug_stop - Hits the breakpoint in __db_loadme on every -(or the next) iteration
  • -
-Note to developers:  Anyone extending this interface must place -a call to _debug_check() (no arguments) before every call into the -DB library. -

There is also a command available that will force a call to the _debug_check -function. -

> berkdb debug_check -

-


-
For testing purposes we have added several hooks into the DB library -and a small interface into the environment and/or database commands to -manipulate the hooks.  This command interface and the hooks and everything -that goes with it is only enabled when the test option is configured into -DB. -

> <env> test copy location -
> <db> test copy location -
> <env> test abort location -
> <db> test abort location -

In order to test recovery we need to be able to abort the creation or -deletion process at various points.  Also we want to invoke a copy -function to copy the database file(s)  at various points as well so -that we can obtain before/after snapshots of the databases.  The interface -provides the test command to specify a location where we -wish to invoke a copy or an abort.  The command is available -from either the environment or the database for convenience.  The -location -can be one of the following: -

    -
  • -none - Clears the location
  • - -
  • -preopen - Sets the location prior to the __os_open call in the creation -process
  • - -
  • -postopen - Sets the location to immediately following the __os_open -call in creation
  • - -
  • -postlogmeta - Sets the location to immediately following the __db_log_page -call to log the meta data in creation.  Only valid for Btree.
  • - -
  • -postlog - Sets the location to immediately following the last (or -only) __db_log_page call in creation.
  • - -
  • -postsync - Sets the location to immediately following the sync of -the log page in creation.
  • - -
  • -prerename - Sets the location prior to the __os_rename call in the -deletion process.
  • - -
  • -postrename - Sets the location to immediately following the __os_rename -call in deletion
  • -
- -
-
> <env> mutex mode nitems -

This command creates a mutex region for testing.  It sets the mode -of the region to mode and sets up for nitems -number of mutex entries.  After we successfully get a handle to a -mutex we create a command of the form $env.mutexX, where -X is an integer starting at  0 (e.g. $env.mutex0, $env.mutex1, -etc).   -We use the Tcl_CreateObjCommand()  to create the top level -mutex function.  It is through this handle that the user can access -all of the commands described below.  Internally, the mutex handle -is sent as the ClientData portion of the new command set so that -all future mutex calls access the appropriate handle. -

-


> <mutex> close -

This command closes the mutex and renders the handle invalid.   -This command directly translates to the __db_r_detach function call.  -It returns either a 0 (for success),  or it throws a Tcl error with -a system message. -

Additionally, since the handle is no longer valid, we will call Tcl_DeleteCommand() -so -that further uses of the handle will be dealt with properly by Tcl itself.  -


> <mutex> get id -

This command locks the mutex identified by id.  It -returns either a 0 (for success),  or it throws a Tcl error with a -system message. -
-


> <mutex> release id -

This command releases the mutex identified by id.  -It returns either a 0 (for success),  or it throws a Tcl error with -a system message. -
-


> <mutex> getval id -

This command gets the value stored for the mutex identified by id.  -It returns either the value,  or it throws a Tcl error with a system -message. -
-


> <mutex> setval id val -

This command sets the value stored for the mutex identified by id -to -val.  -It returns either a 0 (for success),  or it throws a Tcl error with -a system message. -
-


-
  - - diff --git a/storage/bdb/tcl/docs/txn.html b/storage/bdb/tcl/docs/txn.html deleted file mode 100644 index 8abef4b31b2..00000000000 --- a/storage/bdb/tcl/docs/txn.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - -

-Transaction Commands

-Transactions are used in a manner similar to the other subsystems.  -We create a handle to the transaction and  then use it for a variety -of operations.  Some of the transaction commands use the environment -instead.  Those are presented first.  The transaction command -handle returned is the handle used by the various commands that can be -transaction protected, such as cursors. -
-
-

> <env> txn_checkpoint [-kbyte kb] [-min min] -

This command causes a checkpoint of the transaction region.  It -is a direct translation of the txn_checkpoint -function.  -It returns either a 0 (for success), a DB error message or it throws a -Tcl error with a system message.  The arguments are: -

    -
  • --forcecauses the checkpoint to occur regardless of inactivity - -
  • --kbytecauses the checkpoint to occur only if kb kilobytes -of log data has been written since the last checkpoint - -
  • --min causes the checkpoint to occur only if min minutes -have passed since the last checkpoint -
- -
-
> <env> txn_stat -

This command returns transaction statistics.  It is a direct translation -of the txn_stat function.  -It will return a list of name/value pairs that correspond to the DB_TXN_STAT -structure. -


-
> <env> txn_id_set  current max -

This is a diagnostic command that sets the next transaction id to be -allocated and the maximum transaction -
id, which is the point at which the reclamation algorithm is triggered. -


-
>  <txn> id -

This command returns the transaction id.  It is a direct call to -the txn_id function.  The -typical use of this identifier is as the locker value for -the lock_get and lock_vec -calls. -


-
> <txn> prepare -

This command initiates a two-phase commit.  It is a direct call -to the txn_prepare function.  -It returns either a 0 (for success), a DB error message or it throws a -Tcl error with a system message. -


> <env> txn_timeout -timeout -

This command sets thetransaction timeout for transactions started in -the future in this environment.  The timeout is in micorseconds. -
  -
  - - diff --git a/storage/bdb/tcl/tcl_compat.c b/storage/bdb/tcl/tcl_compat.c deleted file mode 100644 index 8b518f761c7..00000000000 --- a/storage/bdb/tcl/tcl_compat.c +++ /dev/null @@ -1,747 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_compat.c,v 11.46 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifdef CONFIG_TEST - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#define DB_DBM_HSEARCH 1 - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -/* - * bdb_HCommand -- - * Implements h* functions. - * - * PUBLIC: int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); - */ -int -bdb_HCommand(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *hcmds[] = { - "hcreate", - "hdestroy", - "hsearch", - NULL - }; - enum hcmds { - HHCREATE, - HHDESTROY, - HHSEARCH - }; - static const char *srchacts[] = { - "enter", - "find", - NULL - }; - enum srchacts { - ACT_ENTER, - ACT_FIND - }; - ENTRY item, *hres; - ACTION action; - int actindex, cmdindex, nelem, result, ret; - Tcl_Obj *res; - - result = TCL_OK; - /* - * Get the command name index from the object based on the cmds - * defined above. This SHOULD NOT fail because we already checked - * in the 'berkdb' command. - */ - if (Tcl_GetIndexFromObj(interp, - objv[1], hcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum hcmds)cmdindex) { - case HHCREATE: - /* - * Must be 1 arg, nelem. Error if not. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "nelem"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &nelem); - if (result == TCL_OK) { - _debug_check(); - ret = hcreate((size_t)nelem) == 0 ? 
1: 0; - (void)_ReturnSetup( - interp, ret, DB_RETOK_STD(ret), "hcreate"); - } - break; - case HHSEARCH: - /* - * 3 args for this. Error if different. - */ - if (objc != 5) { - Tcl_WrongNumArgs(interp, 2, objv, "key data action"); - return (TCL_ERROR); - } - item.key = Tcl_GetStringFromObj(objv[2], NULL); - item.data = Tcl_GetStringFromObj(objv[3], NULL); - if (Tcl_GetIndexFromObj(interp, objv[4], srchacts, - "action", TCL_EXACT, &actindex) != TCL_OK) - return (IS_HELP(objv[4])); - switch ((enum srchacts)actindex) { - case ACT_ENTER: - action = ENTER; - break; - default: - case ACT_FIND: - action = FIND; - break; - } - _debug_check(); - hres = hsearch(item, action); - if (hres == NULL) - Tcl_SetResult(interp, "-1", TCL_STATIC); - else if (action == FIND) - Tcl_SetResult(interp, (char *)hres->data, TCL_STATIC); - else - /* action is ENTER */ - Tcl_SetResult(interp, "0", TCL_STATIC); - - break; - case HHDESTROY: - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - hdestroy(); - res = Tcl_NewIntObj(0); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * - * bdb_NdbmOpen -- - * Opens an ndbm database. - * - * PUBLIC: #if DB_DBM_HSEARCH != 0 - * PUBLIC: int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **)); - * PUBLIC: #endif - */ -int -bdb_NdbmOpen(interp, objc, objv, dbpp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBM **dbpp; /* Dbm pointer */ -{ - static const char *ndbopen[] = { - "-create", - "-mode", - "-rdonly", - "-truncate", - "--", - NULL - }; - enum ndbopen { - NDB_CREATE, - NDB_MODE, - NDB_RDONLY, - NDB_TRUNC, - NDB_ENDARG - }; - - int endarg, i, mode, open_flags, optindex, read_only, result, ret; - char *arg, *db; - - result = TCL_OK; - endarg = mode = open_flags = read_only = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - /* - * Get the option name index from the object based on the args - * defined above. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], ndbopen, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum ndbopen)optindex) { - case NDB_CREATE: - open_flags |= O_CREAT; - break; - case NDB_RDONLY: - read_only = 1; - break; - case NDB_TRUNC: - open_flags |= O_TRUNC; - break; - case NDB_MODE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mode mode?"); - result = TCL_ERROR; - break; - } - /* - * Don't need to check result here because - * if TCL_ERROR, the error message is already - * set up, and we'll bail out below. If ok, - * the mode is set and we go on. - */ - result = Tcl_GetIntFromObj(interp, objv[i++], &mode); - break; - case NDB_ENDARG: - endarg = 1; - break; - } - - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - - /* - * Any args we have left, (better be 0, or 1 left) is a - * file name. If we have 0, then an in-memory db. If - * there is 1, a db name. - */ - db = NULL; - if (i != objc && i != objc - 1) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
?file?"); - result = TCL_ERROR; - goto error; - } - if (i != objc) - db = Tcl_GetStringFromObj(objv[objc - 1], NULL); - - /* - * When we get here, we have already parsed all of our args - * and made all our calls to set up the database. Everything - * is okay so far, no errors, if we get here. - * - * Now open the database. - */ - if (read_only) - open_flags |= O_RDONLY; - else - open_flags |= O_RDWR; - _debug_check(); - if ((*dbpp = dbm_open(db, open_flags, mode)) == NULL) { - ret = Tcl_GetErrno(); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db open"); - goto error; - } - return (TCL_OK); - -error: - *dbpp = NULL; - return (result); -} - -/* - * bdb_DbmCommand -- - * Implements "dbm" commands. - * - * PUBLIC: #if DB_DBM_HSEARCH != 0 - * PUBLIC: int bdb_DbmCommand - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *)); - * PUBLIC: #endif - */ -int -bdb_DbmCommand(interp, objc, objv, flag, dbm) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - int flag; /* Which db interface */ - DBM *dbm; /* DBM pointer */ -{ - static const char *dbmcmds[] = { - "dbmclose", - "dbminit", - "delete", - "fetch", - "firstkey", - "nextkey", - "store", - NULL - }; - enum dbmcmds { - DBMCLOSE, - DBMINIT, - DBMDELETE, - DBMFETCH, - DBMFIRST, - DBMNEXT, - DBMSTORE - }; - static const char *stflag[] = { - "insert", "replace", - NULL - }; - enum stflag { - STINSERT, STREPLACE - }; - datum key, data; - void *dtmp, *ktmp; - u_int32_t size; - int cmdindex, freedata, freekey, stindex, result, ret; - char *name, *t; - - result = TCL_OK; - freekey = freedata = 0; - dtmp = ktmp = NULL; - - /* - * Get the command name index from the object based on the cmds - * defined above. This SHOULD NOT fail because we already checked - * in the 'berkdb' command. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], dbmcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - switch ((enum dbmcmds)cmdindex) { - case DBMCLOSE: - /* - * No arg for this. Error if different. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - if (flag == DBTCL_DBM) - ret = dbmclose(); - else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - return (TCL_ERROR); - } - (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose"); - break; - case DBMINIT: - /* - * Must be 1 arg - file. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "file"); - return (TCL_ERROR); - } - name = Tcl_GetStringFromObj(objv[2], NULL); - if (flag == DBTCL_DBM) - ret = dbminit(name); - else { - Tcl_SetResult(interp, "Bad interface flag for command", - TCL_STATIC); - return (TCL_ERROR); - } - (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit"); - break; - case DBMFETCH: - /* - * 1 arg for this. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "key"); - return (TCL_ERROR); - } - if ((ret = _CopyObjBytes( - interp, objv[2], &ktmp, &size, &freekey)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "dbm fetch"); - goto out; - } - key.dsize = (int)size; - key.dptr = (char *)ktmp; - _debug_check(); - if (flag == DBTCL_DBM) - data = fetch(key); - else if (flag == DBTCL_NDBM) - data = dbm_fetch(dbm, key); - else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - result = TCL_ERROR; - goto out; - } - if (data.dptr == NULL || - (ret = __os_malloc(NULL, (size_t)data.dsize + 1, &t)) != 0) - Tcl_SetResult(interp, "-1", TCL_STATIC); - else { - memcpy(t, data.dptr, (size_t)data.dsize); - t[data.dsize] = '\0'; - Tcl_SetResult(interp, t, TCL_VOLATILE); - __os_free(NULL, t); - } - break; - case DBMSTORE: - /* - * 2 args for this. Error if different. 
- */ - if (objc != 4 && flag == DBTCL_DBM) { - Tcl_WrongNumArgs(interp, 2, objv, "key data"); - return (TCL_ERROR); - } - if (objc != 5 && flag == DBTCL_NDBM) { - Tcl_WrongNumArgs(interp, 2, objv, "key data action"); - return (TCL_ERROR); - } - if ((ret = _CopyObjBytes( - interp, objv[2], &ktmp, &size, &freekey)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "dbm fetch"); - goto out; - } - key.dsize = (int)size; - key.dptr = (char *)ktmp; - if ((ret = _CopyObjBytes( - interp, objv[3], &dtmp, &size, &freedata)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "dbm fetch"); - goto out; - } - data.dsize = (int)size; - data.dptr = (char *)dtmp; - _debug_check(); - if (flag == DBTCL_DBM) - ret = store(key, data); - else if (flag == DBTCL_NDBM) { - if (Tcl_GetIndexFromObj(interp, objv[4], stflag, - "flag", TCL_EXACT, &stindex) != TCL_OK) - return (IS_HELP(objv[4])); - switch ((enum stflag)stindex) { - case STINSERT: - flag = DBM_INSERT; - break; - case STREPLACE: - flag = DBM_REPLACE; - break; - } - ret = dbm_store(dbm, key, data, flag); - } else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - return (TCL_ERROR); - } - (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store"); - break; - case DBMDELETE: - /* - * 1 arg for this. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "key"); - return (TCL_ERROR); - } - if ((ret = _CopyObjBytes( - interp, objv[2], &ktmp, &size, &freekey)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "dbm fetch"); - goto out; - } - key.dsize = (int)size; - key.dptr = (char *)ktmp; - _debug_check(); - if (flag == DBTCL_DBM) - ret = delete(key); - else if (flag == DBTCL_NDBM) - ret = dbm_delete(dbm, key); - else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - return (TCL_ERROR); - } - (void)_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete"); - break; - case DBMFIRST: - /* - * No arg for this. 
Error if different. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - if (flag == DBTCL_DBM) - key = firstkey(); - else if (flag == DBTCL_NDBM) - key = dbm_firstkey(dbm); - else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - return (TCL_ERROR); - } - if (key.dptr == NULL || - (ret = __os_malloc(NULL, (size_t)key.dsize + 1, &t)) != 0) - Tcl_SetResult(interp, "-1", TCL_STATIC); - else { - memcpy(t, key.dptr, (size_t)key.dsize); - t[key.dsize] = '\0'; - Tcl_SetResult(interp, t, TCL_VOLATILE); - __os_free(NULL, t); - } - break; - case DBMNEXT: - /* - * 0 or 1 arg for this. Error if different. - */ - _debug_check(); - if (flag == DBTCL_DBM) { - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - if ((ret = _CopyObjBytes( - interp, objv[2], &ktmp, &size, &freekey)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "dbm fetch"); - goto out; - } - key.dsize = (int)size; - key.dptr = (char *)ktmp; - data = nextkey(key); - } else if (flag == DBTCL_NDBM) { - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - data = dbm_nextkey(dbm); - } else { - Tcl_SetResult(interp, - "Bad interface flag for command", TCL_STATIC); - return (TCL_ERROR); - } - if (data.dptr == NULL || - (ret = __os_malloc(NULL, (size_t)data.dsize + 1, &t)) != 0) - Tcl_SetResult(interp, "-1", TCL_STATIC); - else { - memcpy(t, data.dptr, (size_t)data.dsize); - t[data.dsize] = '\0'; - Tcl_SetResult(interp, t, TCL_VOLATILE); - __os_free(NULL, t); - } - break; - } - -out: if (dtmp != NULL && freedata) - __os_free(NULL, dtmp); - if (ktmp != NULL && freekey) - __os_free(NULL, ktmp); - return (result); -} - -/* - * ndbm_Cmd -- - * Implements the "ndbm" widget. 
- * - * PUBLIC: int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - */ -int -ndbm_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* DB handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *ndbcmds[] = { - "clearerr", - "close", - "delete", - "dirfno", - "error", - "fetch", - "firstkey", - "nextkey", - "pagfno", - "rdonly", - "store", - NULL - }; - enum ndbcmds { - NDBCLRERR, - NDBCLOSE, - NDBDELETE, - NDBDIRFNO, - NDBERR, - NDBFETCH, - NDBFIRST, - NDBNEXT, - NDBPAGFNO, - NDBRDONLY, - NDBSTORE - }; - DBM *dbp; - DBTCL_INFO *dbip; - Tcl_Obj *res; - int cmdindex, result, ret; - - Tcl_ResetResult(interp); - dbp = (DBM *)clientData; - dbip = _PtrToInfo((void *)dbp); - result = TCL_OK; - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (dbp == NULL) { - Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (dbip == NULL) { - Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, - objv[1], ndbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum ndbcmds)cmdindex) { - case NDBCLOSE: - _debug_check(); - dbm_close(dbp); - (void)Tcl_DeleteCommand(interp, dbip->i_name); - _DeleteInfo(dbip); - res = Tcl_NewIntObj(0); - break; - case NDBDELETE: - case NDBFETCH: - case NDBFIRST: - case NDBNEXT: - case NDBSTORE: - result = bdb_DbmCommand(interp, objc, objv, DBTCL_NDBM, dbp); - break; - case NDBCLRERR: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbm_clearerr(dbp); - if (ret) - (void)_ReturnSetup( - interp, ret, DB_RETOK_STD(ret), "clearerr"); - else - res = Tcl_NewIntObj(ret); - break; - case NDBDIRFNO: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbm_dirfno(dbp); - res = Tcl_NewIntObj(ret); - break; - case NDBPAGFNO: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbm_pagfno(dbp); - res = Tcl_NewIntObj(ret); - break; - case NDBERR: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbm_error(dbp); - Tcl_SetErrno(ret); - Tcl_SetResult(interp, - (char *)Tcl_PosixError(interp), TCL_STATIC); - break; - case NDBRDONLY: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbm_rdonly(dbp); - if (ret) - (void)_ReturnSetup( - interp, ret, DB_RETOK_STD(ret), "rdonly"); - else - res = Tcl_NewIntObj(ret); - break; - } - - /* - * Only set result if we have a res. Otherwise, lower functions have - * already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} -#endif /* CONFIG_TEST */ diff --git a/storage/bdb/tcl/tcl_db.c b/storage/bdb/tcl/tcl_db.c deleted file mode 100644 index f60be3f4381..00000000000 --- a/storage/bdb/tcl/tcl_db.c +++ /dev/null @@ -1,2871 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_db.c,v 11.145 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -static int tcl_DbAssociate __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbClose __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *)); -static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, int)); -#ifdef CONFIG_TEST -static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -#endif -static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbTruncate __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbCursor __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB *, DBC **)); -static int tcl_DbJoin __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB *, DBC **)); -static int tcl_DbGetFlags __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbGetOpenFlags __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbGetjoin __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_DbCount __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *)); -static int tcl_second_call __P((DB *, const DBT *, const DBT *, DBT *)); - -/* - * _DbInfoDelete -- - * - * PUBLIC: void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *)); - */ -void -_DbInfoDelete(interp, dbip) - Tcl_Interp *interp; - DBTCL_INFO *dbip; -{ - DBTCL_INFO *nextp, *p; - /* - * First we have to close any open cursors. Then we close - * our db. 
- */ - for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) { - nextp = LIST_NEXT(p, entries); - /* - * Check if this is a cursor info structure and if - * it is, if it belongs to this DB. If so, remove - * its commands and info structure. - */ - if (p->i_parent == dbip && p->i_type == I_DBC) { - (void)Tcl_DeleteCommand(interp, p->i_name); - _DeleteInfo(p); - } - } - (void)Tcl_DeleteCommand(interp, dbip->i_name); - _DeleteInfo(dbip); -} - -/* - * - * PUBLIC: int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - * - * db_Cmd -- - * Implements the "db" widget. - */ -int -db_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* DB handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *dbcmds[] = { -#ifdef CONFIG_TEST - "keyrange", - "pget", - "rpcid", - "test", -#endif - "associate", - "close", - "count", - "cursor", - "del", - "get", - "get_bt_minkey", - "get_cachesize", - "get_dbname", - "get_encrypt_flags", - "get_env", - "get_errpfx", - "get_flags", - "get_h_ffactor", - "get_h_nelem", - "get_join", - "get_lorder", - "get_open_flags", - "get_pagesize", - "get_q_extentsize", - "get_re_delim", - "get_re_len", - "get_re_pad", - "get_re_source", - "get_type", - "is_byteswapped", - "join", - "put", - "stat", - "sync", - "truncate", - NULL - }; - enum dbcmds { -#ifdef CONFIG_TEST - DBKEYRANGE, - DBPGET, - DBRPCID, - DBTEST, -#endif - DBASSOCIATE, - DBCLOSE, - DBCOUNT, - DBCURSOR, - DBDELETE, - DBGET, - DBGETBTMINKEY, - DBGETCACHESIZE, - DBGETDBNAME, - DBGETENCRYPTFLAGS, - DBGETENV, - DBGETERRPFX, - DBGETFLAGS, - DBGETHFFACTOR, - DBGETHNELEM, - DBGETJOIN, - DBGETLORDER, - DBGETOPENFLAGS, - DBGETPAGESIZE, - DBGETQEXTENTSIZE, - DBGETREDELIM, - DBGETRELEN, - DBGETREPAD, - DBGETRESOURCE, - DBGETTYPE, - DBSWAPPED, - DBJOIN, - DBPUT, - DBSTAT, - DBSYNC, - DBTRUNCATE - }; - DB *dbp; - DB_ENV *dbenv; - DBC *dbc; - DBTCL_INFO *dbip, *ip; - 
DBTYPE type; - Tcl_Obj *res, *myobjv[3]; - int cmdindex, intval, ncache, result, ret; - char newname[MSG_SIZE]; - u_int32_t bytes, gbytes, value; - const char *strval, *filename, *dbname, *envid; - - Tcl_ResetResult(interp); - dbp = (DB *)clientData; - dbip = _PtrToInfo((void *)dbp); - memset(newname, 0, MSG_SIZE); - result = TCL_OK; - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (dbp == NULL) { - Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (dbip == NULL) { - Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, - objv[1], dbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum dbcmds)cmdindex) { -#ifdef CONFIG_TEST - case DBKEYRANGE: - result = tcl_DbKeyRange(interp, objc, objv, dbp); - break; - case DBPGET: - result = tcl_DbGet(interp, objc, objv, dbp, 1); - break; - case DBRPCID: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - /* - * !!! Retrieve the client ID from the dbp handle directly. - * This is for testing purposes only. It is dbp-private data. 
- */ - res = Tcl_NewLongObj((long)dbp->cl_id); - break; - case DBTEST: - result = tcl_EnvTest(interp, objc, objv, dbp->dbenv); - break; -#endif - case DBASSOCIATE: - result = tcl_DbAssociate(interp, objc, objv, dbp); - break; - case DBCLOSE: - result = tcl_DbClose(interp, objc, objv, dbp, dbip); - break; - case DBDELETE: - result = tcl_DbDelete(interp, objc, objv, dbp); - break; - case DBGET: - result = tcl_DbGet(interp, objc, objv, dbp, 0); - break; - case DBPUT: - result = tcl_DbPut(interp, objc, objv, dbp); - break; - case DBCOUNT: - result = tcl_DbCount(interp, objc, objv, dbp); - break; - case DBSWAPPED: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbp->get_byteswapped(dbp, &intval); - res = Tcl_NewIntObj(intval); - break; - case DBGETTYPE: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbp->get_type(dbp, &type); - if (type == DB_BTREE) - res = NewStringObj("btree", strlen("btree")); - else if (type == DB_HASH) - res = NewStringObj("hash", strlen("hash")); - else if (type == DB_RECNO) - res = NewStringObj("recno", strlen("recno")); - else if (type == DB_QUEUE) - res = NewStringObj("queue", strlen("queue")); - else { - Tcl_SetResult(interp, - "db gettype: Returned unknown type\n", TCL_STATIC); - result = TCL_ERROR; - } - break; - case DBSTAT: - result = tcl_DbStat(interp, objc, objv, dbp); - break; - case DBSYNC: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbp->sync(dbp, 0); - res = Tcl_NewIntObj(ret); - if (ret != 0) { - Tcl_SetObjResult(interp, res); - result = TCL_ERROR; - } - break; - case DBCURSOR: - snprintf(newname, sizeof(newname), - "%s.c%d", dbip->i_name, dbip->i_dbdbcid); - ip = _NewInfo(interp, NULL, newname, I_DBC); - if (ip != NULL) { - result = tcl_DbCursor(interp, objc, objv, dbp, &dbc); - if (result == TCL_OK) { - dbip->i_dbdbcid++; - ip->i_parent = dbip; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)dbc_Cmd, - (ClientData)dbc, NULL); - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, dbc); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, - "Could not set up info", TCL_STATIC); - result = TCL_ERROR; - } - break; - case DBJOIN: - snprintf(newname, sizeof(newname), - "%s.c%d", dbip->i_name, dbip->i_dbdbcid); - ip = _NewInfo(interp, NULL, newname, I_DBC); - if (ip != NULL) { - result = tcl_DbJoin(interp, objc, objv, dbp, &dbc); - if (result == TCL_OK) { - dbip->i_dbdbcid++; - ip->i_parent = dbip; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)dbc_Cmd, - (ClientData)dbc, NULL); - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, dbc); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, - "Could not set up info", TCL_STATIC); - result = TCL_ERROR; - } - break; - case DBGETBTMINKEY: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_bt_minkey(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_bt_minkey")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETCACHESIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_cachesize(dbp, &gbytes, &bytes, &ncache); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_cachesize")) == 
TCL_OK) { - myobjv[0] = Tcl_NewIntObj((int)gbytes); - myobjv[1] = Tcl_NewIntObj((int)bytes); - myobjv[2] = Tcl_NewIntObj((int)ncache); - res = Tcl_NewListObj(3, myobjv); - } - break; - case DBGETDBNAME: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_dbname(dbp, &filename, &dbname); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_dbname")) == TCL_OK) { - myobjv[0] = NewStringObj(filename, strlen(filename)); - myobjv[1] = NewStringObj(dbname, strlen(dbname)); - res = Tcl_NewListObj(2, myobjv); - } - break; - case DBGETENCRYPTFLAGS: - result = tcl_EnvGetEncryptFlags(interp, objc, objv, dbp->dbenv); - break; - case DBGETENV: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - dbenv = dbp->get_env(dbp); - if (dbenv != NULL && (ip = _PtrToInfo(dbenv)) != NULL) { - envid = ip->i_name; - res = NewStringObj(envid, strlen(envid)); - } else - Tcl_ResetResult(interp); - break; - case DBGETERRPFX: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - dbp->get_errpfx(dbp, &strval); - res = NewStringObj(strval, strlen(strval)); - break; - case DBGETFLAGS: - result = tcl_DbGetFlags(interp, objc, objv, dbp); - break; - case DBGETHFFACTOR: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_h_ffactor(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_h_ffactor")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETHNELEM: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_h_nelem(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_h_nelem")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETJOIN: - result = tcl_DbGetjoin(interp, objc, objv, dbp); - break; - case DBGETLORDER: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbp->get_lorder(dbp, &intval); - res = Tcl_NewIntObj(intval); - break; - case DBGETOPENFLAGS: - result = tcl_DbGetOpenFlags(interp, objc, objv, dbp); - break; - case DBGETPAGESIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_pagesize(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_pagesize")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETQEXTENTSIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_q_extentsize(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_q_extentsize")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETREDELIM: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_re_delim(dbp, &intval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_re_delim")) == TCL_OK) - res = Tcl_NewIntObj(intval); - break; - case DBGETRELEN: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_re_len(dbp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_re_len")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case DBGETREPAD: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_re_pad(dbp, &intval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_re_pad")) == TCL_OK) - res = Tcl_NewIntObj((int)intval); - break; - case DBGETRESOURCE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbp->get_re_source(dbp, &strval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_re_source")) == TCL_OK) - res = NewStringObj(strval, 
strlen(strval)); - break; - case DBTRUNCATE: - result = tcl_DbTruncate(interp, objc, objv, dbp); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * tcl_db_stat -- - */ -static int -tcl_DbStat(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbstatopts[] = { -#ifdef CONFIG_TEST - "-degree_2", - "-dirty", -#endif - "-faststat", - "-txn", - NULL - }; - enum dbstatopts { -#ifdef CONFIG_TEST - DBCUR_DEGREE2, - DBCUR_DIRTY, -#endif - DBCUR_FASTSTAT, - DBCUR_TXN - }; - DBTYPE type; - DB_BTREE_STAT *bsp; - DB_HASH_STAT *hsp; - DB_QUEUE_STAT *qsp; - DB_TXN *txn; - Tcl_Obj *res, *flaglist, *myobjv[2]; - u_int32_t flag; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - void *sp; - - result = TCL_OK; - flag = 0; - txn = NULL; - sp = NULL; - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbstatopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - goto error; - } - i++; - switch ((enum dbstatopts)optindex) { -#ifdef CONFIG_TEST - case DBCUR_DEGREE2: - flag |= DB_DEGREE_2; - break; - case DBCUR_DIRTY: - flag |= DB_DIRTY_READ; - break; -#endif - case DBCUR_FASTSTAT: - flag |= DB_FAST_STAT; - break; - case DBCUR_TXN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Stat: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto error; - - _debug_check(); - ret = dbp->stat(dbp, txn, &sp, flag); - result = 
_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat"); - if (result == TCL_ERROR) - return (result); - - (void)dbp->get_type(dbp, &type); - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - - /* - * MAKE_STAT_LIST assumes 'res' and 'error' label. - */ - if (type == DB_HASH) { - hsp = (DB_HASH_STAT *)sp; - MAKE_STAT_LIST("Magic", hsp->hash_magic); - MAKE_STAT_LIST("Version", hsp->hash_version); - MAKE_STAT_LIST("Page size", hsp->hash_pagesize); - MAKE_STAT_LIST("Number of keys", hsp->hash_nkeys); - MAKE_STAT_LIST("Number of records", hsp->hash_ndata); - MAKE_STAT_LIST("Fill factor", hsp->hash_ffactor); - MAKE_STAT_LIST("Buckets", hsp->hash_buckets); - if (flag != DB_FAST_STAT) { - MAKE_STAT_LIST("Free pages", hsp->hash_free); - MAKE_STAT_LIST("Bytes free", hsp->hash_bfree); - MAKE_STAT_LIST("Number of big pages", - hsp->hash_bigpages); - MAKE_STAT_LIST("Big pages bytes free", - hsp->hash_big_bfree); - MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows); - MAKE_STAT_LIST("Overflow bytes free", - hsp->hash_ovfl_free); - MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup); - MAKE_STAT_LIST("Duplicate pages bytes free", - hsp->hash_dup_free); - } - } else if (type == DB_QUEUE) { - qsp = (DB_QUEUE_STAT *)sp; - MAKE_STAT_LIST("Magic", qsp->qs_magic); - MAKE_STAT_LIST("Version", qsp->qs_version); - MAKE_STAT_LIST("Page size", qsp->qs_pagesize); - MAKE_STAT_LIST("Extent size", qsp->qs_extentsize); - MAKE_STAT_LIST("Number of records", qsp->qs_nkeys); - MAKE_STAT_LIST("Record length", qsp->qs_re_len); - MAKE_STAT_LIST("Record pad", qsp->qs_re_pad); - MAKE_STAT_LIST("First record number", qsp->qs_first_recno); - MAKE_STAT_LIST("Last record number", qsp->qs_cur_recno); - if (flag != DB_FAST_STAT) { - MAKE_STAT_LIST("Number of pages", qsp->qs_pages); - MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree); - } - } else { /* BTREE and RECNO are same stats */ - bsp = (DB_BTREE_STAT *)sp; - MAKE_STAT_LIST("Magic", 
bsp->bt_magic); - MAKE_STAT_LIST("Version", bsp->bt_version); - MAKE_STAT_LIST("Number of keys", bsp->bt_nkeys); - MAKE_STAT_LIST("Number of records", bsp->bt_ndata); - MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey); - MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len); - MAKE_STAT_LIST("Record pad", bsp->bt_re_pad); - MAKE_STAT_LIST("Page size", bsp->bt_pagesize); - if (flag != DB_FAST_STAT) { - MAKE_STAT_LIST("Levels", bsp->bt_levels); - MAKE_STAT_LIST("Internal pages", bsp->bt_int_pg); - MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg); - MAKE_STAT_LIST("Duplicate pages", bsp->bt_dup_pg); - MAKE_STAT_LIST("Overflow pages", bsp->bt_over_pg); - MAKE_STAT_LIST("Empty pages", bsp->bt_empty_pg); - MAKE_STAT_LIST("Pages on freelist", bsp->bt_free); - MAKE_STAT_LIST("Internal pages bytes free", - bsp->bt_int_pgfree); - MAKE_STAT_LIST("Leaf pages bytes free", - bsp->bt_leaf_pgfree); - MAKE_STAT_LIST("Duplicate pages bytes free", - bsp->bt_dup_pgfree); - MAKE_STAT_LIST("Bytes free in overflow pages", - bsp->bt_over_pgfree); - } - } - - /* - * Construct a {name {flag1 flag2 ... flagN}} list for the - * dbp flags. These aren't access-method dependent, but they - * include all the interesting flags, and the integer value - * isn't useful from Tcl--return the strings instead. - */ - myobjv[0] = NewStringObj("Flags", strlen("Flags")); - myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_get_flags_fn()); - flaglist = Tcl_NewListObj(2, myobjv); - if (flaglist == NULL) { - result = TCL_ERROR; - goto error; - } - if ((result = - Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK) - goto error; - - Tcl_SetObjResult(interp, res); -error: - if (sp != NULL) - __os_ufree(dbp->dbenv, sp); - return (result); -} - -/* - * tcl_db_close -- - */ -static int -tcl_DbClose(interp, objc, objv, dbp, dbip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ - DBTCL_INFO *dbip; /* Info pointer */ -{ - static const char *dbclose[] = { - "-nosync", "--", NULL - }; - enum dbclose { - TCL_DBCLOSE_NOSYNC, - TCL_DBCLOSE_ENDARG - }; - u_int32_t flag; - int endarg, i, optindex, result, ret; - char *arg; - - result = TCL_OK; - endarg = 0; - flag = 0; - if (objc > 4) { - Tcl_WrongNumArgs(interp, 2, objv, "?-nosync?"); - return (TCL_ERROR); - } - - for (i = 2; i < objc; ++i) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbclose, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') - return (IS_HELP(objv[i])); - else - Tcl_ResetResult(interp); - break; - } - switch ((enum dbclose)optindex) { - case TCL_DBCLOSE_NOSYNC: - flag = DB_NOSYNC; - break; - case TCL_DBCLOSE_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - return (result); - if (endarg) - break; - } - _DbInfoDelete(interp, dbip); - _debug_check(); - - /* Paranoia. */ - dbp->api_internal = NULL; - - ret = (dbp)->close(dbp, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db close"); - return (result); -} - -/* - * tcl_db_put -- - */ -static int -tcl_DbPut(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbputopts[] = { -#ifdef CONFIG_TEST - "-nodupdata", -#endif - "-append", - "-auto_commit", - "-nooverwrite", - "-partial", - "-txn", - NULL - }; - enum dbputopts { -#ifdef CONFIG_TEST - DBGET_NODUPDATA, -#endif - DBPUT_APPEND, - DBPUT_AUTO_COMMIT, - DBPUT_NOOVER, - DBPUT_PART, - DBPUT_TXN - }; - static const char *dbputapp[] = { - "-append", NULL - }; - enum dbputapp { DBPUT_APPEND0 }; - DBT key, data; - DBTYPE type; - DB_TXN *txn; - Tcl_Obj **elemv, *res; - void *dtmp, *ktmp; - db_recno_t recno; - u_int32_t flag; - int auto_commit, elemc, end, freekey, freedata; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - txn = NULL; - result = TCL_OK; - flag = 0; - if (objc <= 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? key data"); - return (TCL_ERROR); - } - - dtmp = ktmp = NULL; - freekey = freedata = 0; - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - /* - * If it is a QUEUE or RECNO database, the key is a record number - * and must be setup up to contain a db_recno_t. Otherwise the - * key is a "string". - */ - (void)dbp->get_type(dbp, &type); - - /* - * We need to determine where the end of required args are. If we - * are using a QUEUE/RECNO db and -append, then there is just one - * req arg (data). Otherwise there are two (key data). - * - * We preparse the list to determine this since we need to know - * to properly check # of args for other options below. - */ - end = objc - 2; - if (type == DB_QUEUE || type == DB_RECNO) { - i = 2; - while (i < objc - 1) { - if (Tcl_GetIndexFromObj(interp, objv[i++], dbputapp, - "option", TCL_EXACT, &optindex) != TCL_OK) - continue; - switch ((enum dbputapp)optindex) { - case DBPUT_APPEND0: - end = objc - 1; - break; - } - } - } - Tcl_ResetResult(interp); - - /* - * Get the command name index from the object based on the options - * defined above. 
- */ - i = 2; - auto_commit = 0; - while (i < end) { - if (Tcl_GetIndexFromObj(interp, objv[i], - dbputopts, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[i])); - i++; - switch ((enum dbputopts)optindex) { -#ifdef CONFIG_TEST - case DBGET_NODUPDATA: - FLAG_CHECK(flag); - flag = DB_NODUPDATA; - break; -#endif - case DBPUT_TXN: - if (i > (end - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Put: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - case DBPUT_AUTO_COMMIT: - auto_commit = 1; - break; - case DBPUT_APPEND: - FLAG_CHECK(flag); - flag = DB_APPEND; - break; - case DBPUT_NOOVER: - FLAG_CHECK(flag); - flag = DB_NOOVERWRITE; - break; - case DBPUT_PART: - if (i > (end - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-partial {offset length}?"); - result = TCL_ERROR; - break; - } - /* - * Get sublist as {offset length} - */ - result = Tcl_ListObjGetElements(interp, objv[i++], - &elemc, &elemv); - if (elemc != 2) { - Tcl_SetResult(interp, - "List must be {offset length}", TCL_STATIC); - result = TCL_ERROR; - break; - } - data.flags = DB_DBT_PARTIAL; - result = _GetUInt32(interp, elemv[0], &data.doff); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, elemv[1], &data.dlen); - /* - * NOTE: We don't check result here because all we'd - * do is break anyway, and we are doing that. If you - * add code here, you WILL need to add the check - * for result. (See the check for save.doff, a few - * lines above and copy that.) - */ - break; - } - if (result != TCL_OK) - break; - } - - if (result == TCL_ERROR) - return (result); - - /* - * If we are a recno db and we are NOT using append, then the 2nd - * last arg is the key. 
- */ - if (type == DB_QUEUE || type == DB_RECNO) { - key.data = &recno; - key.ulen = key.size = sizeof(db_recno_t); - key.flags = DB_DBT_USERMEM; - if (flag == DB_APPEND) - recno = 0; - else { - result = _GetUInt32(interp, objv[objc-2], &recno); - if (result != TCL_OK) - return (result); - } - } else { - COMPQUIET(recno, 0); - - ret = _CopyObjBytes(interp, objv[objc-2], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBPUT(ret), "db put"); - return (result); - } - key.data = ktmp; - } - if (auto_commit) - flag |= DB_AUTO_COMMIT; - ret = _CopyObjBytes(interp, objv[objc-1], &dtmp, &data.size, &freedata); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBPUT(ret), "db put"); - goto out; - } - data.data = dtmp; - _debug_check(); - ret = dbp->put(dbp, txn, &key, &data, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put"); - - /* We may have a returned record number. */ - if (ret == 0 && - (type == DB_QUEUE || type == DB_RECNO) && flag == DB_APPEND) { - res = Tcl_NewWideIntObj((Tcl_WideInt)recno); - Tcl_SetObjResult(interp, res); - } - -out: if (dtmp != NULL && freedata) - __os_free(dbp->dbenv, dtmp); - if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - return (result); -} - -/* - * tcl_db_get -- - */ -static int -tcl_DbGet(interp, objc, objv, dbp, ispget) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ - int ispget; /* 1 for pget, 0 for get */ -{ - static const char *dbgetopts[] = { -#ifdef CONFIG_TEST - "-degree2", - "-dirty", - "-multi", -#endif - "-auto_commit", - "-consume", - "-consume_wait", - "-get_both", - "-glob", - "-partial", - "-recno", - "-rmw", - "-txn", - "--", - NULL - }; - enum dbgetopts { -#ifdef CONFIG_TEST - DBGET_DEGREE2, - DBGET_DIRTY, - DBGET_MULTI, -#endif - DBGET_AUTO_COMMIT, - DBGET_CONSUME, - DBGET_CONSUME_WAIT, - DBGET_BOTH, - DBGET_GLOB, - DBGET_PART, - DBGET_RECNO, - DBGET_RMW, - DBGET_TXN, - DBGET_ENDARG - }; - DBC *dbc; - DBT key, pkey, data, save; - DBTYPE ptype, type; - DB_TXN *txn; - Tcl_Obj **elemv, *retlist; - db_recno_t precno, recno; - u_int32_t aflag, flag, cflag, isdup, mflag, rmw; - int elemc, end, endarg, freekey, freedata, i; - int optindex, result, ret, useglob, useprecno, userecno; - char *arg, *pattern, *prefix, msg[MSG_SIZE]; - void *dtmp, *ktmp; -#ifdef CONFIG_TEST - int bufsize; -#endif - - result = TCL_OK; - freekey = freedata = 0; - aflag = cflag = endarg = flag = mflag = rmw = 0; - useglob = userecno = 0; - txn = NULL; - pattern = prefix = NULL; - dtmp = ktmp = NULL; -#ifdef CONFIG_TEST - COMPQUIET(bufsize, 0); -#endif - - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? key"); - return (TCL_ERROR); - } - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - memset(&save, 0, sizeof(save)); - - /* For the primary key in a pget call. */ - memset(&pkey, 0, sizeof(pkey)); - - /* - * Get the command name index from the object based on the options - * defined above. 
- */ - i = 2; - (void)dbp->get_type(dbp, &type); - end = objc; - while (i < end) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbgetopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto out; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbgetopts)optindex) { -#ifdef CONFIG_TEST - case DBGET_DIRTY: - rmw |= DB_DIRTY_READ; - break; - case DBGET_DEGREE2: - rmw |= DB_DEGREE_2; - break; - case DBGET_MULTI: - mflag |= DB_MULTIPLE; - result = Tcl_GetIntFromObj(interp, objv[i], &bufsize); - if (result != TCL_OK) - goto out; - i++; - break; -#endif - case DBGET_AUTO_COMMIT: - aflag |= DB_AUTO_COMMIT; - break; - case DBGET_BOTH: - /* - * Change 'end' and make sure we aren't already past - * the new end. - */ - if (i > objc - 2) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-get_both key data?"); - result = TCL_ERROR; - break; - } - end = objc - 2; - FLAG_CHECK(flag); - flag = DB_GET_BOTH; - break; - case DBGET_TXN: - if (i >= end) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Get: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - case DBGET_GLOB: - useglob = 1; - end = objc - 1; - break; - case DBGET_CONSUME: - FLAG_CHECK(flag); - flag = DB_CONSUME; - break; - case DBGET_CONSUME_WAIT: - FLAG_CHECK(flag); - flag = DB_CONSUME_WAIT; - break; - case DBGET_RECNO: - end = objc - 1; - userecno = 1; - if (type != DB_RECNO && type != DB_QUEUE) { - FLAG_CHECK(flag); - flag = DB_SET_RECNO; - key.flags |= DB_DBT_MALLOC; - } - break; - case DBGET_RMW: - rmw |= DB_RMW; - break; - case DBGET_PART: - end = objc - 1; - if (i == end) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-partial {offset length}?"); - result = TCL_ERROR; - break; - } - /* - * Get 
sublist as {offset length} - */ - result = Tcl_ListObjGetElements(interp, objv[i++], - &elemc, &elemv); - if (elemc != 2) { - Tcl_SetResult(interp, - "List must be {offset length}", TCL_STATIC); - result = TCL_ERROR; - break; - } - save.flags = DB_DBT_PARTIAL; - result = _GetUInt32(interp, elemv[0], &save.doff); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, elemv[1], &save.dlen); - /* - * NOTE: We don't check result here because all we'd - * do is break anyway, and we are doing that. If you - * add code here, you WILL need to add the check - * for result. (See the check for save.doff, a few - * lines above and copy that.) - */ - break; - case DBGET_ENDARG: - endarg = 1; - break; - } - if (result != TCL_OK) - break; - if (endarg) - break; - } - if (result != TCL_OK) - goto out; - - if (type == DB_RECNO || type == DB_QUEUE) - userecno = 1; - - /* - * Check args we have left versus the flags we were given. - * We might have 0, 1 or 2 left. If we have 0, it must - * be DB_CONSUME*, if 2, then DB_GET_BOTH, all others should - * be 1. - */ - if (((flag == DB_CONSUME || flag == DB_CONSUME_WAIT) && i != objc) || - (flag == DB_GET_BOTH && i != objc - 2)) { - Tcl_SetResult(interp, - "Wrong number of key/data given based on flags specified\n", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } else if (flag == 0 && i != objc - 1) { - Tcl_SetResult(interp, - "Wrong number of key/data given\n", TCL_STATIC); - result = TCL_ERROR; - goto out; - } - - /* - * Find out whether the primary key should also be a recno. - */ - if (ispget && dbp->s_primary != NULL) { - (void)dbp->s_primary->get_type(dbp->s_primary, &ptype); - useprecno = ptype == DB_RECNO || ptype == DB_QUEUE; - } else - useprecno = 0; - - /* - * Check for illegal combos of options. 
- */ - if (useglob && (userecno || flag == DB_SET_RECNO || - type == DB_RECNO || type == DB_QUEUE)) { - Tcl_SetResult(interp, - "Cannot use -glob and record numbers.\n", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } - if (useglob && flag == DB_GET_BOTH) { - Tcl_SetResult(interp, - "Only one of -glob or -get_both can be specified.\n", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } - - if (useglob) - pattern = Tcl_GetStringFromObj(objv[objc - 1], NULL); - - /* - * This is the list we return - */ - retlist = Tcl_NewListObj(0, NULL); - save.flags |= DB_DBT_MALLOC; - - /* - * isdup is used to know if we support duplicates. If not, we - * can just do a db->get call and avoid using cursors. - */ - if ((ret = dbp->get_flags(dbp, &isdup)) != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db get"); - goto out; - } - isdup &= DB_DUP; - - /* - * If the database doesn't support duplicates or we're performing - * ops that don't require returning multiple items, use DB->get - * instead of a cursor operation. - */ - if (pattern == NULL && (isdup == 0 || mflag != 0 || - flag == DB_SET_RECNO || flag == DB_GET_BOTH || - flag == DB_CONSUME || flag == DB_CONSUME_WAIT)) { - if (flag == DB_GET_BOTH) { - if (userecno) { - result = _GetUInt32(interp, - objv[(objc - 2)], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - goto out; - } else { - /* - * Some get calls (SET_*) can change the - * key pointers. So, we need to store - * the allocated key space in a tmp. - */ - ret = _CopyObjBytes(interp, objv[objc-2], - &ktmp, &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBGET(ret), "db get"); - goto out; - } - key.data = ktmp; - } - /* - * Already checked args above. Fill in key and save. - * Save is used in the dbp->get call below to fill in - * data. 
- * - * If the "data" here is really a primary key--that - * is, if we're in a pget--and that primary key - * is a recno, treat it appropriately as an int. - */ - if (useprecno) { - result = _GetUInt32(interp, - objv[objc - 1], &precno); - if (result == TCL_OK) { - save.data = &precno; - save.size = sizeof(db_recno_t); - } else - goto out; - } else { - ret = _CopyObjBytes(interp, objv[objc-1], - &dtmp, &save.size, &freedata); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBGET(ret), "db get"); - goto out; - } - save.data = dtmp; - } - } else if (flag != DB_CONSUME && flag != DB_CONSUME_WAIT) { - if (userecno) { - result = _GetUInt32( - interp, objv[(objc - 1)], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - goto out; - } else { - /* - * Some get calls (SET_*) can change the - * key pointers. So, we need to store - * the allocated key space in a tmp. - */ - ret = _CopyObjBytes(interp, objv[objc-1], - &ktmp, &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBGET(ret), "db get"); - goto out; - } - key.data = ktmp; - } -#ifdef CONFIG_TEST - if (mflag & DB_MULTIPLE) { - if ((ret = __os_malloc(dbp->dbenv, - (size_t)bufsize, &save.data)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - goto out; - } - save.ulen = (u_int32_t)bufsize; - F_CLR(&save, DB_DBT_MALLOC); - F_SET(&save, DB_DBT_USERMEM); - } -#endif - } - - data = save; - - if (ispget) { - if (flag == DB_GET_BOTH) { - pkey.data = save.data; - pkey.size = save.size; - data.data = NULL; - data.size = 0; - } - F_SET(&pkey, DB_DBT_MALLOC); - _debug_check(); - ret = dbp->pget(dbp, - txn, &key, &pkey, &data, flag | rmw); - } else { - _debug_check(); - ret = dbp->get(dbp, - txn, &key, &data, flag | aflag | rmw | mflag); - } - result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret), - "db get"); - if (ret == 0) { - /* - * Success. 
Return a list of the form {name value} - * If it was a recno in key.data, we need to convert - * into a string/object representation of that recno. - */ - if (mflag & DB_MULTIPLE) - result = _SetMultiList(interp, - retlist, &key, &data, type, flag); - else if (type == DB_RECNO || type == DB_QUEUE) - if (ispget) - result = _Set3DBTList(interp, - retlist, &key, 1, &pkey, - useprecno, &data); - else - result = _SetListRecnoElem(interp, - retlist, *(db_recno_t *)key.data, - data.data, data.size); - else { - if (ispget) - result = _Set3DBTList(interp, - retlist, &key, 0, &pkey, - useprecno, &data); - else - result = _SetListElem(interp, retlist, - key.data, key.size, - data.data, data.size); - } - } - /* - * Free space from DBT. - * - * If we set DB_DBT_MALLOC, we need to free the space if and - * only if we succeeded and if DB allocated anything (the - * pointer has changed from what we passed in). If - * DB_DBT_MALLOC is not set, this is a bulk get buffer, and - * needs to be freed no matter what. - */ - if (F_ISSET(&key, DB_DBT_MALLOC) && ret == 0 && - key.data != ktmp) - __os_ufree(dbp->dbenv, key.data); - if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0 && - data.data != dtmp) - __os_ufree(dbp->dbenv, data.data); - else if (!F_ISSET(&data, DB_DBT_MALLOC)) - __os_free(dbp->dbenv, data.data); - if (ispget && ret == 0 && pkey.data != save.data) - __os_ufree(dbp->dbenv, pkey.data); - if (result == TCL_OK) - Tcl_SetObjResult(interp, retlist); - goto out; - } - - if (userecno) { - result = _GetUInt32(interp, objv[(objc - 1)], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - goto out; - } else { - /* - * Some get calls (SET_*) can change the - * key pointers. So, we need to store - * the allocated key space in a tmp. 
- */ - ret = _CopyObjBytes(interp, objv[objc-1], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBGET(ret), "db get"); - return (result); - } - key.data = ktmp; - } - ret = dbp->cursor(dbp, txn, &dbc, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db cursor"); - if (result == TCL_ERROR) - goto out; - - /* - * At this point, we have a cursor, if we have a pattern, - * we go to the nearest one and step forward until we don't - * have any more that match the pattern prefix. If we have - * an exact key, we go to that key position, and step through - * all the duplicates. In either case we build up a list of - * the form {{key data} {key data}...} along the way. - */ - memset(&data, 0, sizeof(data)); - /* - * Restore any "partial" info we have saved. - */ - data = save; - if (pattern) { - /* - * Note, prefix is returned in new space. Must free it. - */ - ret = _GetGlobPrefix(pattern, &prefix); - if (ret) { - result = TCL_ERROR; - Tcl_SetResult(interp, - "Unable to allocate pattern space", TCL_STATIC); - goto out1; - } - key.data = prefix; - key.size = strlen(prefix); - /* - * If they give us an empty pattern string - * (i.e. -glob *), go through entire DB. 
- */ - if (strlen(prefix) == 0) - cflag = DB_FIRST; - else - cflag = DB_SET_RANGE; - } else - cflag = DB_SET; - if (ispget) { - _debug_check(); - F_SET(&pkey, DB_DBT_MALLOC); - ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw); - } else { - _debug_check(); - ret = dbc->c_get(dbc, &key, &data, cflag | rmw); - } - result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), - "db get (cursor)"); - if (result == TCL_ERROR) - goto out1; - if (pattern) { - if (ret == 0 && prefix != NULL && - memcmp(key.data, prefix, strlen(prefix)) != 0) { - /* - * Free space from DB_DBT_MALLOC - */ - __os_ufree(dbp->dbenv, data.data); - goto out1; - } - cflag = DB_NEXT; - } else - cflag = DB_NEXT_DUP; - - while (ret == 0 && result == TCL_OK) { - /* - * Build up our {name value} sublist - */ - if (ispget) - result = _Set3DBTList(interp, retlist, &key, 0, - &pkey, useprecno, &data); - else - result = _SetListElem(interp, retlist, - key.data, key.size, data.data, data.size); - /* - * Free space from DB_DBT_MALLOC - */ - if (ispget) - __os_ufree(dbp->dbenv, pkey.data); - __os_ufree(dbp->dbenv, data.data); - if (result != TCL_OK) - break; - /* - * Append {name value} to return list - */ - memset(&key, 0, sizeof(key)); - memset(&pkey, 0, sizeof(pkey)); - memset(&data, 0, sizeof(data)); - /* - * Restore any "partial" info we have saved. - */ - data = save; - if (ispget) { - F_SET(&pkey, DB_DBT_MALLOC); - ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw); - } else - ret = dbc->c_get(dbc, &key, &data, cflag | rmw); - if (ret == 0 && prefix != NULL && - memcmp(key.data, prefix, strlen(prefix)) != 0) { - /* - * Free space from DB_DBT_MALLOC - */ - __os_ufree(dbp->dbenv, data.data); - break; - } - } -out1: - (void)dbc->c_close(dbc); - if (result == TCL_OK) - Tcl_SetObjResult(interp, retlist); -out: - /* - * _GetGlobPrefix(), the function which allocates prefix, works - * by copying and condensing another string. 
Thus prefix may - * have multiple nuls at the end, so we free using __os_free(). - */ - if (prefix != NULL) - __os_free(dbp->dbenv, prefix); - if (dtmp != NULL && freedata) - __os_free(dbp->dbenv, dtmp); - if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - return (result); -} - -/* - * tcl_db_delete -- - */ -static int -tcl_DbDelete(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbdelopts[] = { - "-auto_commit", - "-glob", - "-txn", - NULL - }; - enum dbdelopts { - DBDEL_AUTO_COMMIT, - DBDEL_GLOB, - DBDEL_TXN - }; - DBC *dbc; - DBT key, data; - DBTYPE type; - DB_TXN *txn; - void *ktmp; - db_recno_t recno; - int freekey, i, optindex, result, ret; - u_int32_t flag; - char *arg, *pattern, *prefix, msg[MSG_SIZE]; - - result = TCL_OK; - freekey = 0; - flag = 0; - pattern = prefix = NULL; - txn = NULL; - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? key"); - return (TCL_ERROR); - } - - ktmp = NULL; - memset(&key, 0, sizeof(key)); - /* - * The first arg must be -auto_commit, -glob, -txn or a list of keys. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbdelopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - /* - * If we don't have a -auto_commit, -glob or -txn, - * then the remaining args must be exact keys. - * Reset the result so we don't get an errant error - * message if there is another error. - */ - if (IS_HELP(objv[i]) == TCL_OK) - return (TCL_OK); - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbdelopts)optindex) { - case DBDEL_TXN: - if (i == objc) { - /* - * Someone could conceivably have a key of - * the same name. So just break and use it. 
- */ - i--; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Delete: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - case DBDEL_AUTO_COMMIT: - flag |= DB_AUTO_COMMIT; - break; - case DBDEL_GLOB: - /* - * Get the pattern. Get the prefix and use cursors to - * get all the data items. - */ - if (i == objc) { - /* - * Someone could conceivably have a key of - * the same name. So just break and use it. - */ - i--; - break; - } - pattern = Tcl_GetStringFromObj(objv[i++], NULL); - break; - } - if (result != TCL_OK) - break; - } - - if (result != TCL_OK) - goto out; - /* - * XXX - * For consistency with get, we have decided for the moment, to - * allow -glob, or one key, not many. The code was originally - * written to take many keys and we'll leave it that way, because - * tcl_DbGet may one day accept many disjoint keys to get, rather - * than one, and at that time we'd make delete be consistent. In - * any case, the code is already here and there is no need to remove, - * just check that we only have one arg left. - * - * If we have a pattern AND more keys to process, there is an error. - * Either we have some number of exact keys, or we have a pattern. - * - * If we have a pattern and an auto commit flag, there is an error. - */ - if (pattern == NULL) { - if (i != (objc - 1)) { - Tcl_WrongNumArgs( - interp, 2, objv, "?args? -glob pattern | key"); - result = TCL_ERROR; - goto out; - } - } else { - if (i != objc) { - Tcl_WrongNumArgs( - interp, 2, objv, "?args? -glob pattern | key"); - result = TCL_ERROR; - goto out; - } - if (flag & DB_AUTO_COMMIT) { - Tcl_SetResult(interp, - "Cannot use -auto_commit and patterns.\n", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } - } - - /* - * If we have remaining args, they are all exact keys. Call - * DB->del on each of those keys. 
- * - * If it is a RECNO database, the key is a record number and must be - * setup up to contain a db_recno_t. Otherwise the key is a "string". - */ - (void)dbp->get_type(dbp, &type); - ret = 0; - while (i < objc && ret == 0) { - memset(&key, 0, sizeof(key)); - if (type == DB_RECNO || type == DB_QUEUE) { - result = _GetUInt32(interp, objv[i++], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - return (result); - } else { - ret = _CopyObjBytes(interp, objv[i++], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBDEL(ret), "db del"); - return (result); - } - key.data = ktmp; - } - _debug_check(); - ret = dbp->del(dbp, txn, &key, flag); - /* - * If we have any error, set up return result and stop - * processing keys. - */ - if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - if (ret != 0) - break; - } - result = _ReturnSetup(interp, ret, DB_RETOK_DBDEL(ret), "db del"); - - /* - * At this point we've either finished or, if we have a pattern, - * we go to the nearest one and step forward until we don't - * have any more that match the pattern prefix. - */ - if (pattern) { - ret = dbp->cursor(dbp, txn, &dbc, 0); - if (ret != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db cursor"); - goto out; - } - /* - * Note, prefix is returned in new space. Must free it. - */ - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - ret = _GetGlobPrefix(pattern, &prefix); - if (ret) { - result = TCL_ERROR; - Tcl_SetResult(interp, - "Unable to allocate pattern space", TCL_STATIC); - goto out; - } - key.data = prefix; - key.size = strlen(prefix); - if (strlen(prefix) == 0) - flag = DB_FIRST; - else - flag = DB_SET_RANGE; - ret = dbc->c_get(dbc, &key, &data, flag); - while (ret == 0 && - memcmp(key.data, prefix, strlen(prefix)) == 0) { - /* - * Each time through here the cursor is pointing - * at the current valid item. Delete it and - * move ahead. 
- */ - _debug_check(); - ret = dbc->c_del(dbc, 0); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCDEL(ret), "db c_del"); - break; - } - /* - * Deleted the current, now move to the next item - * in the list, check if it matches the prefix pattern. - */ - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - ret = dbc->c_get(dbc, &key, &data, DB_NEXT); - } - if (ret == DB_NOTFOUND) - ret = 0; - /* - * _GetGlobPrefix(), the function which allocates prefix, works - * by copying and condensing another string. Thus prefix may - * have multiple nuls at the end, so we free using __os_free(). - */ - __os_free(dbp->dbenv, prefix); - (void)dbc->c_close(dbc); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db del"); - } -out: - return (result); -} - -/* - * tcl_db_cursor -- - */ -static int -tcl_DbCursor(interp, objc, objv, dbp, dbcp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ - DBC **dbcp; /* Return cursor pointer */ -{ - static const char *dbcuropts[] = { -#ifdef CONFIG_TEST - "-degree_2", - "-dirty", - "-update", -#endif - "-txn", - NULL - }; - enum dbcuropts { -#ifdef CONFIG_TEST - DBCUR_DEGREE2, - DBCUR_DIRTY, - DBCUR_UPDATE, -#endif - DBCUR_TXN - }; - DB_TXN *txn; - u_int32_t flag; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - result = TCL_OK; - flag = 0; - txn = NULL; - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - goto out; - } - i++; - switch ((enum dbcuropts)optindex) { -#ifdef CONFIG_TEST - case DBCUR_DEGREE2: - flag |= DB_DEGREE_2; - break; - case DBCUR_DIRTY: - flag |= DB_DIRTY_READ; - break; - case DBCUR_UPDATE: - flag |= DB_WRITECURSOR; - break; -#endif - case DBCUR_TXN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - 
} - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Cursor: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - _debug_check(); - ret = dbp->cursor(dbp, txn, dbcp, flag); - if (ret != 0) - result = _ErrorSetup(interp, ret, "db cursor"); -out: - return (result); -} - -/* - * tcl_DbAssociate -- - * Call DB->associate(). - */ -static int -tcl_DbAssociate(interp, objc, objv, dbp) - Tcl_Interp *interp; - int objc; - Tcl_Obj *CONST objv[]; - DB *dbp; -{ - static const char *dbaopts[] = { - "-auto_commit", - "-create", - "-txn", - NULL - }; - enum dbaopts { - DBA_AUTO_COMMIT, - DBA_CREATE, - DBA_TXN - }; - DB *sdbp; - DB_TXN *txn; - DBTCL_INFO *sdbip; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - u_int32_t flag; -#ifdef CONFIG_TEST - /* - * When calling DB->associate over RPC, the Tcl API uses - * special flags that the RPC server interprets to set the - * callback correctly. - */ - const char *cbname; - struct { - const char *name; - u_int32_t flag; - } *cb, callbacks[] = { - { "", 0 }, /* A NULL callback in Tcl. 
*/ - { "_s_reversedata", DB_RPC2ND_REVERSEDATA }, - { "_s_noop", DB_RPC2ND_NOOP }, - { "_s_concatkeydata", DB_RPC2ND_CONCATKEYDATA }, - { "_s_concatdatakey", DB_RPC2ND_CONCATDATAKEY }, - { "_s_reverseconcat", DB_RPC2ND_REVERSECONCAT }, - { "_s_truncdata", DB_RPC2ND_TRUNCDATA }, - { "_s_reversedata", DB_RPC2ND_REVERSEDATA }, - { "_s_constant", DB_RPC2ND_CONSTANT }, - { "sj_getzip", DB_RPC2ND_GETZIP }, - { "sj_getname", DB_RPC2ND_GETNAME }, - { NULL, 0 } - }; -#endif - - txn = NULL; - result = TCL_OK; - flag = 0; - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "[callback] secondary"); - return (TCL_ERROR); - } - - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbaopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - if (result == TCL_OK) - return (result); - result = TCL_OK; - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbaopts)optindex) { - case DBA_AUTO_COMMIT: - flag |= DB_AUTO_COMMIT; - break; - case DBA_CREATE: - flag |= DB_CREATE; - break; - case DBA_TXN: - if (i > (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Associate: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - } - if (result != TCL_OK) - return (result); - - /* - * Better be 1 or 2 args left. The last arg must be the sdb - * handle. If 2 args then objc-2 is the callback proc, else - * we have a NULL callback. - */ - /* Get the secondary DB handle. 
*/ - arg = Tcl_GetStringFromObj(objv[objc - 1], NULL); - sdbp = NAME_TO_DB(arg); - if (sdbp == NULL) { - snprintf(msg, MSG_SIZE, - "Associate: Invalid database handle: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - - /* - * The callback is simply a Tcl object containing the name - * of the callback proc, which is the second-to-last argument. - * - * Note that the callback needs to go in the *secondary* DB handle's - * info struct; we may have multiple secondaries with different - * callbacks. - */ - sdbip = (DBTCL_INFO *)sdbp->api_internal; - -#ifdef CONFIG_TEST - if (i != objc - 1 && RPC_ON(dbp->dbenv)) { - /* - * The flag values allowed to DB->associate may have changed to - * overlap with the range we've chosen. If this happens, we - * need to reset all of the RPC_2ND_* flags to a new range. - */ - if ((flag & DB_RPC2ND_MASK) != 0) { - snprintf(msg, MSG_SIZE, - "RPC secondary flags overlap -- recalculate!\n"); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - - cbname = Tcl_GetStringFromObj(objv[objc - 2], NULL); - for (cb = callbacks; cb->name != NULL; cb++) - if (strcmp(cb->name, cbname) == 0) { - flag |= cb->flag; - break; - } - - if (cb->name == NULL) { - snprintf(msg, MSG_SIZE, - "Associate: unknown callback: %s\n", cbname); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - - ret = dbp->associate(dbp, txn, sdbp, NULL, flag); - - /* - * The primary reference isn't set when calling through - * the RPC server, but the Tcl API peeks at it in other - * places (see tcl_DbGet). - */ - if (ret == 0) - sdbp->s_primary = dbp; - } else if (i != objc - 1) { -#else - if (i != objc - 1) { -#endif - /* - * We have 2 args, get the callback. - */ - sdbip->i_second_call = objv[objc - 2]; - Tcl_IncrRefCount(sdbip->i_second_call); - - /* Now call associate. */ - _debug_check(); - ret = dbp->associate(dbp, txn, sdbp, tcl_second_call, flag); - } else { - /* - * We have a NULL callback. 
- */ - sdbip->i_second_call = NULL; - ret = dbp->associate(dbp, txn, sdbp, NULL, flag); - } - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "associate"); - - return (result); -} - -/* - * tcl_second_call -- - * Callback function for secondary indices. Get the callback - * out of ip->i_second_call and call it. - */ -static int -tcl_second_call(dbp, pkey, data, skey) - DB *dbp; - const DBT *pkey, *data; - DBT *skey; -{ - DBTCL_INFO *ip; - Tcl_Interp *interp; - Tcl_Obj *pobj, *dobj, *objv[3]; - size_t len; - int ilen, result, ret; - void *retbuf, *databuf; - - ip = (DBTCL_INFO *)dbp->api_internal; - interp = ip->i_interp; - objv[0] = ip->i_second_call; - - /* - * Create two ByteArray objects, with the contents of the pkey - * and data DBTs that are our inputs. - */ - pobj = Tcl_NewByteArrayObj(pkey->data, (int)pkey->size); - Tcl_IncrRefCount(pobj); - dobj = Tcl_NewByteArrayObj(data->data, (int)data->size); - Tcl_IncrRefCount(dobj); - - objv[1] = pobj; - objv[2] = dobj; - - result = Tcl_EvalObjv(interp, 3, objv, 0); - - Tcl_DecrRefCount(pobj); - Tcl_DecrRefCount(dobj); - - if (result != TCL_OK) { - __db_err(dbp->dbenv, - "Tcl callback function failed with code %d", result); - return (EINVAL); - } - - retbuf = Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &ilen); - len = (size_t)ilen; - - /* - * retbuf is owned by Tcl; copy it into malloc'ed memory. - * We need to use __os_umalloc rather than ufree because this will - * be freed by DB using __os_ufree--the DB_DBT_APPMALLOC flag - * tells DB to free application-allocated memory. - */ - if ((ret = __os_umalloc(dbp->dbenv, len, &databuf)) != 0) - return (ret); - memcpy(databuf, retbuf, len); - - skey->data = databuf; - skey->size = len; - F_SET(skey, DB_DBT_APPMALLOC); - - return (0); -} - -/* - * tcl_db_join -- - */ -static int -tcl_DbJoin(interp, objc, objv, dbp, dbcp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ - DBC **dbcp; /* Cursor pointer */ -{ - static const char *dbjopts[] = { - "-nosort", - NULL - }; - enum dbjopts { - DBJ_NOSORT - }; - DBC **listp; - size_t size; - u_int32_t flag; - int adj, i, j, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - result = TCL_OK; - flag = 0; - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "curs1 curs2 ..."); - return (TCL_ERROR); - } - - for (adj = i = 2; i < objc; i++) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbjopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - if (result == TCL_OK) - return (result); - result = TCL_OK; - Tcl_ResetResult(interp); - break; - } - switch ((enum dbjopts)optindex) { - case DBJ_NOSORT: - flag |= DB_JOIN_NOSORT; - adj++; - break; - } - } - if (result != TCL_OK) - return (result); - /* - * Allocate one more for NULL ptr at end of list. - */ - size = sizeof(DBC *) * (size_t)((objc - adj) + 1); - ret = __os_malloc(dbp->dbenv, size, &listp); - if (ret != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - return (TCL_ERROR); - } - - memset(listp, 0, size); - for (j = 0, i = adj; i < objc; i++, j++) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - listp[j] = NAME_TO_DBC(arg); - if (listp[j] == NULL) { - snprintf(msg, MSG_SIZE, - "Join: Invalid cursor: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - goto out; - } - } - listp[j] = NULL; - _debug_check(); - ret = dbp->join(dbp, listp, dbcp, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join"); - -out: - __os_free(dbp->dbenv, listp); - return (result); -} - -/* - * tcl_db_getjoin -- - */ -static int -tcl_DbGetjoin(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbgetjopts[] = { -#ifdef CONFIG_TEST - "-nosort", -#endif - "-txn", - NULL - }; - enum dbgetjopts { -#ifdef CONFIG_TEST - DBGETJ_NOSORT, -#endif - DBGETJ_TXN - }; - DB_TXN *txn; - DB *elemdbp; - DBC **listp; - DBC *dbc; - DBT key, data; - Tcl_Obj **elemv, *retlist; - void *ktmp; - size_t size; - u_int32_t flag; - int adj, elemc, freekey, i, j, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - result = TCL_OK; - flag = 0; - ktmp = NULL; - freekey = 0; - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ..."); - return (TCL_ERROR); - } - - txn = NULL; - i = 2; - adj = i; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbgetjopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - if (result == TCL_OK) - return (result); - result = TCL_OK; - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbgetjopts)optindex) { -#ifdef CONFIG_TEST - case DBGETJ_NOSORT: - flag |= DB_JOIN_NOSORT; - adj++; - break; -#endif - case DBGETJ_TXN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - adj += 2; - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "GetJoin: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - } - if (result != TCL_OK) - return (result); - size = sizeof(DBC *) * (size_t)((objc - adj) + 1); - ret = __os_malloc(NULL, size, &listp); - if (ret != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - return (TCL_ERROR); - } - - memset(listp, 0, size); - for (j = 0, i = adj; i < objc; i++, j++) { - /* - * Get each sublist as {db key} - */ - result = Tcl_ListObjGetElements(interp, objv[i], - &elemc, &elemv); - if (elemc != 2) { - Tcl_SetResult(interp, "Lists must be {db key}", - TCL_STATIC); 
- result = TCL_ERROR; - goto out; - } - /* - * Get a pointer to that open db. Then, open a cursor in - * that db, and go to the "key" place. - */ - elemdbp = NAME_TO_DB(Tcl_GetStringFromObj(elemv[0], NULL)); - if (elemdbp == NULL) { - snprintf(msg, MSG_SIZE, "Get_join: Invalid db: %s\n", - Tcl_GetStringFromObj(elemv[0], NULL)); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - goto out; - } - ret = elemdbp->cursor(elemdbp, txn, &listp[j], 0); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db cursor")) == TCL_ERROR) - goto out; - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - ret = _CopyObjBytes(interp, elemv[elemc-1], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "db join"); - goto out; - } - key.data = ktmp; - ret = (listp[j])->c_get(listp[j], &key, &data, DB_SET); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), - "db cget")) == TCL_ERROR) - goto out; - } - listp[j] = NULL; - _debug_check(); - ret = dbp->join(dbp, listp, &dbc, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join"); - if (result == TCL_ERROR) - goto out; - - retlist = Tcl_NewListObj(0, NULL); - while (ret == 0 && result == TCL_OK) { - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - key.flags |= DB_DBT_MALLOC; - data.flags |= DB_DBT_MALLOC; - ret = dbc->c_get(dbc, &key, &data, 0); - /* - * Build up our {name value} sublist - */ - if (ret == 0) { - result = _SetListElem(interp, retlist, - key.data, key.size, - data.data, data.size); - __os_ufree(dbp->dbenv, key.data); - __os_ufree(dbp->dbenv, data.data); - } - } - (void)dbc->c_close(dbc); - if (result == TCL_OK) - Tcl_SetObjResult(interp, retlist); -out: - if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - while (j) { - if (listp[j]) - (void)(listp[j])->c_close(listp[j]); - j--; - } - __os_free(dbp->dbenv, listp); - return (result); -} - -/* - * tcl_DbGetFlags -- - */ 
-static int -tcl_DbGetFlags(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } db_flags[] = { - { DB_CHKSUM, "-chksum" }, - { DB_DUP, "-dup" }, - { DB_DUPSORT, "-dupsort" }, - { DB_ENCRYPT, "-encrypt" }, - { DB_INORDER, "-inorder" }, - { DB_TXN_NOT_DURABLE, "-notdurable" }, - { DB_RECNUM, "-recnum" }, - { DB_RENUMBER, "-renumber" }, - { DB_REVSPLITOFF, "-revsplitoff" }, - { DB_SNAPSHOT, "-snapshot" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = dbp->get_flags(dbp, &flags); - if ((result = _ReturnSetup( - interp, ret, DB_RETOK_STD(ret), "db get_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; db_flags[i].flag != 0; i++) - if (LF_ISSET(db_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, db_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * tcl_DbGetOpenFlags -- - */ -static int -tcl_DbGetOpenFlags(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } open_flags[] = { - { DB_AUTO_COMMIT, "-auto_commit" }, - { DB_CREATE, "-create" }, - { DB_DEGREE_2, "-degree_2" }, - { DB_DIRTY_READ, "-dirty" }, - { DB_EXCL, "-excl" }, - { DB_NOMMAP, "-nommap" }, - { DB_RDONLY, "-rdonly" }, - { DB_THREAD, "-thread" }, - { DB_TRUNCATE, "-truncate" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = dbp->get_open_flags(dbp, &flags); - if ((result = _ReturnSetup( - interp, ret, DB_RETOK_STD(ret), "db get_open_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; open_flags[i].flag != 0; i++) - if (LF_ISSET(open_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, open_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * tcl_DbCount -- - */ -static int -tcl_DbCount(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - Tcl_Obj *res; - DBC *dbc; - DBT key, data; - void *ktmp; - db_recno_t count, recno; - int freekey, result, ret; - - res = NULL; - count = 0; - freekey = ret = 0; - ktmp = NULL; - result = TCL_OK; - - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "key"); - return (TCL_ERROR); - } - - /* - * Get the count for our key. - * We do this by getting a cursor for this DB. Moving the cursor - * to the set location, and getting a count on that cursor. - */ - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - /* - * If it's a queue or recno database, we must make sure to - * treat the key as a recno rather than as a byte string. 
- */ - if (dbp->type == DB_RECNO || dbp->type == DB_QUEUE) { - result = _GetUInt32(interp, objv[2], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - return (result); - } else { - ret = _CopyObjBytes(interp, objv[2], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "db count"); - return (result); - } - key.data = ktmp; - } - _debug_check(); - ret = dbp->cursor(dbp, NULL, &dbc, 0); - if (ret != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db cursor"); - goto out; - } - /* - * Move our cursor to the key. - */ - ret = dbc->c_get(dbc, &key, &data, DB_SET); - if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) - count = 0; - else { - ret = dbc->c_count(dbc, &count, 0); - if (ret != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db c count"); - goto out; - } - } - res = Tcl_NewWideIntObj((Tcl_WideInt)count); - Tcl_SetObjResult(interp, res); - -out: if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - (void)dbc->c_close(dbc); - return (result); -} - -#ifdef CONFIG_TEST -/* - * tcl_DbKeyRange -- - */ -static int -tcl_DbKeyRange(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbkeyropts[] = { - "-txn", - NULL - }; - enum dbkeyropts { - DBKEYR_TXN - }; - DB_TXN *txn; - DB_KEY_RANGE range; - DBT key; - DBTYPE type; - Tcl_Obj *myobjv[3], *retlist; - void *ktmp; - db_recno_t recno; - u_int32_t flag; - int freekey, i, myobjc, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - ktmp = NULL; - flag = 0; - freekey = 0; - result = TCL_OK; - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? 
key"); - return (TCL_ERROR); - } - - txn = NULL; - for (i = 2; i < objc;) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbkeyropts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - if (result == TCL_OK) - return (result); - result = TCL_OK; - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbkeyropts)optindex) { - case DBKEYR_TXN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "KeyRange: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - } - if (result != TCL_OK) - return (result); - (void)dbp->get_type(dbp, &type); - ret = 0; - /* - * Make sure we have a key. - */ - if (i != (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? key"); - result = TCL_ERROR; - goto out; - } - memset(&key, 0, sizeof(key)); - if (type == DB_RECNO || type == DB_QUEUE) { - result = _GetUInt32(interp, objv[i], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - return (result); - } else { - ret = _CopyObjBytes(interp, objv[i++], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "db keyrange"); - return (result); - } - key.data = ktmp; - } - _debug_check(); - ret = dbp->key_range(dbp, txn, &key, &range, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db keyrange"); - if (result == TCL_ERROR) - goto out; - - /* - * If we succeeded, set up return list. 
- */ - myobjc = 3; - myobjv[0] = Tcl_NewDoubleObj(range.less); - myobjv[1] = Tcl_NewDoubleObj(range.equal); - myobjv[2] = Tcl_NewDoubleObj(range.greater); - retlist = Tcl_NewListObj(myobjc, myobjv); - if (result == TCL_OK) - Tcl_SetObjResult(interp, retlist); - -out: if (ktmp != NULL && freekey) - __os_free(dbp->dbenv, ktmp); - return (result); -} -#endif - -/* - * tcl_DbTruncate -- - */ -static int -tcl_DbTruncate(interp, objc, objv, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB *dbp; /* Database pointer */ -{ - static const char *dbcuropts[] = { - "-auto_commit", - "-txn", - NULL - }; - enum dbcuropts { - DBTRUNC_AUTO_COMMIT, - DBTRUNC_TXN - }; - DB_TXN *txn; - Tcl_Obj *res; - u_int32_t count, flag; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - txn = NULL; - flag = 0; - result = TCL_OK; - - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - goto out; - } - i++; - switch ((enum dbcuropts)optindex) { - case DBTRUNC_AUTO_COMMIT: - flag |= DB_AUTO_COMMIT; - break; - case DBTRUNC_TXN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Truncate: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - _debug_check(); - ret = dbp->truncate(dbp, txn, &count, flag); - if (ret != 0) - result = _ErrorSetup(interp, ret, "db truncate"); - - else { - res = Tcl_NewWideIntObj((Tcl_WideInt)count); - Tcl_SetObjResult(interp, res); - } -out: - return (result); -} diff --git a/storage/bdb/tcl/tcl_db_pkg.c b/storage/bdb/tcl/tcl_db_pkg.c deleted file mode 100644 index 
ce55df84298..00000000000 --- a/storage/bdb/tcl/tcl_db_pkg.c +++ /dev/null @@ -1,3599 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_db_pkg.c,v 11.190 2004/10/27 16:48:32 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#ifdef CONFIG_TEST -#define DB_DBM_HSEARCH 1 -#endif - -#include "db_int.h" -#include "dbinc/db_page.h" -#include "dbinc/hash.h" -#include "dbinc/tcl_db.h" - -/* XXX we must declare global data in just one place */ -DBTCL_GLOBAL __dbtcl_global; - -/* - * Prototypes for procedures defined later in this file: - */ -static int berkdb_Cmd __P((ClientData, Tcl_Interp *, int, - Tcl_Obj * CONST*)); -static int bdb_EnvOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - DBTCL_INFO *, DB_ENV **)); -static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - DBTCL_INFO *, DB **)); -static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_SeqOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - DBTCL_INFO *, DB_SEQUENCE **)); - -#ifdef CONFIG_TEST -static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int bdb_MsgType __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); - -static int tcl_bt_compare __P((DB *, const DBT *, const DBT *)); -static int tcl_compare_callback __P((DB *, const DBT *, const DBT *, - Tcl_Obj *, char *)); -static void tcl_db_free __P((void *)); -static void * tcl_db_malloc __P((size_t)); -static void * tcl_db_realloc __P((void *, size_t)); -static int tcl_dup_compare __P((DB *, const DBT *, const DBT *)); -static u_int32_t tcl_h_hash __P((DB 
*, const void *, u_int32_t)); -static int tcl_rep_send __P((DB_ENV *, - const DBT *, const DBT *, const DB_LSN *, int, u_int32_t)); -#endif - -/* - * Db_tcl_Init -- - * - * This is a package initialization procedure, which is called by Tcl when - * this package is to be added to an interpreter. The name is based on the - * name of the shared library, currently libdb_tcl-X.Y.so, which Tcl uses - * to determine the name of this function. - */ -int -Db_tcl_Init(interp) - Tcl_Interp *interp; /* Interpreter in which the package is - * to be made available. */ -{ - int code; - char pkg[12]; - - snprintf(pkg, sizeof(pkg), "%d.%d", DB_VERSION_MAJOR, DB_VERSION_MINOR); - code = Tcl_PkgProvide(interp, "Db_tcl", pkg); - if (code != TCL_OK) - return (code); - - (void)Tcl_CreateObjCommand(interp, - "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd, (ClientData)0, NULL); - /* - * Create shared global debugging variables - */ - (void)Tcl_LinkVar( - interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT); - (void)Tcl_LinkVar( - interp, "__debug_print", (char *)&__debug_print, TCL_LINK_INT); - (void)Tcl_LinkVar( - interp, "__debug_stop", (char *)&__debug_stop, TCL_LINK_INT); - (void)Tcl_LinkVar( - interp, "__debug_test", (char *)&__debug_test, - TCL_LINK_INT); - LIST_INIT(&__db_infohead); - return (TCL_OK); -} - -/* - * berkdb_cmd -- - * Implements the "berkdb" command. - * This command supports three sub commands: - * berkdb version - Returns a list {major minor patch} - * berkdb env - Creates a new DB_ENV and returns a binding - * to a new command of the form dbenvX, where X is an - * integer starting at 0 (dbenv0, dbenv1, ...) - * berkdb open - Creates a new DB (optionally within - * the given environment. Returns a binding to a new - * command of the form dbX, where X is an integer - * starting at 0 (db0, db1, ...) - */ -static int -berkdb_Cmd(notused, interp, objc, objv) - ClientData notused; /* Not used. */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *berkdbcmds[] = { -#ifdef CONFIG_TEST - "dbverify", - "handles", - "msgtype", - "upgrade", -#endif - "dbremove", - "dbrename", - "env", - "envremove", - "open", -#ifdef HAVE_SEQUENCE - "sequence", -#endif - "version", -#ifdef CONFIG_TEST - /* All below are compatibility functions */ - "hcreate", "hsearch", "hdestroy", - "dbminit", "fetch", "store", - "delete", "firstkey", "nextkey", - "ndbm_open", "dbmclose", -#endif - /* All below are convenience functions */ - "rand", "random_int", "srand", - "debug_check", - NULL - }; - /* - * All commands enums below ending in X are compatibility - */ - enum berkdbcmds { -#ifdef CONFIG_TEST - BDB_DBVERIFY, - BDB_HANDLES, - BDB_MSGTYPE, - BDB_UPGRADE, -#endif - BDB_DBREMOVE, - BDB_DBRENAME, - BDB_ENV, - BDB_ENVREMOVE, - BDB_OPEN, -#ifdef HAVE_SEQUENCE - BDB_SEQUENCE, -#endif - BDB_VERSION, -#ifdef CONFIG_TEST - BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX, - BDB_DBMINITX, BDB_FETCHX, BDB_STOREX, - BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX, - BDB_NDBMOPENX, BDB_DBMCLOSEX, -#endif - BDB_RANDX, BDB_RAND_INTX, BDB_SRANDX, - BDB_DBGCKX - }; - static int env_id = 0; - static int db_id = 0; -#ifdef HAVE_SEQUENCE - static int seq_id = 0; -#endif - - DB *dbp; -#ifdef HAVE_SEQUENCE - DB_SEQUENCE *seq; -#endif -#ifdef CONFIG_TEST - DBM *ndbmp; - static int ndbm_id = 0; -#endif - DBTCL_INFO *ip; - DB_ENV *envp; - Tcl_Obj *res; - int cmdindex, result; - char newname[MSG_SIZE]; - - COMPQUIET(notused, NULL); - - Tcl_ResetResult(interp); - memset(newname, 0, MSG_SIZE); - result = TCL_OK; - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the berkdbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], berkdbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - res = NULL; - switch ((enum berkdbcmds)cmdindex) { -#ifdef CONFIG_TEST - case BDB_DBVERIFY: - result = bdb_DbVerify(interp, objc, objv); - break; - case BDB_HANDLES: - result = bdb_Handles(interp, objc, objv); - break; - case BDB_MSGTYPE: - result = bdb_MsgType(interp, objc, objv); - break; - case BDB_UPGRADE: - result = bdb_DbUpgrade(interp, objc, objv); - break; -#endif - case BDB_VERSION: - _debug_check(); - result = bdb_Version(interp, objc, objv); - break; - case BDB_ENV: - snprintf(newname, sizeof(newname), "env%d", env_id); - ip = _NewInfo(interp, NULL, newname, I_ENV); - if (ip != NULL) { - result = bdb_EnvOpen(interp, objc, objv, ip, &envp); - if (result == TCL_OK && envp != NULL) { - env_id++; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)env_Cmd, - (ClientData)envp, NULL); - /* Use ip->i_name - newname is overwritten */ - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, envp); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - result = TCL_ERROR; - } - break; - case BDB_DBREMOVE: - result = bdb_DbRemove(interp, objc, objv); - break; - case BDB_DBRENAME: - result = bdb_DbRename(interp, objc, objv); - break; - case BDB_ENVREMOVE: - result = tcl_EnvRemove(interp, objc, objv, NULL, NULL); - break; - case BDB_OPEN: - snprintf(newname, sizeof(newname), "db%d", db_id); - ip = _NewInfo(interp, NULL, newname, I_DB); - if (ip != NULL) { - result = bdb_DbOpen(interp, objc, objv, ip, &dbp); - if (result == TCL_OK && dbp != NULL) { - db_id++; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)db_Cmd, - (ClientData)dbp, NULL); - /* Use ip->i_name - newname is overwritten */ - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, dbp); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, "Could not set up 
info", - TCL_STATIC); - result = TCL_ERROR; - } - break; -#ifdef HAVE_SEQUENCE - case BDB_SEQUENCE: - snprintf(newname, sizeof(newname), "seq%d", seq_id); - ip = _NewInfo(interp, NULL, newname, I_SEQ); - if (ip != NULL) { - result = bdb_SeqOpen(interp, objc, objv, ip, &seq); - if (result == TCL_OK && seq != NULL) { - seq_id++; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)seq_Cmd, - (ClientData)seq, NULL); - /* Use ip->i_name - newname is overwritten */ - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, seq); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - result = TCL_ERROR; - } - break; -#endif -#ifdef CONFIG_TEST - case BDB_HCREATEX: - case BDB_HSEARCHX: - case BDB_HDESTROYX: - result = bdb_HCommand(interp, objc, objv); - break; - case BDB_DBMINITX: - case BDB_DBMCLOSEX: - case BDB_FETCHX: - case BDB_STOREX: - case BDB_DELETEX: - case BDB_FIRSTKEYX: - case BDB_NEXTKEYX: - result = bdb_DbmCommand(interp, objc, objv, DBTCL_DBM, NULL); - break; - case BDB_NDBMOPENX: - snprintf(newname, sizeof(newname), "ndbm%d", ndbm_id); - ip = _NewInfo(interp, NULL, newname, I_NDBM); - if (ip != NULL) { - result = bdb_NdbmOpen(interp, objc, objv, &ndbmp); - if (result == TCL_OK) { - ndbm_id++; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)ndbm_Cmd, - (ClientData)ndbmp, NULL); - /* Use ip->i_name - newname is overwritten */ - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(ip, ndbmp); - } else - _DeleteInfo(ip); - } else { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - result = TCL_ERROR; - } - break; -#endif - case BDB_RANDX: - case BDB_RAND_INTX: - case BDB_SRANDX: - result = bdb_RandCommand(interp, objc, objv); - break; - case BDB_DBGCKX: - _debug_check(); - res = Tcl_NewIntObj(0); - break; - } - /* - * For each different arg call different function to create - * new commands (or if version, get/return it). 
- */ - if (result == TCL_OK && res != NULL) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * bdb_EnvOpen - - * Implements the environment open command. - * There are many, many options to the open command. - * Here is the general flow: - * - * 1. Call db_env_create to create the env handle. - * 2. Parse args tracking options. - * 3. Make any pre-open setup calls necessary. - * 4. Call DB_ENV->open to open the env. - * 5. Return env widget handle to user. - */ -static int -bdb_EnvOpen(interp, objc, objv, ip, env) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBTCL_INFO *ip; /* Our internal info */ - DB_ENV **env; /* Environment pointer */ -{ - static const char *envopen[] = { -#ifdef CONFIG_TEST - "-alloc", - "-auto_commit", - "-cdb", - "-cdb_alldb", - "-client_timeout", - "-lock", - "-lock_conflict", - "-lock_detect", - "-lock_max", - "-lock_max_locks", - "-lock_max_lockers", - "-lock_max_objects", - "-lock_timeout", - "-log", - "-log_buffer", - "-log_inmemory", - "-log_max", - "-log_regionmax", - "-log_remove", - "-mpool_max_openfd", - "-mpool_max_write", - "-mpool_mmap_size", - "-mpool_nommap", - "-overwrite", - "-region_init", - "-rep_client", - "-rep_master", - "-rep_transport", - "-server", - "-server_timeout", - "-set_intermediate_dir", - "-thread", - "-time_notgranted", - "-txn_timeout", - "-txn_timestamp", - "-verbose", - "-wrnosync", -#endif - "-cachesize", - "-create", - "-data_dir", - "-encryptaes", - "-encryptany", - "-errfile", - "-errpfx", - "-home", - "-log_dir", - "-mode", - "-private", - "-recover", - "-recover_fatal", - "-shm_key", - "-system_mem", - "-tmp_dir", - "-txn", - "-txn_max", - "-use_environ", - "-use_environ_root", - NULL - }; - /* - * !!! - * These have to be in the same order as the above, - * which is close to but not quite alphabetical. 
- */ - enum envopen { -#ifdef CONFIG_TEST - ENV_ALLOC, - ENV_AUTO_COMMIT, - ENV_CDB, - ENV_CDB_ALLDB, - ENV_CLIENT_TO, - ENV_LOCK, - ENV_CONFLICT, - ENV_DETECT, - ENV_LOCK_MAX, - ENV_LOCK_MAX_LOCKS, - ENV_LOCK_MAX_LOCKERS, - ENV_LOCK_MAX_OBJECTS, - ENV_LOCK_TIMEOUT, - ENV_LOG, - ENV_LOG_BUFFER, - ENV_LOG_INMEMORY, - ENV_LOG_MAX, - ENV_LOG_REGIONMAX, - ENV_LOG_REMOVE, - ENV_MPOOL_MAX_OPENFD, - ENV_MPOOL_MAX_WRITE, - ENV_MPOOL_MMAP_SIZE, - ENV_MPOOL_NOMMAP, - ENV_OVERWRITE, - ENV_REGION_INIT, - ENV_REP_CLIENT, - ENV_REP_MASTER, - ENV_REP_TRANSPORT, - ENV_SERVER, - ENV_SERVER_TO, - ENV_SET_INTERMEDIATE_DIR, - ENV_THREAD, - ENV_TIME_NOTGRANTED, - ENV_TXN_TIMEOUT, - ENV_TXN_TIME, - ENV_VERBOSE, - ENV_WRNOSYNC, -#endif - ENV_CACHESIZE, - ENV_CREATE, - ENV_DATA_DIR, - ENV_ENCRYPT_AES, - ENV_ENCRYPT_ANY, - ENV_ERRFILE, - ENV_ERRPFX, - ENV_HOME, - ENV_LOG_DIR, - ENV_MODE, - ENV_PRIVATE, - ENV_RECOVER, - ENV_RECOVER_FATAL, - ENV_SHM_KEY, - ENV_SYSTEM_MEM, - ENV_TMP_DIR, - ENV_TXN, - ENV_TXN_MAX, - ENV_USE_ENVIRON, - ENV_USE_ENVIRON_ROOT - }; - Tcl_Obj **myobjv; - u_int32_t cr_flags, gbytes, bytes, logbufset, logmaxset; - u_int32_t open_flags, rep_flags, set_flags, uintarg; - int i, mode, myobjc, ncaches, optindex, result, ret; - long client_to, server_to, shm; - char *arg, *home, *passwd, *server; -#ifdef CONFIG_TEST - Tcl_Obj **myobjv1; - time_t timestamp; - long v; - u_int32_t detect; - u_int8_t *conflicts; - int intarg, intarg2, j, nmodes, temp; -#endif - - result = TCL_OK; - mode = 0; - rep_flags = set_flags = cr_flags = 0; - home = NULL; - - /* - * XXX - * If/when our Tcl interface becomes thread-safe, we should enable - * DB_THREAD here in all cases. For now, we turn it on later in this - * function, and only when we're in testing and we specify the - * -thread flag, so that we can exercise MUTEX_THREAD_LOCK cases. 
- * - * In order to become truly thread-safe, we need to look at making sure - * DBTCL_INFO structs are safe to share across threads (they're not - * mutex-protected) before we declare the Tcl interface thread-safe. - * Meanwhile, there's no strong reason to enable DB_THREAD when not - * testing. - */ - open_flags = DB_JOINENV; - - logmaxset = logbufset = 0; - - if (objc <= 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - /* - * Server code must go before the call to db_env_create. - */ - server = NULL; - server_to = client_to = 0; - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i++], envopen, "option", - TCL_EXACT, &optindex) != TCL_OK) { - Tcl_ResetResult(interp); - continue; - } -#ifdef CONFIG_TEST - switch ((enum envopen)optindex) { - case ENV_SERVER: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-server hostname"); - result = TCL_ERROR; - break; - } - FLD_SET(cr_flags, DB_RPCCLIENT); - server = Tcl_GetStringFromObj(objv[i++], NULL); - break; - case ENV_SERVER_TO: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-server_to secs"); - result = TCL_ERROR; - break; - } - FLD_SET(cr_flags, DB_RPCCLIENT); - result = Tcl_GetLongFromObj(interp, objv[i++], - &server_to); - break; - case ENV_CLIENT_TO: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-client_to secs"); - result = TCL_ERROR; - break; - } - FLD_SET(cr_flags, DB_RPCCLIENT); - result = Tcl_GetLongFromObj(interp, objv[i++], - &client_to); - break; - default: - break; - } -#endif - } - if (result != TCL_OK) - return (TCL_ERROR); - ret = db_env_create(env, cr_flags); - if (ret) - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_env_create")); - /* - * From here on we must 'goto error' in order to clean up the - * env from db_env_create. 
- */ - if (server != NULL) { - (*env)->set_errpfx((*env), ip->i_name); - (*env)->set_errcall((*env), _ErrorFunc); - if ((ret = (*env)->set_rpc_server((*env), NULL, server, - client_to, server_to, 0)) != 0) { - result = TCL_ERROR; - goto error; - } - } else { - /* - * Create the environment handle before parsing the args - * since we'll be modifying the environment as we parse. - */ - (*env)->set_errpfx((*env), ip->i_name); - (*env)->set_errcall((*env), _ErrorFunc); - } - - /* Hang our info pointer on the env handle, so we can do callbacks. */ - (*env)->app_private = ip; - - /* - * Get the command name index from the object based on the bdbcmds - * defined above. - */ - i = 2; - while (i < objc) { - Tcl_ResetResult(interp); - if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - goto error; - } - i++; - switch ((enum envopen)optindex) { -#ifdef CONFIG_TEST - case ENV_SERVER: - case ENV_SERVER_TO: - case ENV_CLIENT_TO: - /* - * Already handled these, skip them and their arg. - */ - i++; - break; - case ENV_ALLOC: - /* - * Use a Tcl-local alloc and free function so that - * we're sure to test whether we use umalloc/ufree in - * the right places. - */ - (void)(*env)->set_alloc(*env, - tcl_db_malloc, tcl_db_realloc, tcl_db_free); - break; - case ENV_AUTO_COMMIT: - FLD_SET(set_flags, DB_AUTO_COMMIT); - break; - case ENV_CDB: - FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL); - FLD_CLR(open_flags, DB_JOINENV); - break; - case ENV_CDB_ALLDB: - FLD_SET(set_flags, DB_CDB_ALLDB); - break; - case ENV_LOCK: - FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL); - FLD_CLR(open_flags, DB_JOINENV); - break; - case ENV_CONFLICT: - /* - * Get conflict list. List is: - * {nmodes {matrix}} - * - * Where matrix must be nmodes*nmodes big. - * Set up conflicts array to pass. 
- */ - result = Tcl_ListObjGetElements(interp, objv[i], - &myobjc, &myobjv); - if (result == TCL_OK) - i++; - else - break; - if (myobjc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-lock_conflict {nmodes {matrix}}?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, myobjv[0], &nmodes); - if (result != TCL_OK) - break; - result = Tcl_ListObjGetElements(interp, myobjv[1], - &myobjc, &myobjv1); - if (myobjc != (nmodes * nmodes)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-lock_conflict {nmodes {matrix}}?"); - result = TCL_ERROR; - break; - } - - ret = __os_malloc(*env, sizeof(u_int8_t) * - (size_t)nmodes * (size_t)nmodes, &conflicts); - if (ret != 0) { - result = TCL_ERROR; - break; - } - for (j = 0; j < myobjc; j++) { - result = Tcl_GetIntFromObj(interp, myobjv1[j], - &temp); - conflicts[j] = temp; - if (result != TCL_OK) { - __os_free(NULL, conflicts); - break; - } - } - _debug_check(); - ret = (*env)->set_lk_conflicts(*env, - (u_int8_t *)conflicts, nmodes); - __os_free(NULL, conflicts); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_lk_conflicts"); - break; - case ENV_DETECT: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-lock_detect policy?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - if (strcmp(arg, "default") == 0) - detect = DB_LOCK_DEFAULT; - else if (strcmp(arg, "expire") == 0) - detect = DB_LOCK_EXPIRE; - else if (strcmp(arg, "maxlocks") == 0) - detect = DB_LOCK_MAXLOCKS; - else if (strcmp(arg, "maxwrites") == 0) - detect = DB_LOCK_MAXWRITE; - else if (strcmp(arg, "minlocks") == 0) - detect = DB_LOCK_MINLOCKS; - else if (strcmp(arg, "minwrites") == 0) - detect = DB_LOCK_MINWRITE; - else if (strcmp(arg, "oldest") == 0) - detect = DB_LOCK_OLDEST; - else if (strcmp(arg, "youngest") == 0) - detect = DB_LOCK_YOUNGEST; - else if (strcmp(arg, "random") == 0) - detect = DB_LOCK_RANDOM; - else { - Tcl_AddErrorInfo(interp, - "lock_detect: illegal policy"); - result = 
TCL_ERROR; - break; - } - _debug_check(); - ret = (*env)->set_lk_detect(*env, detect); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock_detect"); - break; - case ENV_LOCK_MAX: - case ENV_LOCK_MAX_LOCKS: - case ENV_LOCK_MAX_LOCKERS: - case ENV_LOCK_MAX_OBJECTS: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-lock_max max?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - switch ((enum envopen)optindex) { - case ENV_LOCK_MAX: - ret = (*env)->set_lk_max(*env, - uintarg); - break; - case ENV_LOCK_MAX_LOCKS: - ret = (*env)->set_lk_max_locks(*env, - uintarg); - break; - case ENV_LOCK_MAX_LOCKERS: - ret = (*env)->set_lk_max_lockers(*env, - uintarg); - break; - case ENV_LOCK_MAX_OBJECTS: - ret = (*env)->set_lk_max_objects(*env, - uintarg); - break; - default: - break; - } - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock_max"); - } - break; - case ENV_TXN_TIME: - case ENV_TXN_TIMEOUT: - case ENV_LOCK_TIMEOUT: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-txn_timestamp time?"); - result = TCL_ERROR; - break; - } - - if ((result = Tcl_GetLongFromObj( - interp, objv[i++], &v)) != TCL_OK) - break; - timestamp = (time_t)v; - - _debug_check(); - if ((enum envopen)optindex == ENV_TXN_TIME) - ret = - (*env)->set_tx_timestamp(*env, ×tamp); - else - ret = (*env)->set_timeout(*env, - (db_timeout_t)timestamp, - (enum envopen)optindex == ENV_TXN_TIMEOUT ? 
- DB_SET_TXN_TIMEOUT : DB_SET_LOCK_TIMEOUT); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "txn_timestamp"); - break; - case ENV_LOG: - FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL); - FLD_CLR(open_flags, DB_JOINENV); - break; - case ENV_LOG_BUFFER: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-log_buffer size?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)->set_lg_bsize(*env, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "log_bsize"); - logbufset = 1; - if (logmaxset) { - _debug_check(); - ret = (*env)->set_lg_max(*env, - logmaxset); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "log_max"); - logmaxset = 0; - logbufset = 0; - } - } - break; - case ENV_LOG_INMEMORY: - FLD_SET(set_flags, DB_LOG_INMEMORY); - break; - case ENV_LOG_MAX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-log_max max?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK && logbufset) { - _debug_check(); - ret = (*env)->set_lg_max(*env, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "log_max"); - logbufset = 0; - } else - logmaxset = uintarg; - break; - case ENV_LOG_REGIONMAX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-log_regionmax size?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)->set_lg_regionmax(*env, uintarg); - result = - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "log_regionmax"); - } - break; - case ENV_LOG_REMOVE: - FLD_SET(set_flags, DB_LOG_AUTOREMOVE); - break; - case ENV_MPOOL_MAX_OPENFD: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mpool_max_openfd fd_count?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - 
_debug_check(); - ret = (*env)->set_mp_max_openfd(*env, intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "mpool_max_openfd"); - } - break; - case ENV_MPOOL_MAX_WRITE: - result = Tcl_ListObjGetElements(interp, objv[i], - &myobjc, &myobjv); - if (result == TCL_OK) - i++; - else - break; - if (myobjc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mpool_max_write {nwrite nsleep}?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, myobjv[0], &intarg); - if (result != TCL_OK) - break; - result = Tcl_GetIntFromObj(interp, myobjv[1], &intarg2); - if (result != TCL_OK) - break; - _debug_check(); - ret = (*env)->set_mp_max_write(*env, intarg, intarg2); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_mp_max_write"); - break; - case ENV_MPOOL_MMAP_SIZE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mpool_mmap_size size?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)->set_mp_mmapsize(*env, - (size_t)intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "mpool_mmap_size"); - } - break; - case ENV_MPOOL_NOMMAP: - FLD_SET(set_flags, DB_NOMMAP); - break; - case ENV_OVERWRITE: - FLD_SET(set_flags, DB_OVERWRITE); - break; - case ENV_REGION_INIT: - _debug_check(); - ret = (*env)->set_flags(*env, DB_REGION_INIT, 1); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "region_init"); - break; - case ENV_SET_INTERMEDIATE_DIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, - 2, objv, "?-set_intermediate_dir mode?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)-> - set_intermediate_dir(*env, intarg, 0); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_intermediate_dir"); - } - break; - case ENV_REP_CLIENT: - rep_flags = DB_REP_CLIENT; - FLD_SET(open_flags, DB_INIT_REP); - 
break; - case ENV_REP_MASTER: - rep_flags = DB_REP_MASTER; - FLD_SET(open_flags, DB_INIT_REP); - break; - case ENV_REP_TRANSPORT: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-rep_transport {envid sendproc}"); - result = TCL_ERROR; - break; - } - - /* - * Store the objects containing the machine ID - * and the procedure name. We don't need to crack - * the send procedure out now, but we do convert the - * machine ID to an int, since set_rep_transport needs - * it. Even so, it'll be easier later to deal with - * the Tcl_Obj *, so we save that, not the int. - * - * Note that we Tcl_IncrRefCount both objects - * independently; Tcl is free to discard the list - * that they're bundled into. - */ - result = Tcl_ListObjGetElements(interp, objv[i++], - &myobjc, &myobjv); - if (myobjc != 2) { - Tcl_SetResult(interp, - "List must be {envid sendproc}", - TCL_STATIC); - result = TCL_ERROR; - break; - } - - FLD_SET(open_flags, DB_INIT_REP); - /* - * Check that the machine ID is an int. Note that - * we do want to use GetIntFromObj; the machine - * ID is explicitly an int, not a u_int32_t. - */ - ip->i_rep_eid = myobjv[0]; - Tcl_IncrRefCount(ip->i_rep_eid); - result = Tcl_GetIntFromObj(interp, - ip->i_rep_eid, &intarg); - if (result != TCL_OK) - break; - - ip->i_rep_send = myobjv[1]; - Tcl_IncrRefCount(ip->i_rep_send); - _debug_check(); - ret = (*env)->set_rep_transport(*env, - intarg, tcl_rep_send); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_rep_transport"); - break; - case ENV_THREAD: - /* Enable DB_THREAD when specified in testing. 
*/ - FLD_SET(open_flags, DB_THREAD); - break; - case ENV_TIME_NOTGRANTED: - FLD_SET(set_flags, DB_TIME_NOTGRANTED); - break; - case ENV_VERBOSE: - result = Tcl_ListObjGetElements(interp, objv[i], - &myobjc, &myobjv); - if (result == TCL_OK) - i++; - else - break; - if (myobjc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-verbose {which on|off}?"); - result = TCL_ERROR; - break; - } - result = tcl_EnvVerbose(interp, *env, - myobjv[0], myobjv[1]); - break; - case ENV_WRNOSYNC: - FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC); - break; -#endif - case ENV_TXN: - FLD_SET(open_flags, DB_INIT_LOCK | - DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN); - FLD_CLR(open_flags, DB_JOINENV); - /* Make sure we have an arg to check against! */ - if (i < objc) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (strcmp(arg, "nosync") == 0) { - FLD_SET(set_flags, DB_TXN_NOSYNC); - i++; - } - } - break; - case ENV_CREATE: - FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL); - FLD_CLR(open_flags, DB_JOINENV); - break; - case ENV_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*env)->set_encrypt(*env, passwd, DB_ENCRYPT_AES); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - break; - case ENV_ENCRYPT_ANY: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*env)->set_encrypt(*env, passwd, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - break; - case ENV_HOME: - /* Make sure we have an arg to check against! 
*/ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-home dir?"); - result = TCL_ERROR; - break; - } - home = Tcl_GetStringFromObj(objv[i++], NULL); - break; - case ENV_MODE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mode mode?"); - result = TCL_ERROR; - break; - } - /* - * Don't need to check result here because - * if TCL_ERROR, the error message is already - * set up, and we'll bail out below. If ok, - * the mode is set and we go on. - */ - result = Tcl_GetIntFromObj(interp, objv[i++], &mode); - break; - case ENV_PRIVATE: - FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL); - FLD_CLR(open_flags, DB_JOINENV); - break; - case ENV_RECOVER: - FLD_SET(open_flags, DB_RECOVER); - break; - case ENV_RECOVER_FATAL: - FLD_SET(open_flags, DB_RECOVER_FATAL); - break; - case ENV_SYSTEM_MEM: - FLD_SET(open_flags, DB_SYSTEM_MEM); - break; - case ENV_USE_ENVIRON_ROOT: - FLD_SET(open_flags, DB_USE_ENVIRON_ROOT); - break; - case ENV_USE_ENVIRON: - FLD_SET(open_flags, DB_USE_ENVIRON); - break; - case ENV_CACHESIZE: - result = Tcl_ListObjGetElements(interp, objv[i], - &myobjc, &myobjv); - if (result == TCL_OK) - i++; - else - break; - if (myobjc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-cachesize {gbytes bytes ncaches}?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, myobjv[0], &gbytes); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, myobjv[1], &bytes); - if (result != TCL_OK) - break; - result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches); - if (result != TCL_OK) - break; - _debug_check(); - ret = (*env)->set_cachesize(*env, gbytes, bytes, - ncaches); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_cachesize"); - break; - case ENV_SHM_KEY: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-shm_key key?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetLongFromObj(interp, objv[i++], &shm); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)->set_shm_key(*env, shm); - result = 
_ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "shm_key"); - } - break; - case ENV_TXN_MAX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-txn_max max?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*env)->set_tx_max(*env, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "txn_max"); - } - break; - case ENV_ERRFILE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errfile file"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - tcl_EnvSetErrfile(interp, *env, ip, arg); - break; - case ENV_ERRPFX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errpfx prefix"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - result = tcl_EnvSetErrpfx(interp, *env, ip, arg); - break; - case ENV_DATA_DIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-data_dir dir"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*env)->set_data_dir(*env, arg); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_data_dir"); - break; - case ENV_LOG_DIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-log_dir dir"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*env)->set_lg_dir(*env, arg); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_lg_dir"); - break; - case ENV_TMP_DIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-tmp_dir dir"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*env)->set_tmp_dir(*env, arg); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_tmp_dir"); - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. 
- */ - if (result != TCL_OK) - goto error; - } - - /* - * We have to check this here. We want to set the log buffer - * size first, if it is specified. So if the user did so, - * then we took care of it above. But, if we get out here and - * logmaxset is non-zero, then they set the log_max without - * resetting the log buffer size, so we now have to do the - * call to set_lg_max, since we didn't do it above. - */ - if (logmaxset) { - _debug_check(); - ret = (*env)->set_lg_max(*env, (u_int32_t)logmaxset); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "log_max"); - } - - if (result != TCL_OK) - goto error; - - if (set_flags) { - ret = (*env)->set_flags(*env, set_flags, 1); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - if (result == TCL_ERROR) - goto error; - /* - * If we are successful, clear the result so that the - * return from set_flags isn't part of the result. - */ - Tcl_ResetResult(interp); - } - /* - * When we get here, we have already parsed all of our args - * and made all our calls to set up the environment. Everything - * is okay so far, no errors, if we get here. - * - * Now open the environment. - */ - _debug_check(); - ret = (*env)->open(*env, home, open_flags, mode); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env open"); - - if (rep_flags != 0 && result == TCL_OK) { - _debug_check(); - ret = (*env)->rep_start(*env, NULL, rep_flags); - result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "rep_start"); - } - -error: if (result == TCL_ERROR) { - if (ip->i_err && ip->i_err != stdout && ip->i_err != stderr) { - (void)fclose(ip->i_err); - ip->i_err = NULL; - } - (void)(*env)->close(*env, 0); - *env = NULL; - } - return (result); -} - -/* - * bdb_DbOpen -- - * Implements the "db_create/db_open" command. - * There are many, many options to the open command. - * Here is the general flow: - * - * 0. Preparse args to determine if we have -env. - * 1. Call db_create to create the db handle. - * 2. 
Parse args tracking options. - * 3. Make any pre-open setup calls necessary. - * 4. Call DB->open to open the database. - * 5. Return db widget handle to user. - */ -static int -bdb_DbOpen(interp, objc, objv, ip, dbp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBTCL_INFO *ip; /* Our internal info */ - DB **dbp; /* DB handle */ -{ - static const char *bdbenvopen[] = { - "-env", NULL - }; - enum bdbenvopen { - TCL_DB_ENV0 - }; - static const char *bdbopen[] = { -#ifdef CONFIG_TEST - "-btcompare", - "-dirty", - "-dupcompare", - "-hashproc", - "-lorder", - "-minkey", - "-nommap", - "-notdurable", - "-revsplitoff", - "-test", - "-thread", -#endif - "-auto_commit", - "-btree", - "-cachesize", - "-chksum", - "-create", - "-delim", - "-dup", - "-dupsort", - "-encrypt", - "-encryptaes", - "-encryptany", - "-env", - "-errfile", - "-errpfx", - "-excl", - "-extent", - "-ffactor", - "-hash", - "-inorder", - "-len", - "-maxsize", - "-mode", - "-nelem", - "-pad", - "-pagesize", - "-queue", - "-rdonly", - "-recno", - "-recnum", - "-renumber", - "-snapshot", - "-source", - "-truncate", - "-txn", - "-unknown", - "--", - NULL - }; - enum bdbopen { -#ifdef CONFIG_TEST - TCL_DB_BTCOMPARE, - TCL_DB_DIRTY, - TCL_DB_DUPCOMPARE, - TCL_DB_HASHPROC, - TCL_DB_LORDER, - TCL_DB_MINKEY, - TCL_DB_NOMMAP, - TCL_DB_NOTDURABLE, - TCL_DB_REVSPLIT, - TCL_DB_TEST, - TCL_DB_THREAD, -#endif - TCL_DB_AUTO_COMMIT, - TCL_DB_BTREE, - TCL_DB_CACHESIZE, - TCL_DB_CHKSUM, - TCL_DB_CREATE, - TCL_DB_DELIM, - TCL_DB_DUP, - TCL_DB_DUPSORT, - TCL_DB_ENCRYPT, - TCL_DB_ENCRYPT_AES, - TCL_DB_ENCRYPT_ANY, - TCL_DB_ENV, - TCL_DB_ERRFILE, - TCL_DB_ERRPFX, - TCL_DB_EXCL, - TCL_DB_EXTENT, - TCL_DB_FFACTOR, - TCL_DB_HASH, - TCL_DB_INORDER, - TCL_DB_LEN, - TCL_DB_MAXSIZE, - TCL_DB_MODE, - TCL_DB_NELEM, - TCL_DB_PAD, - TCL_DB_PAGESIZE, - TCL_DB_QUEUE, - TCL_DB_RDONLY, - TCL_DB_RECNO, - TCL_DB_RECNUM, - TCL_DB_RENUMBER, - TCL_DB_SNAPSHOT, - 
TCL_DB_SOURCE, - TCL_DB_TRUNCATE, - TCL_DB_TXN, - TCL_DB_UNKNOWN, - TCL_DB_ENDARG - }; - - DBTCL_INFO *envip, *errip; - DB_TXN *txn; - DBTYPE type; - DB_ENV *envp; - Tcl_Obj **myobjv; - u_int32_t gbytes, bytes, open_flags, set_flags, uintarg; - int endarg, i, intarg, mode, myobjc, ncaches; - int optindex, result, ret, set_err, set_pfx, subdblen; - u_char *subdbtmp; - char *arg, *db, *passwd, *subdb, msg[MSG_SIZE]; - - type = DB_UNKNOWN; - endarg = mode = set_err = set_flags = set_pfx = 0; - result = TCL_OK; - subdbtmp = NULL; - db = subdb = NULL; - - /* - * XXX - * If/when our Tcl interface becomes thread-safe, we should enable - * DB_THREAD here in all cases. For now, we turn it on later in this - * function, and only when we're in testing and we specify the - * -thread flag, so that we can exercise MUTEX_THREAD_LOCK cases. - * - * In order to become truly thread-safe, we need to look at making sure - * DBTCL_INFO structs are safe to share across threads (they're not - * mutex-protected) before we declare the Tcl interface thread-safe. - * Meanwhile, there's no strong reason to enable DB_THREAD when not - * testing. - */ - open_flags = 0; - - envp = NULL; - txn = NULL; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i++], bdbenvopen, - "option", TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get - * an errant error message if there is another error. 
- */ - Tcl_ResetResult(interp); - continue; - } - switch ((enum bdbenvopen)optindex) { - case TCL_DB_ENV0: - arg = Tcl_GetStringFromObj(objv[i], NULL); - envp = NAME_TO_ENV(arg); - if (envp == NULL) { - Tcl_SetResult(interp, - "db open: illegal environment", TCL_STATIC); - return (TCL_ERROR); - } - } - break; - } - - /* - * Create the db handle before parsing the args - * since we'll be modifying the database options as we parse. - */ - ret = db_create(dbp, envp, 0); - if (ret) - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_create")); - - /* Hang our info pointer on the DB handle, so we can do callbacks. */ - (*dbp)->api_internal = ip; - - /* - * XXX Remove restriction when err stuff is not tied to env. - * - * The DB->set_err* functions actually overwrite in the - * environment. So, if we are explicitly using an env, - * don't overwrite what we have already set up. If we are - * not using one, then we set up since we get a private - * default env. - */ - /* XXX - remove this conditional if/when err is not tied to env */ - if (envp == NULL) { - (*dbp)->set_errpfx((*dbp), ip->i_name); - (*dbp)->set_errcall((*dbp), _ErrorFunc); - } - envip = _PtrToInfo(envp); /* XXX */ - /* - * If we are using an env, we keep track of err info in the env's ip. - * Otherwise use the DB's ip. - */ - if (envip) - errip = envip; - else - errip = ip; - /* - * Get the option name index from the object based on the args - * defined above. 
- */ - i = 2; - while (i < objc) { - Tcl_ResetResult(interp); - if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbopen)optindex) { -#ifdef CONFIG_TEST - case TCL_DB_BTCOMPARE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-btcompare compareproc"); - result = TCL_ERROR; - break; - } - - /* - * Store the object containing the procedure name. - * We don't need to crack it out now--we'll want - * to bundle it up to pass into Tcl_EvalObjv anyway. - * Tcl's object refcounting will--I hope--take care - * of the memory management here. - */ - ip->i_btcompare = objv[i++]; - Tcl_IncrRefCount(ip->i_btcompare); - _debug_check(); - ret = (*dbp)->set_bt_compare(*dbp, tcl_bt_compare); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_bt_compare"); - break; - case TCL_DB_DIRTY: - open_flags |= DB_DIRTY_READ; - break; - case TCL_DB_DUPCOMPARE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-dupcompare compareproc"); - result = TCL_ERROR; - break; - } - - /* - * Store the object containing the procedure name. - * See TCL_DB_BTCOMPARE. - */ - ip->i_dupcompare = objv[i++]; - Tcl_IncrRefCount(ip->i_dupcompare); - _debug_check(); - ret = (*dbp)->set_dup_compare(*dbp, tcl_dup_compare); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_dup_compare"); - break; - case TCL_DB_HASHPROC: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-hashproc hashproc"); - result = TCL_ERROR; - break; - } - - /* - * Store the object containing the procedure name. - * See TCL_DB_BTCOMPARE. 
- */ - ip->i_hashproc = objv[i++]; - Tcl_IncrRefCount(ip->i_hashproc); - _debug_check(); - ret = (*dbp)->set_h_hash(*dbp, tcl_h_hash); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_h_hash"); - break; - case TCL_DB_LORDER: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-lorder 1234|4321"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_lorder(*dbp, intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_lorder"); - } - break; - case TCL_DB_MINKEY: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-minkey minkey"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_bt_minkey(*dbp, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_bt_minkey"); - } - break; - case TCL_DB_NOMMAP: - open_flags |= DB_NOMMAP; - break; - case TCL_DB_NOTDURABLE: - set_flags |= DB_TXN_NOT_DURABLE; - break; - case TCL_DB_REVSPLIT: - set_flags |= DB_REVSPLITOFF; - break; - case TCL_DB_TEST: - ret = (*dbp)->set_h_hash(*dbp, __ham_test); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_h_hash"); - break; - case TCL_DB_THREAD: - /* Enable DB_THREAD when specified in testing. */ - open_flags |= DB_THREAD; - break; -#endif - case TCL_DB_AUTO_COMMIT: - open_flags |= DB_AUTO_COMMIT; - break; - case TCL_DB_ENV: - /* - * Already parsed this, skip it and the env pointer. 
- */ - i++; - continue; - case TCL_DB_TXN: - if (i > (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Open: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - case TCL_DB_BTREE: - if (type != DB_UNKNOWN) { - Tcl_SetResult(interp, - "Too many DB types specified", TCL_STATIC); - result = TCL_ERROR; - goto error; - } - type = DB_BTREE; - break; - case TCL_DB_HASH: - if (type != DB_UNKNOWN) { - Tcl_SetResult(interp, - "Too many DB types specified", TCL_STATIC); - result = TCL_ERROR; - goto error; - } - type = DB_HASH; - break; - case TCL_DB_RECNO: - if (type != DB_UNKNOWN) { - Tcl_SetResult(interp, - "Too many DB types specified", TCL_STATIC); - result = TCL_ERROR; - goto error; - } - type = DB_RECNO; - break; - case TCL_DB_QUEUE: - if (type != DB_UNKNOWN) { - Tcl_SetResult(interp, - "Too many DB types specified", TCL_STATIC); - result = TCL_ERROR; - goto error; - } - type = DB_QUEUE; - break; - case TCL_DB_UNKNOWN: - if (type != DB_UNKNOWN) { - Tcl_SetResult(interp, - "Too many DB types specified", TCL_STATIC); - result = TCL_ERROR; - goto error; - } - break; - case TCL_DB_CREATE: - open_flags |= DB_CREATE; - break; - case TCL_DB_EXCL: - open_flags |= DB_EXCL; - break; - case TCL_DB_RDONLY: - open_flags |= DB_RDONLY; - break; - case TCL_DB_TRUNCATE: - open_flags |= DB_TRUNCATE; - break; - case TCL_DB_MODE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mode mode?"); - result = TCL_ERROR; - break; - } - /* - * Don't need to check result here because - * if TCL_ERROR, the error message is already - * set up, and we'll bail out below. If ok, - * the mode is set and we go on. 
- */ - result = Tcl_GetIntFromObj(interp, objv[i++], &mode); - break; - case TCL_DB_DUP: - set_flags |= DB_DUP; - break; - case TCL_DB_DUPSORT: - set_flags |= DB_DUPSORT; - break; - case TCL_DB_INORDER: - set_flags |= DB_INORDER; - break; - case TCL_DB_RECNUM: - set_flags |= DB_RECNUM; - break; - case TCL_DB_RENUMBER: - set_flags |= DB_RENUMBER; - break; - case TCL_DB_SNAPSHOT: - set_flags |= DB_SNAPSHOT; - break; - case TCL_DB_CHKSUM: - set_flags |= DB_CHKSUM; - break; - case TCL_DB_ENCRYPT: - set_flags |= DB_ENCRYPT; - break; - case TCL_DB_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*dbp)->set_encrypt(*dbp, passwd, DB_ENCRYPT_AES); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - break; - case TCL_DB_ENCRYPT_ANY: - /* Make sure we have an arg to check against! 
*/ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*dbp)->set_encrypt(*dbp, passwd, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - break; - case TCL_DB_FFACTOR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-ffactor density"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_h_ffactor(*dbp, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_h_ffactor"); - } - break; - case TCL_DB_NELEM: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-nelem nelem"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_h_nelem(*dbp, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_h_nelem"); - } - break; - case TCL_DB_DELIM: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-delim delim"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_re_delim(*dbp, intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_re_delim"); - } - break; - case TCL_DB_LEN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-len length"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_re_len(*dbp, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_re_len"); - } - break; - case TCL_DB_MAXSIZE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-len length"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = 
(*dbp)->mpf->set_maxsize( - (*dbp)->mpf, 0, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_re_len"); - } - break; - case TCL_DB_PAD: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-pad pad"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_re_pad(*dbp, intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_re_pad"); - } - break; - case TCL_DB_SOURCE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-source file"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - _debug_check(); - ret = (*dbp)->set_re_source(*dbp, arg); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_re_source"); - break; - case TCL_DB_EXTENT: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-extent size"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, objv[i++], &uintarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_q_extentsize(*dbp, uintarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_q_extentsize"); - } - break; - case TCL_DB_CACHESIZE: - result = Tcl_ListObjGetElements(interp, objv[i++], - &myobjc, &myobjv); - if (result != TCL_OK) - break; - if (myobjc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-cachesize {gbytes bytes ncaches}?"); - result = TCL_ERROR; - break; - } - result = _GetUInt32(interp, myobjv[0], &gbytes); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, myobjv[1], &bytes); - if (result != TCL_OK) - break; - result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches); - if (result != TCL_OK) - break; - _debug_check(); - ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes, - ncaches); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set_cachesize"); - break; - case TCL_DB_PAGESIZE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-pagesize size?"); - result = TCL_ERROR; 
- break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); - if (result == TCL_OK) { - _debug_check(); - ret = (*dbp)->set_pagesize(*dbp, - (size_t)intarg); - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "set pagesize"); - } - break; - case TCL_DB_ERRFILE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errfile file"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, close it. - */ - if (errip->i_err != NULL && - errip->i_err != stdout && errip->i_err != stderr) - (void)fclose(errip->i_err); - if (strcmp(arg, "/dev/stdout") == 0) - errip->i_err = stdout; - else if (strcmp(arg, "/dev/stderr") == 0) - errip->i_err = stderr; - else - errip->i_err = fopen(arg, "a"); - if (errip->i_err != NULL) { - _debug_check(); - (*dbp)->set_errfile(*dbp, errip->i_err); - set_err = 1; - } - break; - case TCL_DB_ERRPFX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errpfx prefix"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, free it. - */ - if (errip->i_errpfx != NULL) - __os_free(NULL, errip->i_errpfx); - if ((ret = __os_strdup((*dbp)->dbenv, - arg, &errip->i_errpfx)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "__os_strdup"); - break; - } - if (errip->i_errpfx != NULL) { - _debug_check(); - (*dbp)->set_errpfx(*dbp, errip->i_errpfx); - set_pfx = 1; - } - break; - case TCL_DB_ENDARG: - endarg = 1; - break; - } /* switch */ - - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - - /* - * Any args we have left, (better be 0, 1 or 2 left) are - * file names. If we have 0, then an in-memory db. If - * there is 1, a db name, if 2 a db and subdb name. 
- */ - if (i != objc) { - /* - * Dbs must be NULL terminated file names, but subdbs can - * be anything. Use Strings for the db name and byte - * arrays for the subdb. - */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if (i != objc) { - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(envp, - (size_t)subdblen + 1, &subdb)) != 0) { - Tcl_SetResult(interp, db_strerror(ret), - TCL_STATIC); - return (0); - } - memcpy(subdb, subdbtmp, (size_t)subdblen); - subdb[subdblen] = '\0'; - } - } - if (set_flags) { - ret = (*dbp)->set_flags(*dbp, set_flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - if (result == TCL_ERROR) - goto error; - /* - * If we are successful, clear the result so that the - * return from set_flags isn't part of the result. - */ - Tcl_ResetResult(interp); - } - - /* - * When we get here, we have already parsed all of our args and made - * all our calls to set up the database. Everything is okay so far, - * no errors, if we get here. - */ - _debug_check(); - - /* Open the database. */ - ret = (*dbp)->open(*dbp, txn, db, subdb, type, open_flags, mode); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db open"); - -error: - if (subdb) - __os_free(envp, subdb); - if (result == TCL_ERROR) { - (void)(*dbp)->close(*dbp, 0); - /* - * If we opened and set up the error file in the environment - * on this open, but we failed for some other reason, clean - * up and close the file. - * - * XXX when err stuff isn't tied to env, change to use ip, - * instead of envip. Also, set_err is irrelevant when that - * happens. 
It will just read: - * if (ip->i_err) - * fclose(ip->i_err); - */ - if (set_err && errip && errip->i_err != NULL && - errip->i_err != stdout && errip->i_err != stderr) { - (void)fclose(errip->i_err); - errip->i_err = NULL; - } - if (set_pfx && errip && errip->i_errpfx != NULL) { - __os_free(envp, errip->i_errpfx); - errip->i_errpfx = NULL; - } - *dbp = NULL; - } - return (result); -} - -#ifdef HAVE_SEQUENCE -/* - * bdb_SeqOpen -- - * Implements the "Seq_create/Seq_open" command. - */ -static int -bdb_SeqOpen(interp, objc, objv, ip, seqp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBTCL_INFO *ip; /* Our internal info */ - DB_SEQUENCE **seqp; /* DB_SEQUENCE handle */ -{ - static const char *seqopen[] = { - "-auto_commit", - "-cachesize", - "-create", - "-inc", - "-init", - "-dec", - "-max", - "-min", - "-txn", - "-wrap", - "--", - NULL - } ; - enum seqopen { - TCL_SEQ_AUTO_COMMIT, - TCL_SEQ_CACHESIZE, - TCL_SEQ_CREATE, - TCL_SEQ_INC, - TCL_SEQ_INIT, - TCL_SEQ_DEC, - TCL_SEQ_MAX, - TCL_SEQ_MIN, - TCL_SEQ_TXN, - TCL_SEQ_WRAP, - TCL_SEQ_ENDARG - }; - DB *dbp; - DBT key; - DBTYPE type; - DB_TXN *txn; - db_recno_t recno; - db_seq_t min, max, value; - u_int32_t flags, oflags; - int cache, endarg, i, optindex, result, ret, setrange, setvalue, v; - char *arg, *db, msg[MSG_SIZE]; - - COMPQUIET(ip, NULL); - COMPQUIET(value, 0); - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - txn = NULL; - endarg = 0; - flags = oflags = 0; - setrange = setvalue = 0; - min = INT64_MIN; - max = INT64_MAX; - cache = 0; - - for (i = 2; i < objc;) { - Tcl_ResetResult(interp); - if (Tcl_GetIndexFromObj(interp, objv[i], seqopen, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - result = TCL_OK; - switch 
((enum seqopen)optindex) { - case TCL_SEQ_AUTO_COMMIT: - oflags |= DB_AUTO_COMMIT; - break; - case TCL_SEQ_CREATE: - oflags |= DB_CREATE; - break; - case TCL_SEQ_INC: - LF_SET(DB_SEQ_INC); - break; - case TCL_SEQ_CACHESIZE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-cachesize value?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &cache); - break; - case TCL_SEQ_INIT: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-init value?"); - result = TCL_ERROR; - break; - } - result = - Tcl_GetWideIntFromObj(interp, objv[i++], &value); - setvalue = 1; - break; - case TCL_SEQ_DEC: - LF_SET(DB_SEQ_DEC); - break; - case TCL_SEQ_MAX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-max value?"); - result = TCL_ERROR; - break; - } - if ((result = - Tcl_GetWideIntFromObj(interp, - objv[i++], &max)) != TCL_OK) - goto error; - setrange = 1; - break; - case TCL_SEQ_MIN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-min value?"); - result = TCL_ERROR; - break; - } - if ((result = - Tcl_GetWideIntFromObj(interp, - objv[i++], &min)) != TCL_OK) - goto error; - setrange = 1; - break; - case TCL_SEQ_TXN: - if (i > (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Sequence: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - case TCL_SEQ_WRAP: - LF_SET(DB_SEQ_WRAP); - break; - case TCL_SEQ_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - - if (objc - i != 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - /* - * The db must be a string but the sequence key may - * be anything. 
- */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if ((dbp = NAME_TO_DB(db)) == NULL) { - Tcl_SetResult(interp, "No such dbp", TCL_STATIC); - return (TCL_ERROR); - } - (void)dbp->get_type(dbp, &type); - - memset(&key, 0, sizeof(key)); - if (type == DB_QUEUE || type == DB_RECNO) { - result = _GetUInt32(interp, objv[i++], &recno); - if (result != TCL_OK) - return (result); - key.data = &recno; - key.size = sizeof(recno); - } else { - key.data = Tcl_GetByteArrayFromObj(objv[i++], &v); - key.size = (u_int32_t)v; - } - ret = db_sequence_create(seqp, dbp, 0); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence create")) != TCL_OK) { - *seqp = NULL; - return (result); - } - - ret = (*seqp)->set_flags(*seqp, flags); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence set_flags")) != TCL_OK) - goto error; - if (setrange) { - ret = (*seqp)->set_range(*seqp, min, max); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence set_range")) != TCL_OK) - goto error; - } - if (cache) { - ret = (*seqp)->set_cachesize(*seqp, cache); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence cachesize")) != TCL_OK) - goto error; - } - if (setvalue) { - ret = (*seqp)->initial_value(*seqp, value); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence init")) != TCL_OK) - goto error; - } - ret = (*seqp)->open(*seqp, txn, &key, oflags); - if ((result = _ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "sequence open")) != TCL_OK) - goto error; - - if (0) { -error: (void)(*seqp)->close(*seqp, 0); - *seqp = NULL; - } - return (result); -} -#endif - -/* - * bdb_DbRemove -- - * Implements the DB_ENV->remove and DB->remove command. - */ -static int -bdb_DbRemove(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *bdbrem[] = { - "-auto_commit", - "-encrypt", - "-encryptaes", - "-encryptany", - "-env", - "-txn", - "--", - NULL - }; - enum bdbrem { - TCL_DBREM_AUTOCOMMIT, - TCL_DBREM_ENCRYPT, - TCL_DBREM_ENCRYPT_AES, - TCL_DBREM_ENCRYPT_ANY, - TCL_DBREM_ENV, - TCL_DBREM_TXN, - TCL_DBREM_ENDARG - }; - DB *dbp; - DB_ENV *envp; - DB_TXN *txn; - int endarg, i, optindex, result, ret, subdblen; - u_int32_t enc_flag, iflags, set_flags; - u_char *subdbtmp; - char *arg, *db, msg[MSG_SIZE], *passwd, *subdb; - - db = subdb = NULL; - dbp = NULL; - endarg = 0; - envp = NULL; - iflags = enc_flag = set_flags = 0; - passwd = NULL; - result = TCL_OK; - subdbtmp = NULL; - txn = NULL; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], bdbrem, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbrem)optindex) { - case TCL_DBREM_AUTOCOMMIT: - iflags |= DB_AUTO_COMMIT; - _debug_check(); - break; - case TCL_DBREM_ENCRYPT: - set_flags |= DB_ENCRYPT; - _debug_check(); - break; - case TCL_DBREM_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = DB_ENCRYPT_AES; - break; - case TCL_DBREM_ENCRYPT_ANY: - /* Make sure we have an arg to check against! 
*/ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = 0; - break; - case TCL_DBREM_ENV: - arg = Tcl_GetStringFromObj(objv[i++], NULL); - envp = NAME_TO_ENV(arg); - if (envp == NULL) { - Tcl_SetResult(interp, - "db remove: illegal environment", - TCL_STATIC); - return (TCL_ERROR); - } - break; - case TCL_DBREM_ENDARG: - endarg = 1; - break; - case TCL_DBREM_TXN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Put: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * Any args we have left, (better be 1 or 2 left) are - * file names. If there is 1, a db name, if 2 a db and subdb name. - */ - if ((i != (objc - 1)) || (i != (objc - 2))) { - /* - * Dbs must be NULL terminated file names, but subdbs can - * be anything. Use Strings for the db name and byte - * arrays for the subdb. - */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if (i != objc) { - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(envp, (size_t)subdblen + 1, - &subdb)) != 0) { Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(subdb, subdbtmp, (size_t)subdblen); - subdb[subdblen] = '\0'; - } - } else { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename ?database?"); - result = TCL_ERROR; - goto error; - } - if (envp == NULL) { - ret = db_create(&dbp, envp, 0); - if (ret) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_create"); - goto error; - } - - if (passwd != NULL) { - ret = dbp->set_encrypt(dbp, passwd, enc_flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - } - if (set_flags != 0) { - ret = dbp->set_flags(dbp, set_flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - } - } - - /* - * The dbremove method is a destructor, NULL out the dbp. - */ - _debug_check(); - if (dbp == NULL) - ret = envp->dbremove(envp, txn, db, subdb, iflags); - else - ret = dbp->remove(dbp, db, subdb, 0); - - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db remove"); - dbp = NULL; -error: - if (subdb) - __os_free(envp, subdb); - if (result == TCL_ERROR && dbp != NULL) - (void)dbp->close(dbp, 0); - return (result); -} - -/* - * bdb_DbRename -- - * Implements the DB_ENV->dbrename and DB->rename commands. - */ -static int -bdb_DbRename(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *bdbmv[] = { - "-auto_commit", - "-encrypt", - "-encryptaes", - "-encryptany", - "-env", - "-txn", - "--", - NULL - }; - enum bdbmv { - TCL_DBMV_AUTOCOMMIT, - TCL_DBMV_ENCRYPT, - TCL_DBMV_ENCRYPT_AES, - TCL_DBMV_ENCRYPT_ANY, - TCL_DBMV_ENV, - TCL_DBMV_TXN, - TCL_DBMV_ENDARG - }; - DB *dbp; - DB_ENV *envp; - DB_TXN *txn; - u_int32_t enc_flag, iflags, set_flags; - int endarg, i, newlen, optindex, result, ret, subdblen; - u_char *subdbtmp; - char *arg, *db, msg[MSG_SIZE], *newname, *passwd, *subdb; - - db = newname = subdb = NULL; - dbp = NULL; - endarg = 0; - envp = NULL; - iflags = enc_flag = set_flags = 0; - passwd = NULL; - result = TCL_OK; - subdbtmp = NULL; - txn = NULL; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, - 3, objv, "?args? 
filename ?database? ?newname?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], bdbmv, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbmv)optindex) { - case TCL_DBMV_AUTOCOMMIT: - iflags |= DB_AUTO_COMMIT; - _debug_check(); - break; - case TCL_DBMV_ENCRYPT: - set_flags |= DB_ENCRYPT; - _debug_check(); - break; - case TCL_DBMV_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = DB_ENCRYPT_AES; - break; - case TCL_DBMV_ENCRYPT_ANY: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = 0; - break; - case TCL_DBMV_ENV: - arg = Tcl_GetStringFromObj(objv[i++], NULL); - envp = NAME_TO_ENV(arg); - if (envp == NULL) { - Tcl_SetResult(interp, - "db rename: illegal environment", - TCL_STATIC); - return (TCL_ERROR); - } - break; - case TCL_DBMV_ENDARG: - endarg = 1; - break; - case TCL_DBMV_TXN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Put: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. 
- */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * Any args we have left, (better be 2 or 3 left) are - * file names. If there is 2, a file name, if 3 a file and db name. - */ - if ((i != (objc - 2)) || (i != (objc - 3))) { - /* - * Dbs must be NULL terminated file names, but subdbs can - * be anything. Use Strings for the db name and byte - * arrays for the subdb. - */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if (i == objc - 2) { - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc(envp, (size_t)subdblen + 1, - &subdb)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(subdb, subdbtmp, (size_t)subdblen); - subdb[subdblen] = '\0'; - } - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &newlen); - if ((ret = __os_malloc(envp, (size_t)newlen + 1, - &newname)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(newname, subdbtmp, (size_t)newlen); - newname[newlen] = '\0'; - } else { - Tcl_WrongNumArgs( - interp, 3, objv, "?args? filename ?database? ?newname?"); - result = TCL_ERROR; - goto error; - } - if (envp == NULL) { - ret = db_create(&dbp, envp, 0); - if (ret) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_create"); - goto error; - } - if (passwd != NULL) { - ret = dbp->set_encrypt(dbp, passwd, enc_flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - } - if (set_flags != 0) { - ret = dbp->set_flags(dbp, set_flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - } - } - - /* - * The dbrename method is a destructor, NULL out the dbp. 
- */ - _debug_check(); - if (dbp == NULL) - ret = envp->dbrename(envp, txn, db, subdb, newname, iflags); - else - ret = dbp->rename(dbp, db, subdb, newname, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db rename"); - dbp = NULL; -error: - if (subdb) - __os_free(envp, subdb); - if (newname) - __os_free(envp, newname); - if (result == TCL_ERROR && dbp != NULL) - (void)dbp->close(dbp, 0); - return (result); -} - -#ifdef CONFIG_TEST -/* - * bdb_DbVerify -- - * Implements the DB->verify command. - */ -static int -bdb_DbVerify(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *bdbverify[] = { - "-encrypt", - "-encryptaes", - "-encryptany", - "-env", - "-errfile", - "-errpfx", - "-unref", - "--", - NULL - }; - enum bdbvrfy { - TCL_DBVRFY_ENCRYPT, - TCL_DBVRFY_ENCRYPT_AES, - TCL_DBVRFY_ENCRYPT_ANY, - TCL_DBVRFY_ENV, - TCL_DBVRFY_ERRFILE, - TCL_DBVRFY_ERRPFX, - TCL_DBVRFY_UNREF, - TCL_DBVRFY_ENDARG - }; - DB_ENV *envp; - DB *dbp; - FILE *errf; - u_int32_t enc_flag, flags, set_flags; - int endarg, i, optindex, result, ret; - char *arg, *db, *errpfx, *passwd; - - envp = NULL; - dbp = NULL; - passwd = NULL; - result = TCL_OK; - db = errpfx = NULL; - errf = NULL; - flags = endarg = 0; - enc_flag = set_flags = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? filename"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], bdbverify, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbvrfy)optindex) { - case TCL_DBVRFY_ENCRYPT: - set_flags |= DB_ENCRYPT; - _debug_check(); - break; - case TCL_DBVRFY_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = DB_ENCRYPT_AES; - break; - case TCL_DBVRFY_ENCRYPT_ANY: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = 0; - break; - case TCL_DBVRFY_ENV: - arg = Tcl_GetStringFromObj(objv[i++], NULL); - envp = NAME_TO_ENV(arg); - if (envp == NULL) { - Tcl_SetResult(interp, - "db verify: illegal environment", - TCL_STATIC); - result = TCL_ERROR; - break; - } - break; - case TCL_DBVRFY_ERRFILE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errfile file"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, close it. - */ - if (errf != NULL && errf != stdout && errf != stderr) - (void)fclose(errf); - if (strcmp(arg, "/dev/stdout") == 0) - errf = stdout; - else if (strcmp(arg, "/dev/stderr") == 0) - errf = stderr; - else - errf = fopen(arg, "a"); - break; - case TCL_DBVRFY_ERRPFX: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-errpfx prefix"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - /* - * If the user already set one, free it. 
- */ - if (errpfx != NULL) - __os_free(envp, errpfx); - if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "__os_strdup"); - break; - } - break; - case TCL_DBVRFY_UNREF: - flags |= DB_UNREF; - break; - case TCL_DBVRFY_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * The remaining arg is the db filename. - */ - if (i == (objc - 1)) - db = Tcl_GetStringFromObj(objv[i++], NULL); - else { - Tcl_WrongNumArgs(interp, 2, objv, "?args? filename"); - result = TCL_ERROR; - goto error; - } - ret = db_create(&dbp, envp, 0); - if (ret) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_create"); - goto error; - } - - if (passwd != NULL) { - ret = dbp->set_encrypt(dbp, passwd, enc_flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - } - - if (set_flags != 0) { - ret = dbp->set_flags(dbp, set_flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - } - if (errf != NULL) - dbp->set_errfile(dbp, errf); - if (errpfx != NULL) - dbp->set_errpfx(dbp, errpfx); - - /* - * The verify method is a destructor, NULL out the dbp. - */ - ret = dbp->verify(dbp, db, NULL, NULL, flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db verify"); - dbp = NULL; -error: - if (errf != NULL && errf != stdout && errf != stderr) - (void)fclose(errf); - if (errpfx != NULL) - __os_free(envp, errpfx); - if (dbp) - (void)dbp->close(dbp, 0); - return (result); -} -#endif - -/* - * bdb_Version -- - * Implements the version command. - */ -static int -bdb_Version(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *bdbver[] = { - "-string", NULL - }; - enum bdbver { - TCL_VERSTRING - }; - int i, optindex, maj, min, patch, result, string, verobjc; - char *arg, *v; - Tcl_Obj *res, *verobjv[3]; - - result = TCL_OK; - string = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], bdbver, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbver)optindex) { - case TCL_VERSTRING: - string = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - } - if (result != TCL_OK) - goto error; - - v = db_version(&maj, &min, &patch); - if (string) - res = NewStringObj(v, strlen(v)); - else { - verobjc = 3; - verobjv[0] = Tcl_NewIntObj(maj); - verobjv[1] = Tcl_NewIntObj(min); - verobjv[2] = Tcl_NewIntObj(patch); - res = Tcl_NewListObj(verobjc, verobjv); - } - Tcl_SetObjResult(interp, res); -error: - return (result); -} - -#ifdef CONFIG_TEST -/* - * bdb_Handles -- - * Implements the handles command. - */ -static int -bdb_Handles(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - DBTCL_INFO *p; - Tcl_Obj *res, *handle; - - /* - * No args. 
Error if we have some - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, ""); - return (TCL_ERROR); - } - res = Tcl_NewListObj(0, NULL); - - for (p = LIST_FIRST(&__db_infohead); p != NULL; - p = LIST_NEXT(p, entries)) { - handle = NewStringObj(p->i_name, strlen(p->i_name)); - if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK) - return (TCL_ERROR); - } - Tcl_SetObjResult(interp, res); - return (TCL_OK); -} - -/* - * bdb_MsgType - - * Implements the msgtype command. - * Given a replication message return its message type name. - */ -static int -bdb_MsgType(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - REP_CONTROL *rp; - Tcl_Obj *msgname; - u_int32_t len, msgtype; - int freerp, ret; - - /* - * If the messages in rep.h change, this must change too! - * Add "no_type" for 0 so that we directly index. - */ - static const char *msgnames[] = { - "no_type", "alive", "alive_req", "all_req", - "dupmaster", "file", "file_fail", "file_req", "log", - "log_more", "log_req", "master_req", "newclient", - "newfile", "newmaster", "newsite", "page", - "page_fail", "page_req", "update", "update_req", - "verify", "verify_fail", "verify_req", - "vote1", "vote2", NULL - }; - - /* - * 1 arg, the message. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 3, objv, "msgtype msg"); - return (TCL_ERROR); - } - - ret = _CopyObjBytes(interp, objv[2], (void **)&rp, &len, &freerp); - if (ret != TCL_OK) { - Tcl_SetResult(interp, - "msgtype: bad control message", TCL_STATIC); - return (TCL_ERROR); - } - msgtype = rp->rectype; - msgname = NewStringObj(msgnames[msgtype], strlen(msgnames[msgtype])); - Tcl_SetObjResult(interp, msgname); - if (rp != NULL && freerp) - __os_free(NULL, rp); - return (TCL_OK); -} - -/* - * bdb_DbUpgrade -- - * Implements the DB->upgrade command. 
- */ -static int -bdb_DbUpgrade(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *bdbupg[] = { - "-dupsort", "-env", "--", NULL - }; - enum bdbupg { - TCL_DBUPG_DUPSORT, - TCL_DBUPG_ENV, - TCL_DBUPG_ENDARG - }; - DB_ENV *envp; - DB *dbp; - u_int32_t flags; - int endarg, i, optindex, result, ret; - char *arg, *db; - - envp = NULL; - dbp = NULL; - result = TCL_OK; - db = NULL; - flags = endarg = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? filename"); - return (TCL_ERROR); - } - - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], bdbupg, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum bdbupg)optindex) { - case TCL_DBUPG_DUPSORT: - flags |= DB_DUPSORT; - break; - case TCL_DBUPG_ENV: - arg = Tcl_GetStringFromObj(objv[i++], NULL); - envp = NAME_TO_ENV(arg); - if (envp == NULL) { - Tcl_SetResult(interp, - "db upgrade: illegal environment", - TCL_STATIC); - return (TCL_ERROR); - } - break; - case TCL_DBUPG_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * The remaining arg is the db filename. - */ - if (i == (objc - 1)) - db = Tcl_GetStringFromObj(objv[i++], NULL); - else { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename"); - result = TCL_ERROR; - goto error; - } - ret = db_create(&dbp, envp, 0); - if (ret) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_create"); - goto error; - } - - ret = dbp->upgrade(dbp, db, flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db upgrade"); -error: - if (dbp) - (void)dbp->close(dbp, 0); - return (result); -} - -/* - * tcl_bt_compare and tcl_dup_compare -- - * These two are basically identical internally, so may as well - * share code. The only differences are the name used in error - * reporting and the Tcl_Obj representing their respective procs. - */ -static int -tcl_bt_compare(dbp, dbta, dbtb) - DB *dbp; - const DBT *dbta, *dbtb; -{ - return (tcl_compare_callback(dbp, dbta, dbtb, - ((DBTCL_INFO *)dbp->api_internal)->i_btcompare, "bt_compare")); -} - -static int -tcl_dup_compare(dbp, dbta, dbtb) - DB *dbp; - const DBT *dbta, *dbtb; -{ - return (tcl_compare_callback(dbp, dbta, dbtb, - ((DBTCL_INFO *)dbp->api_internal)->i_dupcompare, "dup_compare")); -} - -/* - * tcl_compare_callback -- - * Tcl callback for set_bt_compare and set_dup_compare. What this - * function does is stuff the data fields of the two DBTs into Tcl ByteArray - * objects, then call the procedure stored in ip->i_btcompare on the two - * objects. Then we return that procedure's result as the comparison. - */ -static int -tcl_compare_callback(dbp, dbta, dbtb, procobj, errname) - DB *dbp; - const DBT *dbta, *dbtb; - Tcl_Obj *procobj; - char *errname; -{ - DBTCL_INFO *ip; - Tcl_Interp *interp; - Tcl_Obj *a, *b, *resobj, *objv[3]; - int result, cmp; - - ip = (DBTCL_INFO *)dbp->api_internal; - interp = ip->i_interp; - objv[0] = procobj; - - /* - * Create two ByteArray objects, with the two data we've been passed. - * This will involve a copy, which is unpleasantly slow, but there's - * little we can do to avoid this (I think). 
- */ - a = Tcl_NewByteArrayObj(dbta->data, (int)dbta->size); - Tcl_IncrRefCount(a); - b = Tcl_NewByteArrayObj(dbtb->data, (int)dbtb->size); - Tcl_IncrRefCount(b); - - objv[1] = a; - objv[2] = b; - - result = Tcl_EvalObjv(interp, 3, objv, 0); - if (result != TCL_OK) { - /* - * XXX - * If this or the next Tcl call fails, we're doomed. - * There's no way to return an error from comparison functions, - * no way to determine what the correct sort order is, and - * so no way to avoid corrupting the database if we proceed. - * We could play some games stashing return values on the - * DB handle, but it's not worth the trouble--no one with - * any sense is going to be using this other than for testing, - * and failure typically means that the bt_compare proc - * had a syntax error in it or something similarly dumb. - * - * So, drop core. If we're not running with diagnostic - * mode, panic--and always return a negative number. :-) - */ -panic: __db_err(dbp->dbenv, "Tcl %s callback failed", errname); - DB_ASSERT(0); - return (__db_panic(dbp->dbenv, DB_RUNRECOVERY)); - } - - resobj = Tcl_GetObjResult(interp); - result = Tcl_GetIntFromObj(interp, resobj, &cmp); - if (result != TCL_OK) - goto panic; - - Tcl_DecrRefCount(a); - Tcl_DecrRefCount(b); - return (cmp); -} - -/* - * tcl_h_hash -- - * Tcl callback for the hashing function. See tcl_compare_callback-- - * this works much the same way, only we're given a buffer and a length - * instead of two DBTs. - */ -static u_int32_t -tcl_h_hash(dbp, buf, len) - DB *dbp; - const void *buf; - u_int32_t len; -{ - DBTCL_INFO *ip; - Tcl_Interp *interp; - Tcl_Obj *objv[2]; - int result, hval; - - ip = (DBTCL_INFO *)dbp->api_internal; - interp = ip->i_interp; - objv[0] = ip->i_hashproc; - - /* - * Create a ByteArray for the buffer. 
- */ - objv[1] = Tcl_NewByteArrayObj((void *)buf, (int)len); - Tcl_IncrRefCount(objv[1]); - result = Tcl_EvalObjv(interp, 2, objv, 0); - if (result != TCL_OK) - goto panic; - - result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval); - if (result != TCL_OK) - goto panic; - - Tcl_DecrRefCount(objv[1]); - return ((u_int32_t)hval); - -panic: /* - * We drop core on error, in diagnostic mode. See the comment in - * tcl_compare_callback. - */ - __db_err(dbp->dbenv, "Tcl h_hash callback failed"); - (void)__db_panic(dbp->dbenv, DB_RUNRECOVERY); - - DB_ASSERT(0); - - /* NOTREACHED */ - return (0); -} - -/* - * tcl_rep_send -- - * Replication send callback. - */ -static int -tcl_rep_send(dbenv, control, rec, lsnp, eid, flags) - DB_ENV *dbenv; - const DBT *control, *rec; - const DB_LSN *lsnp; - int eid; - u_int32_t flags; -{ -#define TCLDB_SENDITEMS 7 - DBTCL_INFO *ip; - Tcl_Interp *interp; - Tcl_Obj *control_o, *eid_o, *flags_o, *lsn_o, *origobj, *rec_o; - Tcl_Obj *myobjv[2], *resobj, *objv[TCLDB_SENDITEMS]; - int myobjc, result, ret; - - ip = (DBTCL_INFO *)dbenv->app_private; - interp = ip->i_interp; - objv[0] = ip->i_rep_send; - - control_o = Tcl_NewByteArrayObj(control->data, (int)control->size); - Tcl_IncrRefCount(control_o); - - rec_o = Tcl_NewByteArrayObj(rec->data, (int)rec->size); - Tcl_IncrRefCount(rec_o); - - eid_o = Tcl_NewIntObj(eid); - Tcl_IncrRefCount(eid_o); - - if (LF_ISSET(DB_REP_PERMANENT)) - flags_o = NewStringObj("perm", strlen("perm")); - else if (LF_ISSET(DB_REP_NOBUFFER)) - flags_o = NewStringObj("nobuffer", strlen("nobuffer")); - else - flags_o = NewStringObj("none", strlen("none")); - Tcl_IncrRefCount(flags_o); - - myobjc = 2; - myobjv[0] = Tcl_NewLongObj((long)lsnp->file); - myobjv[1] = Tcl_NewLongObj((long)lsnp->offset); - lsn_o = Tcl_NewListObj(myobjc, myobjv); - - objv[1] = control_o; - objv[2] = rec_o; - objv[3] = ip->i_rep_eid; /* From ID */ - objv[4] = eid_o; /* To ID */ - objv[5] = flags_o; /* Flags */ - objv[6] = lsn_o; /* LSN */ 
- - /* - * We really want to return the original result to the - * user. So, save the result obj here, and then after - * we've taken care of the Tcl_EvalObjv, set the result - * back to this original result. - */ - origobj = Tcl_GetObjResult(interp); - Tcl_IncrRefCount(origobj); - result = Tcl_EvalObjv(interp, TCLDB_SENDITEMS, objv, 0); - if (result != TCL_OK) { - /* - * XXX - * This probably isn't the right error behavior, but - * this error should only happen if the Tcl callback is - * somehow invalid, which is a fatal scripting bug. - */ -err: __db_err(dbenv, "Tcl rep_send failure"); - return (EINVAL); - } - - resobj = Tcl_GetObjResult(interp); - result = Tcl_GetIntFromObj(interp, resobj, &ret); - if (result != TCL_OK) - goto err; - - Tcl_SetObjResult(interp, origobj); - Tcl_DecrRefCount(origobj); - Tcl_DecrRefCount(control_o); - Tcl_DecrRefCount(rec_o); - Tcl_DecrRefCount(eid_o); - Tcl_DecrRefCount(flags_o); - - return (ret); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_db_malloc, tcl_db_realloc, tcl_db_free -- - * Tcl-local malloc, realloc, and free functions to use for user data - * to exercise umalloc/urealloc/ufree. Allocate the memory as a Tcl object - * so we're sure to exacerbate and catch any shared-library issues. 
- */ -static void * -tcl_db_malloc(size) - size_t size; -{ - Tcl_Obj *obj; - void *buf; - - obj = Tcl_NewObj(); - if (obj == NULL) - return (NULL); - Tcl_IncrRefCount(obj); - - Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); - buf = Tcl_GetString(obj); - memcpy(buf, &obj, sizeof(&obj)); - - buf = (Tcl_Obj **)buf + 1; - return (buf); -} - -static void * -tcl_db_realloc(ptr, size) - void *ptr; - size_t size; -{ - Tcl_Obj *obj; - - if (ptr == NULL) - return (tcl_db_malloc(size)); - - obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1); - Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); - - ptr = Tcl_GetString(obj); - memcpy(ptr, &obj, sizeof(&obj)); - - ptr = (Tcl_Obj **)ptr + 1; - return (ptr); -} - -static void -tcl_db_free(ptr) - void *ptr; -{ - Tcl_Obj *obj; - - obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1); - Tcl_DecrRefCount(obj); -} -#endif diff --git a/storage/bdb/tcl/tcl_dbcursor.c b/storage/bdb/tcl/tcl_dbcursor.c deleted file mode 100644 index f0ca788f4ca..00000000000 --- a/storage/bdb/tcl/tcl_dbcursor.c +++ /dev/null @@ -1,940 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_dbcursor.c,v 11.65 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *)); -static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *, int)); -static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *)); - -/* - * PUBLIC: int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - * - * dbc_cmd -- - * Implements the cursor command. 
- */ -int -dbc_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Cursor handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *dbccmds[] = { -#ifdef CONFIG_TEST - "pget", -#endif - "close", - "del", - "dup", - "get", - "put", - NULL - }; - enum dbccmds { -#ifdef CONFIG_TEST - DBCPGET, -#endif - DBCCLOSE, - DBCDELETE, - DBCDUP, - DBCGET, - DBCPUT - }; - DBC *dbc; - DBTCL_INFO *dbip; - int cmdindex, result, ret; - - Tcl_ResetResult(interp); - dbc = (DBC *)clientData; - dbip = _PtrToInfo((void *)dbc); - result = TCL_OK; - - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (dbc == NULL) { - Tcl_SetResult(interp, "NULL dbc pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (dbip == NULL) { - Tcl_SetResult(interp, "NULL dbc info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the berkdbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, objv[1], dbccmds, "command", - TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - switch ((enum dbccmds)cmdindex) { -#ifdef CONFIG_TEST - case DBCPGET: - result = tcl_DbcGet(interp, objc, objv, dbc, 1); - break; -#endif - case DBCCLOSE: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbc->c_close(dbc); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "dbc close"); - if (result == TCL_OK) { - (void)Tcl_DeleteCommand(interp, dbip->i_name); - _DeleteInfo(dbip); - } - break; - case DBCDELETE: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbc->c_del(dbc, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_DBCDEL(ret), - "dbc delete"); - break; - case DBCDUP: - result = tcl_DbcDup(interp, objc, objv, dbc); - break; - case DBCGET: - result = tcl_DbcGet(interp, objc, objv, dbc, 0); - break; - case DBCPUT: - result = tcl_DbcPut(interp, objc, objv, dbc); - break; - } - return (result); -} - -/* - * tcl_DbcPut -- - */ -static int -tcl_DbcPut(interp, objc, objv, dbc) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBC *dbc; /* Cursor pointer */ -{ - static const char *dbcutopts[] = { -#ifdef CONFIG_TEST - "-nodupdata", -#endif - "-after", - "-before", - "-current", - "-keyfirst", - "-keylast", - "-partial", - NULL - }; - enum dbcutopts { -#ifdef CONFIG_TEST - DBCPUT_NODUPDATA, -#endif - DBCPUT_AFTER, - DBCPUT_BEFORE, - DBCPUT_CURRENT, - DBCPUT_KEYFIRST, - DBCPUT_KEYLAST, - DBCPUT_PART - }; - DB *thisdbp; - DBT key, data; - DBTCL_INFO *dbcip, *dbip; - DBTYPE type; - Tcl_Obj **elemv, *res; - void *dtmp, *ktmp; - db_recno_t recno; - u_int32_t flag; - int elemc, freekey, freedata, i, optindex, result, ret; - - COMPQUIET(dtmp, NULL); - COMPQUIET(ktmp, NULL); - - result = TCL_OK; - flag = 0; - freekey = freedata = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?"); - return (TCL_ERROR); - } - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - /* - * Get the command name index from the object based on the options - * defined above. - */ - i = 2; - while (i < (objc - 1)) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbcutopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get - * an errant error message if there is another error. 
- */ - if (IS_HELP(objv[i]) == TCL_OK) { - result = TCL_OK; - goto out; - } - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbcutopts)optindex) { -#ifdef CONFIG_TEST - case DBCPUT_NODUPDATA: - FLAG_CHECK(flag); - flag = DB_NODUPDATA; - break; -#endif - case DBCPUT_AFTER: - FLAG_CHECK(flag); - flag = DB_AFTER; - break; - case DBCPUT_BEFORE: - FLAG_CHECK(flag); - flag = DB_BEFORE; - break; - case DBCPUT_CURRENT: - FLAG_CHECK(flag); - flag = DB_CURRENT; - break; - case DBCPUT_KEYFIRST: - FLAG_CHECK(flag); - flag = DB_KEYFIRST; - break; - case DBCPUT_KEYLAST: - FLAG_CHECK(flag); - flag = DB_KEYLAST; - break; - case DBCPUT_PART: - if (i > (objc - 2)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-partial {offset length}?"); - result = TCL_ERROR; - break; - } - /* - * Get sublist as {offset length} - */ - result = Tcl_ListObjGetElements(interp, objv[i++], - &elemc, &elemv); - if (elemc != 2) { - Tcl_SetResult(interp, - "List must be {offset length}", TCL_STATIC); - result = TCL_ERROR; - break; - } - data.flags |= DB_DBT_PARTIAL; - result = _GetUInt32(interp, elemv[0], &data.doff); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, elemv[1], &data.dlen); - /* - * NOTE: We don't check result here because all we'd - * do is break anyway, and we are doing that. If you - * add code here, you WILL need to add the check - * for result. (See the check for save.doff, a few - * lines above and copy that.) - */ - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - /* - * We need to determine if we are a recno database or not. If we are, - * then key.data is a recno, not a string. 
- */ - dbcip = _PtrToInfo(dbc); - if (dbcip == NULL) - type = DB_UNKNOWN; - else { - dbip = dbcip->i_parent; - if (dbip == NULL) { - Tcl_SetResult(interp, "Cursor without parent database", - TCL_STATIC); - result = TCL_ERROR; - return (result); - } - thisdbp = dbip->i_dbp; - (void)thisdbp->get_type(thisdbp, &type); - } - /* - * When we get here, we better have: - * 1 arg if -after, -before or -current - * 2 args in all other cases - */ - if (flag == DB_AFTER || flag == DB_BEFORE || flag == DB_CURRENT) { - if (i != (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-args? data"); - result = TCL_ERROR; - goto out; - } - /* - * We want to get the key back, so we need to set - * up the location to get it back in. - */ - if (type == DB_RECNO || type == DB_QUEUE) { - recno = 0; - key.data = &recno; - key.size = sizeof(db_recno_t); - } - } else { - if (i != (objc - 2)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-args? key data"); - result = TCL_ERROR; - goto out; - } - if (type == DB_RECNO || type == DB_QUEUE) { - result = _GetUInt32(interp, objv[objc-2], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - return (result); - } else { - ret = _CopyObjBytes(interp, objv[objc-2], &ktmp, - &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCPUT(ret), "dbc put"); - return (result); - } - key.data = ktmp; - } - } - ret = _CopyObjBytes(interp, objv[objc-1], &dtmp, - &data.size, &freedata); - data.data = dtmp; - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCPUT(ret), "dbc put"); - goto out; - } - _debug_check(); - ret = dbc->c_put(dbc, &key, &data, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_DBCPUT(ret), - "dbc put"); - if (ret == 0 && - (flag == DB_AFTER || flag == DB_BEFORE) && type == DB_RECNO) { - res = Tcl_NewWideIntObj((Tcl_WideInt)*(db_recno_t *)key.data); - Tcl_SetObjResult(interp, res); - } -out: - if (freedata) - __os_free(NULL, dtmp); - if (freekey) - 
__os_free(NULL, ktmp); - return (result); -} - -/* - * tcl_dbc_get -- - */ -static int -tcl_DbcGet(interp, objc, objv, dbc, ispget) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBC *dbc; /* Cursor pointer */ - int ispget; /* 1 for pget, 0 for get */ -{ - static const char *dbcgetopts[] = { -#ifdef CONFIG_TEST - "-degree_2", - "-dirty", - "-get_both_range", - "-multi", - "-multi_key", -#endif - "-current", - "-first", - "-get_both", - "-get_recno", - "-join_item", - "-last", - "-next", - "-nextdup", - "-nextnodup", - "-partial", - "-prev", - "-prevnodup", - "-rmw", - "-set", - "-set_range", - "-set_recno", - NULL - }; - enum dbcgetopts { -#ifdef CONFIG_TEST - DBCGET_DEGREE2, - DBCGET_DIRTY, - DBCGET_BOTH_RANGE, - DBCGET_MULTI, - DBCGET_MULTI_KEY, -#endif - DBCGET_CURRENT, - DBCGET_FIRST, - DBCGET_BOTH, - DBCGET_RECNO, - DBCGET_JOIN, - DBCGET_LAST, - DBCGET_NEXT, - DBCGET_NEXTDUP, - DBCGET_NEXTNODUP, - DBCGET_PART, - DBCGET_PREV, - DBCGET_PREVNODUP, - DBCGET_RMW, - DBCGET_SET, - DBCGET_SETRANGE, - DBCGET_SETRECNO - }; - DB *thisdbp; - DBT key, data, pdata; - DBTCL_INFO *dbcip, *dbip; - DBTYPE ptype, type; - Tcl_Obj **elemv, *myobj, *retlist; - void *dtmp, *ktmp; - db_recno_t precno, recno; - u_int32_t flag, op; - int elemc, freekey, freedata, i, optindex, result, ret; -#ifdef CONFIG_TEST - int bufsize; - - bufsize = 0; -#endif - COMPQUIET(dtmp, NULL); - COMPQUIET(ktmp, NULL); - - result = TCL_OK; - flag = 0; - freekey = freedata = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?"); - return (TCL_ERROR); - } - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - /* - * Get the command name index from the object based on the options - * defined above. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbcgetopts, - "option", TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get - * an errant error message if there is another error. - */ - if (IS_HELP(objv[i]) == TCL_OK) { - result = TCL_OK; - goto out; - } - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbcgetopts)optindex) { -#ifdef CONFIG_TEST - case DBCGET_DEGREE2: - flag |= DB_DEGREE_2; - break; - case DBCGET_DIRTY: - flag |= DB_DIRTY_READ; - break; - case DBCGET_BOTH_RANGE: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_GET_BOTH_RANGE; - break; - case DBCGET_MULTI: - flag |= DB_MULTIPLE; - result = Tcl_GetIntFromObj(interp, objv[i], &bufsize); - if (result != TCL_OK) - goto out; - i++; - break; - case DBCGET_MULTI_KEY: - flag |= DB_MULTIPLE_KEY; - result = Tcl_GetIntFromObj(interp, objv[i], &bufsize); - if (result != TCL_OK) - goto out; - i++; - break; -#endif - case DBCGET_RMW: - flag |= DB_RMW; - break; - case DBCGET_CURRENT: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_CURRENT; - break; - case DBCGET_FIRST: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_FIRST; - break; - case DBCGET_LAST: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_LAST; - break; - case DBCGET_NEXT: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_NEXT; - break; - case DBCGET_PREV: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_PREV; - break; - case DBCGET_PREVNODUP: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_PREV_NODUP; - break; - case DBCGET_NEXTNODUP: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_NEXT_NODUP; - break; - case DBCGET_NEXTDUP: - FLAG_CHECK2(flag, - 
DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_NEXT_DUP; - break; - case DBCGET_BOTH: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_GET_BOTH; - break; - case DBCGET_RECNO: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_GET_RECNO; - break; - case DBCGET_JOIN: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_JOIN_ITEM; - break; - case DBCGET_SET: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_SET; - break; - case DBCGET_SETRANGE: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_SET_RANGE; - break; - case DBCGET_SETRECNO: - FLAG_CHECK2(flag, - DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ); - flag |= DB_SET_RECNO; - break; - case DBCGET_PART: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-partial {offset length}?"); - result = TCL_ERROR; - break; - } - /* - * Get sublist as {offset length} - */ - result = Tcl_ListObjGetElements(interp, objv[i++], - &elemc, &elemv); - if (elemc != 2) { - Tcl_SetResult(interp, - "List must be {offset length}", TCL_STATIC); - result = TCL_ERROR; - break; - } - data.flags |= DB_DBT_PARTIAL; - result = _GetUInt32(interp, elemv[0], &data.doff); - if (result != TCL_OK) - break; - result = _GetUInt32(interp, elemv[1], &data.dlen); - /* - * NOTE: We don't check result here because all we'd - * do is break anyway, and we are doing that. If you - * add code here, you WILL need to add the check - * for result. (See the check for save.doff, a few - * lines above and copy that.) - */ - break; - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - /* - * We need to determine if we are a recno database - * or not. If we are, then key.data is a recno, not - * a string. 
- */ - dbcip = _PtrToInfo(dbc); - if (dbcip == NULL) { - type = DB_UNKNOWN; - ptype = DB_UNKNOWN; - } else { - dbip = dbcip->i_parent; - if (dbip == NULL) { - Tcl_SetResult(interp, "Cursor without parent database", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } - thisdbp = dbip->i_dbp; - (void)thisdbp->get_type(thisdbp, &type); - if (ispget && thisdbp->s_primary != NULL) - (void)thisdbp-> - s_primary->get_type(thisdbp->s_primary, &ptype); - else - ptype = DB_UNKNOWN; - } - /* - * When we get here, we better have: - * 2 args, key and data if GET_BOTH/GET_BOTH_RANGE was specified. - * 1 arg if -set, -set_range or -set_recno - * 0 in all other cases. - */ - op = flag & DB_OPFLAGS_MASK; - switch (op) { - case DB_GET_BOTH: -#ifdef CONFIG_TEST - case DB_GET_BOTH_RANGE: -#endif - if (i != (objc - 2)) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-args? -get_both key data"); - result = TCL_ERROR; - goto out; - } else { - if (type == DB_RECNO || type == DB_QUEUE) { - result = _GetUInt32( - interp, objv[objc-2], &recno); - if (result == TCL_OK) { - key.data = &recno; - key.size = sizeof(db_recno_t); - } else - goto out; - } else { - /* - * Some get calls (SET_*) can change the - * key pointers. So, we need to store - * the allocated key space in a tmp. 
- */ - ret = _CopyObjBytes(interp, objv[objc-2], - &ktmp, &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCGET(ret), "dbc get"); - return (result); - } - key.data = ktmp; - } - if (ptype == DB_RECNO || ptype == DB_QUEUE) { - result = _GetUInt32( - interp, objv[objc-1], &precno); - if (result == TCL_OK) { - data.data = &precno; - data.size = sizeof(db_recno_t); - } else - goto out; - } else { - ret = _CopyObjBytes(interp, objv[objc-1], - &dtmp, &data.size, &freedata); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCGET(ret), "dbc get"); - goto out; - } - data.data = dtmp; - } - } - break; - case DB_SET: - case DB_SET_RANGE: - case DB_SET_RECNO: - if (i != (objc - 1)) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? key"); - result = TCL_ERROR; - goto out; - } -#ifdef CONFIG_TEST - if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) { - (void)__os_malloc(NULL, (size_t)bufsize, &data.data); - data.ulen = (u_int32_t)bufsize; - data.flags |= DB_DBT_USERMEM; - } else -#endif - data.flags |= DB_DBT_MALLOC; - if (op == DB_SET_RECNO || - type == DB_RECNO || type == DB_QUEUE) { - result = _GetUInt32(interp, objv[objc - 1], &recno); - key.data = &recno; - key.size = sizeof(db_recno_t); - } else { - /* - * Some get calls (SET_*) can change the - * key pointers. So, we need to store - * the allocated key space in a tmp. 
- */ - ret = _CopyObjBytes(interp, objv[objc-1], - &ktmp, &key.size, &freekey); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_DBCGET(ret), "dbc get"); - return (result); - } - key.data = ktmp; - } - break; - default: - if (i != objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args?"); - result = TCL_ERROR; - goto out; - } - key.flags |= DB_DBT_MALLOC; -#ifdef CONFIG_TEST - if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) { - (void)__os_malloc(NULL, (size_t)bufsize, &data.data); - data.ulen = (u_int32_t)bufsize; - data.flags |= DB_DBT_USERMEM; - } else -#endif - data.flags |= DB_DBT_MALLOC; - } - - _debug_check(); - memset(&pdata, 0, sizeof(DBT)); - if (ispget) { - F_SET(&pdata, DB_DBT_MALLOC); - ret = dbc->c_pget(dbc, &key, &data, &pdata, flag); - } else - ret = dbc->c_get(dbc, &key, &data, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), "dbc get"); - if (result == TCL_ERROR) - goto out; - - retlist = Tcl_NewListObj(0, NULL); - if (ret != 0) - goto out1; - if (op == DB_GET_RECNO) { - recno = *((db_recno_t *)data.data); - myobj = Tcl_NewWideIntObj((Tcl_WideInt)recno); - result = Tcl_ListObjAppendElement(interp, retlist, myobj); - } else { - if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) - result = _SetMultiList(interp, - retlist, &key, &data, type, flag); - else if ((type == DB_RECNO || type == DB_QUEUE) && - key.data != NULL) { - if (ispget) - result = _Set3DBTList(interp, retlist, &key, 1, - &data, - (ptype == DB_RECNO || ptype == DB_QUEUE), - &pdata); - else - result = _SetListRecnoElem(interp, retlist, - *(db_recno_t *)key.data, - data.data, data.size); - } else { - if (ispget) - result = _Set3DBTList(interp, retlist, &key, 0, - &data, - (ptype == DB_RECNO || ptype == DB_QUEUE), - &pdata); - else - result = _SetListElem(interp, retlist, - key.data, key.size, data.data, data.size); - } - } - if (key.data != NULL && F_ISSET(&key, DB_DBT_MALLOC)) - __os_ufree(dbc->dbp->dbenv, key.data); - if (data.data != NULL && F_ISSET(&data, 
DB_DBT_MALLOC)) - __os_ufree(dbc->dbp->dbenv, data.data); - if (pdata.data != NULL && F_ISSET(&pdata, DB_DBT_MALLOC)) - __os_ufree(dbc->dbp->dbenv, pdata.data); -out1: - if (result == TCL_OK) - Tcl_SetObjResult(interp, retlist); -out: - if (data.data != NULL && flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) - __os_free(dbc->dbp->dbenv, data.data); - if (freedata) - __os_free(NULL, dtmp); - if (freekey) - __os_free(NULL, ktmp); - return (result); - -} - -/* - * tcl_DbcDup -- - */ -static int -tcl_DbcDup(interp, objc, objv, dbc) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DBC *dbc; /* Cursor pointer */ -{ - static const char *dbcdupopts[] = { - "-position", - NULL - }; - enum dbcdupopts { - DBCDUP_POS - }; - DBC *newdbc; - DBTCL_INFO *dbcip, *newdbcip, *dbip; - Tcl_Obj *res; - u_int32_t flag; - int i, optindex, result, ret; - char newname[MSG_SIZE]; - - result = TCL_OK; - flag = 0; - res = NULL; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args?"); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the options - * defined above. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], dbcdupopts, - "option", TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get - * an errant error message if there is another error. - */ - if (IS_HELP(objv[i]) == TCL_OK) { - result = TCL_OK; - goto out; - } - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum dbcdupopts)optindex) { - case DBCDUP_POS: - flag = DB_POSITION; - break; - } - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - /* - * We need to determine if we are a recno database - * or not. If we are, then key.data is a recno, not - * a string. 
- */ - dbcip = _PtrToInfo(dbc); - if (dbcip == NULL) { - Tcl_SetResult(interp, "Cursor without info structure", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } else { - dbip = dbcip->i_parent; - if (dbip == NULL) { - Tcl_SetResult(interp, "Cursor without parent database", - TCL_STATIC); - result = TCL_ERROR; - goto out; - } - } - /* - * Now duplicate the cursor. If successful, we need to create - * a new cursor command. - */ - snprintf(newname, sizeof(newname), - "%s.c%d", dbip->i_name, dbip->i_dbdbcid); - newdbcip = _NewInfo(interp, NULL, newname, I_DBC); - if (newdbcip != NULL) { - ret = dbc->c_dup(dbc, &newdbc, flag); - if (ret == 0) { - dbip->i_dbdbcid++; - newdbcip->i_parent = dbip; - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)dbc_Cmd, - (ClientData)newdbc, NULL); - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(newdbcip, newdbc); - Tcl_SetObjResult(interp, res); - } else { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db dup"); - _DeleteInfo(newdbcip); - } - } else { - Tcl_SetResult(interp, "Could not set up info", TCL_STATIC); - result = TCL_ERROR; - } -out: - return (result); - -} diff --git a/storage/bdb/tcl/tcl_env.c b/storage/bdb/tcl/tcl_env.c deleted file mode 100644 index e513b961d22..00000000000 --- a/storage/bdb/tcl/tcl_env.c +++ /dev/null @@ -1,2164 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_env.c,v 11.121 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/db_shash.h" -#include "dbinc/lock.h" -#include "dbinc/txn.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *)); -static int env_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_GetFlags __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_GetOpenFlag - __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_GetLockDetect - __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_GetTimeout __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); -static int env_GetVerbose __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - -/* - * PUBLIC: int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - * - * env_Cmd -- - * Implements the "env" command. - */ -int -env_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Env handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *envcmds[] = { -#ifdef CONFIG_TEST - "attributes", - "errfile", - "errpfx", - "lock_detect", - "lock_id", - "lock_id_free", - "lock_id_set", - "lock_get", - "lock_stat", - "lock_timeout", - "lock_vec", - "log_archive", - "log_compare", - "log_cursor", - "log_file", - "log_flush", - "log_get", - "log_put", - "log_stat", - "mpool", - "mpool_stat", - "mpool_sync", - "mpool_trickle", - "mutex", - "rep_elect", - "rep_flush", - "rep_limit", - "rep_process_message", - "rep_request", - "rep_start", - "rep_stat", - "rpcid", - "set_flags", - "test", - "txn_id_set", - "txn_recover", - "txn_stat", - "txn_timeout", - "verbose", -#endif - "close", - "dbremove", - "dbrename", - "get_cachesize", - "get_data_dirs", - "get_encrypt_flags", - "get_errpfx", - "get_flags", - "get_home", - "get_lg_bsize", - "get_lg_dir", - "get_lg_max", - "get_lg_regionmax", - "get_lk_detect", - "get_lk_max_lockers", - "get_lk_max_locks", - "get_lk_max_objects", - "get_mp_max_openfd", - "get_mp_max_write", - "get_mp_mmapsize", - "get_open_flags", - "get_rep_limit", - "get_shm_key", - "get_tas_spins", - "get_timeout", - "get_tmp_dir", - "get_tx_max", - "get_tx_timestamp", - "get_verbose", - "txn", - "txn_checkpoint", - NULL - }; - enum envcmds { -#ifdef CONFIG_TEST - ENVATTR, - ENVERRFILE, - ENVERRPFX, - ENVLKDETECT, - ENVLKID, - ENVLKFREEID, - ENVLKSETID, - ENVLKGET, - ENVLKSTAT, - ENVLKTIMEOUT, - ENVLKVEC, - ENVLOGARCH, - ENVLOGCMP, - ENVLOGCURSOR, - ENVLOGFILE, - ENVLOGFLUSH, - ENVLOGGET, - ENVLOGPUT, - ENVLOGSTAT, - ENVMP, - ENVMPSTAT, - ENVMPSYNC, - ENVTRICKLE, - ENVMUTEX, - ENVREPELECT, - ENVREPFLUSH, - ENVREPLIMIT, - ENVREPPROCMESS, - ENVREPREQUEST, - ENVREPSTART, - ENVREPSTAT, - ENVRPCID, - ENVSETFLAGS, - ENVTEST, - ENVTXNSETID, - ENVTXNRECOVER, - ENVTXNSTAT, - ENVTXNTIMEOUT, - ENVVERB, -#endif - ENVCLOSE, - ENVDBREMOVE, - ENVDBRENAME, - ENVGETCACHESIZE, - ENVGETDATADIRS, - ENVGETENCRYPTFLAGS, - ENVGETERRPFX, - 
ENVGETFLAGS, - ENVGETHOME, - ENVGETLGBSIZE, - ENVGETLGDIR, - ENVGETLGMAX, - ENVGETLGREGIONMAX, - ENVGETLKDETECT, - ENVGETLKMAXLOCKERS, - ENVGETLKMAXLOCKS, - ENVGETLKMAXOBJECTS, - ENVGETMPMAXOPENFD, - ENVGETMPMAXWRITE, - ENVGETMPMMAPSIZE, - ENVGETOPENFLAG, - ENVGETREPLIMIT, - ENVGETSHMKEY, - ENVGETTASSPINS, - ENVGETTIMEOUT, - ENVGETTMPDIR, - ENVGETTXMAX, - ENVGETTXTIMESTAMP, - ENVGETVERBOSE, - ENVTXN, - ENVTXNCKP - }; - DBTCL_INFO *envip; - DB_ENV *dbenv; - Tcl_Obj *res, *myobjv[3]; - char newname[MSG_SIZE]; - int cmdindex, i, intvalue1, intvalue2, ncache, result, ret; - u_int32_t bytes, gbytes, value; - size_t size; - long shm_key; - time_t timeval; - const char *strval, **dirs; -#ifdef CONFIG_TEST - DBTCL_INFO *logcip; - DB_LOGC *logc; - char *strarg; - u_int32_t lockid; - long newval, otherval; -#endif - - Tcl_ResetResult(interp); - dbenv = (DB_ENV *)clientData; - envip = _PtrToInfo((void *)dbenv); - result = TCL_OK; - memset(newname, 0, MSG_SIZE); - - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (dbenv == NULL) { - Tcl_SetResult(interp, "NULL env pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (envip == NULL) { - Tcl_SetResult(interp, "NULL env info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the berkdbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, objv[1], envcmds, "command", - TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - res = NULL; - switch ((enum envcmds)cmdindex) { -#ifdef CONFIG_TEST - case ENVLKDETECT: - result = tcl_LockDetect(interp, objc, objv, dbenv); - break; - case ENVLKSTAT: - result = tcl_LockStat(interp, objc, objv, dbenv); - break; - case ENVLKTIMEOUT: - result = tcl_LockTimeout(interp, objc, objv, dbenv); - break; - case ENVLKID: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = dbenv->lock_id(dbenv, &lockid); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock_id"); - if (result == TCL_OK) - res = Tcl_NewWideIntObj((Tcl_WideInt)lockid); - break; - case ENVLKFREEID: - if (objc != 3) { - Tcl_WrongNumArgs(interp, 3, objv, NULL); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[2], &newval); - if (result != TCL_OK) - return (result); - ret = dbenv->lock_id_free(dbenv, (u_int32_t)newval); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock id_free"); - break; - case ENVLKSETID: - if (objc != 4) { - Tcl_WrongNumArgs(interp, 4, objv, "current max"); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[2], &newval); - if (result != TCL_OK) - return (result); - result = Tcl_GetLongFromObj(interp, objv[3], &otherval); - if (result != TCL_OK) - return (result); - ret = __lock_id_set(dbenv, - (u_int32_t)newval, (u_int32_t)otherval); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock id_free"); - break; - case ENVLKGET: - result = tcl_LockGet(interp, objc, objv, dbenv); - break; - case ENVLKVEC: - result = tcl_LockVec(interp, objc, objv, dbenv); - break; - case ENVLOGARCH: - result = tcl_LogArchive(interp, objc, objv, dbenv); - break; - case ENVLOGCMP: - result = tcl_LogCompare(interp, objc, objv); - break; - case ENVLOGCURSOR: - snprintf(newname, sizeof(newname), - "%s.logc%d", envip->i_name, envip->i_envlogcid); - logcip = _NewInfo(interp, NULL, newname, I_LOGC); - if (logcip != NULL) { - ret = dbenv->log_cursor(dbenv, &logc, 0); - if (ret == 0) { - result = TCL_OK; - envip->i_envlogcid++; - /* - * We do NOT want to set i_parent to - * envip here because log cursors are - * not "tied" to the env. That is, they - * are NOT closed if the env is closed. 
- */ - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)logc_Cmd, - (ClientData)logc, NULL); - res = NewStringObj(newname, strlen(newname)); - _SetInfoData(logcip, logc); - } else { - _DeleteInfo(logcip); - result = _ErrorSetup(interp, ret, "log cursor"); - } - } else { - Tcl_SetResult(interp, - "Could not set up info", TCL_STATIC); - result = TCL_ERROR; - } - break; - case ENVLOGFILE: - result = tcl_LogFile(interp, objc, objv, dbenv); - break; - case ENVLOGFLUSH: - result = tcl_LogFlush(interp, objc, objv, dbenv); - break; - case ENVLOGGET: - result = tcl_LogGet(interp, objc, objv, dbenv); - break; - case ENVLOGPUT: - result = tcl_LogPut(interp, objc, objv, dbenv); - break; - case ENVLOGSTAT: - result = tcl_LogStat(interp, objc, objv, dbenv); - break; - case ENVMPSTAT: - result = tcl_MpStat(interp, objc, objv, dbenv); - break; - case ENVMPSYNC: - result = tcl_MpSync(interp, objc, objv, dbenv); - break; - case ENVTRICKLE: - result = tcl_MpTrickle(interp, objc, objv, dbenv); - break; - case ENVMP: - result = tcl_Mp(interp, objc, objv, dbenv, envip); - break; - case ENVREPELECT: - result = tcl_RepElect(interp, objc, objv, dbenv); - break; - case ENVREPFLUSH: - result = tcl_RepFlush(interp, objc, objv, dbenv); - break; - case ENVREPLIMIT: - result = tcl_RepLimit(interp, objc, objv, dbenv); - break; - case ENVREPPROCMESS: - result = tcl_RepProcessMessage(interp, objc, objv, dbenv); - break; - case ENVREPREQUEST: - result = tcl_RepRequest(interp, objc, objv, dbenv); - break; - case ENVREPSTART: - result = tcl_RepStart(interp, objc, objv, dbenv); - break; - case ENVREPSTAT: - result = tcl_RepStat(interp, objc, objv, dbenv); - break; - case ENVRPCID: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - /* - * !!! Retrieve the client ID from the dbp handle directly. - * This is for testing purposes only. It is dbp-private data. 
- */ - res = Tcl_NewLongObj((long)dbenv->cl_id); - break; - case ENVTXNSETID: - if (objc != 4) { - Tcl_WrongNumArgs(interp, 4, objv, "current max"); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[2], &newval); - if (result != TCL_OK) - return (result); - result = Tcl_GetLongFromObj(interp, objv[3], &otherval); - if (result != TCL_OK) - return (result); - ret = __txn_id_set(dbenv, - (u_int32_t)newval, (u_int32_t)otherval); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn setid"); - break; - case ENVTXNRECOVER: - result = tcl_TxnRecover(interp, objc, objv, dbenv, envip); - break; - case ENVTXNSTAT: - result = tcl_TxnStat(interp, objc, objv, dbenv); - break; - case ENVTXNTIMEOUT: - result = tcl_TxnTimeout(interp, objc, objv, dbenv); - break; - case ENVMUTEX: - result = tcl_Mutex(interp, objc, objv, dbenv, envip); - break; - case ENVATTR: - result = tcl_EnvAttr(interp, objc, objv, dbenv); - break; - case ENVERRFILE: - /* - * One args for this. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "errfile"); - return (TCL_ERROR); - } - strarg = Tcl_GetStringFromObj(objv[2], NULL); - tcl_EnvSetErrfile(interp, dbenv, envip, strarg); - result = TCL_OK; - break; - case ENVERRPFX: - /* - * One args for this. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "pfx"); - return (TCL_ERROR); - } - strarg = Tcl_GetStringFromObj(objv[2], NULL); - result = tcl_EnvSetErrpfx(interp, dbenv, envip, strarg); - break; - case ENVSETFLAGS: - /* - * Two args for this. Error if different. - */ - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "which on|off"); - return (TCL_ERROR); - } - result = tcl_EnvSetFlags(interp, dbenv, objv[2], objv[3]); - break; - case ENVTEST: - result = tcl_EnvTest(interp, objc, objv, dbenv); - break; - case ENVVERB: - /* - * Two args for this. Error if different. 
- */ - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - result = tcl_EnvVerbose(interp, dbenv, objv[2], objv[3]); - break; -#endif - case ENVCLOSE: - /* - * No args for this. Error if there are some. - */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - /* - * Any transactions will be aborted, and an mpools - * closed automatically. We must delete any txn - * and mp widgets we have here too for this env. - * NOTE: envip is freed when we come back from - * this function. Set it to NULL to make sure no - * one tries to use it later. - */ - _debug_check(); - ret = dbenv->close(dbenv, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env close"); - _EnvInfoDelete(interp, envip); - envip = NULL; - break; - case ENVDBREMOVE: - result = env_DbRemove(interp, objc, objv, dbenv); - break; - case ENVDBRENAME: - result = env_DbRename(interp, objc, objv, dbenv); - break; - case ENVGETCACHESIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_cachesize(dbenv, &gbytes, &bytes, &ncache); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_cachesize")) == TCL_OK) { - myobjv[0] = Tcl_NewLongObj((long)gbytes); - myobjv[1] = Tcl_NewLongObj((long)bytes); - myobjv[2] = Tcl_NewLongObj((long)ncache); - res = Tcl_NewListObj(3, myobjv); - } - break; - case ENVGETDATADIRS: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_data_dirs(dbenv, &dirs); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_data_dirs")) == TCL_OK) { - res = Tcl_NewListObj(0, NULL); - for (i = 0; result == TCL_OK && dirs[i] != NULL; i++) - result = Tcl_ListObjAppendElement(interp, res, - NewStringObj(dirs[i], strlen(dirs[i]))); - } - break; - case ENVGETENCRYPTFLAGS: - result = tcl_EnvGetEncryptFlags(interp, objc, objv, dbenv); - break; - case ENVGETERRPFX: - if (objc != 2) { 
- Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - dbenv->get_errpfx(dbenv, &strval); - res = NewStringObj(strval, strlen(strval)); - break; - case ENVGETFLAGS: - result = env_GetFlags(interp, objc, objv, dbenv); - break; - case ENVGETHOME: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_home(dbenv, &strval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_home")) == TCL_OK) - res = NewStringObj(strval, strlen(strval)); - break; - case ENVGETLGBSIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lg_bsize(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lg_bsize")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETLGDIR: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lg_dir(dbenv, &strval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lg_dir")) == TCL_OK) - res = NewStringObj(strval, strlen(strval)); - break; - case ENVGETLGMAX: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lg_max(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lg_max")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETLGREGIONMAX: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lg_regionmax(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lg_regionmax")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETLKDETECT: - result = env_GetLockDetect(interp, objc, objv, dbenv); - break; - case ENVGETLKMAXLOCKERS: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lk_max_lockers(dbenv, &value); - if ((result = 
_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lk_max_lockers")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETLKMAXLOCKS: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lk_max_locks(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lk_max_locks")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETLKMAXOBJECTS: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lk_max_objects(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lk_max_objects")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETMPMAXOPENFD: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_mp_max_openfd(dbenv, &intvalue1); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_mp_max_openfd")) == TCL_OK) - res = Tcl_NewIntObj(intvalue1); - break; - case ENVGETMPMAXWRITE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_mp_max_write(dbenv, &intvalue1, &intvalue2); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_mp_max_write")) == TCL_OK) { - myobjv[0] = Tcl_NewIntObj(intvalue1); - myobjv[1] = Tcl_NewIntObj(intvalue2); - res = Tcl_NewListObj(2, myobjv); - } - break; - case ENVGETMPMMAPSIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_mp_mmapsize(dbenv, &size); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_mp_mmapsize")) == TCL_OK) - res = Tcl_NewLongObj((long)size); - break; - case ENVGETOPENFLAG: - result = env_GetOpenFlag(interp, objc, objv, dbenv); - break; - case ENVGETREPLIMIT: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = 
dbenv->get_rep_limit(dbenv, &gbytes, &bytes); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_rep_limit")) == TCL_OK) { - myobjv[0] = Tcl_NewLongObj((long)gbytes); - myobjv[1] = Tcl_NewLongObj((long)bytes); - res = Tcl_NewListObj(2, myobjv); - } - break; - case ENVGETSHMKEY: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_shm_key(dbenv, &shm_key); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env shm_key")) == TCL_OK) - res = Tcl_NewLongObj(shm_key); - break; - case ENVGETTASSPINS: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_tas_spins(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_tas_spins")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETTIMEOUT: - result = env_GetTimeout(interp, objc, objv, dbenv); - break; - case ENVGETTMPDIR: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_tmp_dir(dbenv, &strval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_tmp_dir")) == TCL_OK) - res = NewStringObj(strval, strlen(strval)); - break; - case ENVGETTXMAX: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_tx_max(dbenv, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_tx_max")) == TCL_OK) - res = Tcl_NewLongObj((long)value); - break; - case ENVGETTXTIMESTAMP: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_tx_timestamp(dbenv, &timeval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_tx_timestamp")) == TCL_OK) - res = Tcl_NewLongObj((long)timeval); - break; - case ENVGETVERBOSE: - result = env_GetVerbose(interp, objc, objv, dbenv); - break; - case ENVTXN: - result = tcl_Txn(interp, objc, objv, dbenv, 
envip); - break; - case ENVTXNCKP: - result = tcl_TxnCheckpoint(interp, objc, objv, dbenv); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * PUBLIC: int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - * PUBLIC: DB_ENV *, DBTCL_INFO *)); - * - * tcl_EnvRemove -- - */ -int -tcl_EnvRemove(interp, objc, objv, dbenv, envip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Env pointer */ - DBTCL_INFO *envip; /* Info pointer */ -{ - static const char *envremopts[] = { -#ifdef CONFIG_TEST - "-overwrite", - "-server", -#endif - "-data_dir", - "-encryptaes", - "-encryptany", - "-force", - "-home", - "-log_dir", - "-tmp_dir", - "-use_environ", - "-use_environ_root", - NULL - }; - enum envremopts { -#ifdef CONFIG_TEST - ENVREM_OVERWRITE, - ENVREM_SERVER, -#endif - ENVREM_DATADIR, - ENVREM_ENCRYPT_AES, - ENVREM_ENCRYPT_ANY, - ENVREM_FORCE, - ENVREM_HOME, - ENVREM_LOGDIR, - ENVREM_TMPDIR, - ENVREM_USE_ENVIRON, - ENVREM_USE_ENVIRON_ROOT - }; - DB_ENV *e; - u_int32_t cflag, enc_flag, flag, forceflag, sflag; - int i, optindex, result, ret; - char *datadir, *home, *logdir, *passwd, *server, *tmpdir; - - result = TCL_OK; - cflag = flag = forceflag = sflag = 0; - home = NULL; - passwd = NULL; - datadir = logdir = tmpdir = NULL; - server = NULL; - enc_flag = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args?"); - return (TCL_ERROR); - } - - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], envremopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[i]); - goto error; - } - i++; - switch ((enum envremopts)optindex) { -#ifdef CONFIG_TEST - case ENVREM_SERVER: - /* Make sure we have an arg to check against! 
*/ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-server name?"); - result = TCL_ERROR; - break; - } - server = Tcl_GetStringFromObj(objv[i++], NULL); - cflag = DB_RPCCLIENT; - break; -#endif - case ENVREM_ENCRYPT_AES: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptaes passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = DB_ENCRYPT_AES; - break; - case ENVREM_ENCRYPT_ANY: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-encryptany passwd?"); - result = TCL_ERROR; - break; - } - passwd = Tcl_GetStringFromObj(objv[i++], NULL); - enc_flag = 0; - break; - case ENVREM_FORCE: - forceflag |= DB_FORCE; - break; - case ENVREM_HOME: - /* Make sure we have an arg to check against! */ - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-home dir?"); - result = TCL_ERROR; - break; - } - home = Tcl_GetStringFromObj(objv[i++], NULL); - break; -#ifdef CONFIG_TEST - case ENVREM_OVERWRITE: - sflag |= DB_OVERWRITE; - break; -#endif - case ENVREM_USE_ENVIRON: - flag |= DB_USE_ENVIRON; - break; - case ENVREM_USE_ENVIRON_ROOT: - flag |= DB_USE_ENVIRON_ROOT; - break; - case ENVREM_DATADIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-data_dir dir"); - result = TCL_ERROR; - break; - } - datadir = Tcl_GetStringFromObj(objv[i++], NULL); - break; - case ENVREM_LOGDIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-log_dir dir"); - result = TCL_ERROR; - break; - } - logdir = Tcl_GetStringFromObj(objv[i++], NULL); - break; - case ENVREM_TMPDIR: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "-tmp_dir dir"); - result = TCL_ERROR; - break; - } - tmpdir = Tcl_GetStringFromObj(objv[i++], NULL); - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. 
- */ - if (result != TCL_OK) - goto error; - } - - /* - * If dbenv is NULL, we don't have an open env and we need to open - * one of the user. Don't bother with the info stuff. - */ - if (dbenv == NULL) { - if ((ret = db_env_create(&e, cflag)) != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db_env_create"); - goto error; - } - if (server != NULL) { - _debug_check(); - ret = e->set_rpc_server(e, NULL, server, 0, 0, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_rpc_server"); - if (result != TCL_OK) - goto error; - } - if (datadir != NULL) { - _debug_check(); - ret = e->set_data_dir(e, datadir); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_data_dir"); - if (result != TCL_OK) - goto error; - } - if (logdir != NULL) { - _debug_check(); - ret = e->set_lg_dir(e, logdir); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_log_dir"); - if (result != TCL_OK) - goto error; - } - if (tmpdir != NULL) { - _debug_check(); - ret = e->set_tmp_dir(e, tmpdir); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_tmp_dir"); - if (result != TCL_OK) - goto error; - } - if (passwd != NULL) { - ret = e->set_encrypt(e, passwd, enc_flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_encrypt"); - } - if (sflag != 0 && (ret = e->set_flags(e, sflag, 1)) != 0) { - _debug_check(); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_flags"); - if (result != TCL_OK) - goto error; - } - } else { - /* - * We have to clean up any info associated with this env, - * regardless of the result of the remove so do it first. - * NOTE: envip is freed when we come back from this function. - */ - _EnvInfoDelete(interp, envip); - envip = NULL; - e = dbenv; - } - - flag |= forceflag; - /* - * When we get here we have parsed all the args. Now remove - * the environment. 
- */ - _debug_check(); - ret = e->remove(e, home, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env remove"); -error: - return (result); -} - -static void -_EnvInfoDelete(interp, envip) - Tcl_Interp *interp; /* Tcl Interpreter */ - DBTCL_INFO *envip; /* Info for env */ -{ - DBTCL_INFO *nextp, *p; - - /* - * Before we can delete the environment info, we must close - * any open subsystems in this env. We will: - * 1. Abort any transactions (which aborts any nested txns). - * 2. Close any mpools (which will put any pages itself). - * 3. Put any locks and close log cursors. - * 4. Close the error file. - */ - for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) { - /* - * Check if this info structure "belongs" to this - * env. If so, remove its commands and info structure. - * We do not close/abort/whatever here, because we - * don't want to replicate DB behavior. - * - * NOTE: Only those types that can nest need to be - * itemized in the switch below. That is txns and mps. - * Other types like log cursors and locks will just - * get cleaned up here. 
- */ - if (p->i_parent == envip) { - switch (p->i_type) { - case I_TXN: - _TxnInfoDelete(interp, p); - break; - case I_MP: - _MpInfoDelete(interp, p); - break; - case I_DB: - case I_DBC: - case I_ENV: - case I_LOCK: - case I_LOGC: - case I_MUTEX: - case I_NDBM: - case I_PG: - case I_SEQ: - Tcl_SetResult(interp, - "_EnvInfoDelete: bad info type", - TCL_STATIC); - break; - } - nextp = LIST_NEXT(p, entries); - (void)Tcl_DeleteCommand(interp, p->i_name); - _DeleteInfo(p); - } else - nextp = LIST_NEXT(p, entries); - } - (void)Tcl_DeleteCommand(interp, envip->i_name); - _DeleteInfo(envip); -} - -#ifdef CONFIG_TEST -/* - * PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, - * PUBLIC: Tcl_Obj *)); - * - * tcl_EnvVerbose -- - */ -int -tcl_EnvVerbose(interp, dbenv, which, onoff) - Tcl_Interp *interp; /* Interpreter */ - DB_ENV *dbenv; /* Env pointer */ - Tcl_Obj *which; /* Which subsystem */ - Tcl_Obj *onoff; /* On or off */ -{ - static const char *verbwhich[] = { - "deadlock", - "recovery", - "rep", - "wait", - NULL - }; - enum verbwhich { - ENVVERB_DEAD, - ENVVERB_REC, - ENVVERB_REP, - ENVVERB_WAIT - }; - static const char *verbonoff[] = { - "off", - "on", - NULL - }; - enum verbonoff { - ENVVERB_OFF, - ENVVERB_ON - }; - int on, optindex, ret; - u_int32_t wh; - - if (Tcl_GetIndexFromObj(interp, which, verbwhich, "option", - TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(which)); - - switch ((enum verbwhich)optindex) { - case ENVVERB_DEAD: - wh = DB_VERB_DEADLOCK; - break; - case ENVVERB_REC: - wh = DB_VERB_RECOVERY; - break; - case ENVVERB_REP: - wh = DB_VERB_REPLICATION; - break; - case ENVVERB_WAIT: - wh = DB_VERB_WAITSFOR; - break; - default: - return (TCL_ERROR); - } - if (Tcl_GetIndexFromObj(interp, onoff, verbonoff, "option", - TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(onoff)); - switch ((enum verbonoff)optindex) { - case ENVVERB_OFF: - on = 0; - break; - case ENVVERB_ON: - on = 1; - break; - default: - return (TCL_ERROR); - } - ret = 
dbenv->set_verbose(dbenv, wh, on); - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env set verbose")); -} -#endif - -#ifdef CONFIG_TEST -/* - * PUBLIC: int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - * - * tcl_EnvAttr -- - * Return a list of the env's attributes - */ -int -tcl_EnvAttr(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Env pointer */ -{ - int result; - Tcl_Obj *myobj, *retlist; - - result = TCL_OK; - - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - retlist = Tcl_NewListObj(0, NULL); - /* - * XXX - * We peek at the dbenv to determine what subsystems - * we have available in this env. - */ - myobj = NewStringObj("-home", strlen("-home")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - myobj = NewStringObj(dbenv->db_home, strlen(dbenv->db_home)); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - if (CDB_LOCKING(dbenv)) { - myobj = NewStringObj("-cdb", strlen("-cdb")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (CRYPTO_ON(dbenv)) { - myobj = NewStringObj("-crypto", strlen("-crypto")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (LOCKING_ON(dbenv)) { - myobj = NewStringObj("-lock", strlen("-lock")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (LOGGING_ON(dbenv)) { - myobj = NewStringObj("-log", strlen("-log")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (MPOOL_ON(dbenv)) { - myobj = NewStringObj("-mpool", strlen("-mpool")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (RPC_ON(dbenv)) { - myobj = 
NewStringObj("-rpc", strlen("-rpc")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (REP_ON(dbenv)) { - myobj = NewStringObj("-rep", strlen("-rep")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - if (TXN_ON(dbenv)) { - myobj = NewStringObj("-txn", strlen("-txn")); - if ((result = Tcl_ListObjAppendElement(interp, - retlist, myobj)) != TCL_OK) - goto err; - } - Tcl_SetObjResult(interp, retlist); -err: - return (result); -} - -/* - * PUBLIC: int tcl_EnvSetFlags __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, - * PUBLIC: Tcl_Obj *)); - * - * tcl_EnvSetFlags -- - * Set flags in an env. - */ -int -tcl_EnvSetFlags(interp, dbenv, which, onoff) - Tcl_Interp *interp; /* Interpreter */ - DB_ENV *dbenv; /* Env pointer */ - Tcl_Obj *which; /* Which subsystem */ - Tcl_Obj *onoff; /* On or off */ -{ - static const char *sfwhich[] = { - "-auto_commit", - "-direct_db", - "-direct_log", - "-dsync_log", - "-log_inmemory", - "-log_remove", - "-nolock", - "-nommap", - "-nopanic", - "-nosync", - "-overwrite", - "-panic", - "-wrnosync", - NULL - }; - enum sfwhich { - ENVSF_AUTOCOMMIT, - ENVSF_DIRECTDB, - ENVSF_DIRECTLOG, - ENVSF_DSYNCLOG, - ENVSF_LOG_INMEMORY, - ENVSF_LOG_REMOVE, - ENVSF_NOLOCK, - ENVSF_NOMMAP, - ENVSF_NOPANIC, - ENVSF_NOSYNC, - ENVSF_OVERWRITE, - ENVSF_PANIC, - ENVSF_WRNOSYNC - }; - static const char *sfonoff[] = { - "off", - "on", - NULL - }; - enum sfonoff { - ENVSF_OFF, - ENVSF_ON - }; - int on, optindex, ret; - u_int32_t wh; - - if (Tcl_GetIndexFromObj(interp, which, sfwhich, "option", - TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(which)); - - switch ((enum sfwhich)optindex) { - case ENVSF_AUTOCOMMIT: - wh = DB_AUTO_COMMIT; - break; - case ENVSF_DIRECTDB: - wh = DB_DIRECT_DB; - break; - case ENVSF_DIRECTLOG: - wh = DB_DIRECT_LOG; - break; - case ENVSF_DSYNCLOG: - wh = DB_DSYNC_LOG; - break; - case ENVSF_LOG_INMEMORY: - wh = DB_LOG_INMEMORY; - break; - case 
ENVSF_LOG_REMOVE: - wh = DB_LOG_AUTOREMOVE; - break; - case ENVSF_NOLOCK: - wh = DB_NOLOCKING; - break; - case ENVSF_NOMMAP: - wh = DB_NOMMAP; - break; - case ENVSF_NOSYNC: - wh = DB_TXN_NOSYNC; - break; - case ENVSF_NOPANIC: - wh = DB_NOPANIC; - break; - case ENVSF_PANIC: - wh = DB_PANIC_ENVIRONMENT; - break; - case ENVSF_OVERWRITE: - wh = DB_OVERWRITE; - break; - case ENVSF_WRNOSYNC: - wh = DB_TXN_WRITE_NOSYNC; - break; - default: - return (TCL_ERROR); - } - if (Tcl_GetIndexFromObj(interp, onoff, sfonoff, "option", - TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(onoff)); - switch ((enum sfonoff)optindex) { - case ENVSF_OFF: - on = 0; - break; - case ENVSF_ON: - on = 1; - break; - default: - return (TCL_ERROR); - } - ret = dbenv->set_flags(dbenv, wh, on); - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env set verbose")); -} - -/* - * PUBLIC: int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - * - * tcl_EnvTest -- - */ -int -tcl_EnvTest(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Env pointer */ -{ - static const char *envtestcmd[] = { - "abort", - "check", - "copy", - NULL - }; - enum envtestcmd { - ENVTEST_ABORT, - ENVTEST_CHECK, - ENVTEST_COPY - }; - static const char *envtestat[] = { - "electinit", - "electvote1", - "none", - "predestroy", - "preopen", - "postdestroy", - "postlog", - "postlogmeta", - "postopen", - "postsync", - "subdb_lock", - NULL - }; - enum envtestat { - ENVTEST_ELECTINIT, - ENVTEST_ELECTVOTE1, - ENVTEST_NONE, - ENVTEST_PREDESTROY, - ENVTEST_PREOPEN, - ENVTEST_POSTDESTROY, - ENVTEST_POSTLOG, - ENVTEST_POSTLOGMETA, - ENVTEST_POSTOPEN, - ENVTEST_POSTSYNC, - ENVTEST_SUBDB_LOCKS - }; - int *loc, optindex, result, testval; - - result = TCL_OK; - loc = NULL; - - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "abort|copy location"); - return (TCL_ERROR); - } - - /* - * This must be the "check", "copy" or "abort" portion of the command. - */ - if (Tcl_GetIndexFromObj(interp, objv[2], envtestcmd, "command", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[2]); - return (result); - } - switch ((enum envtestcmd)optindex) { - case ENVTEST_ABORT: - loc = &dbenv->test_abort; - break; - case ENVTEST_CHECK: - loc = &dbenv->test_check; - if (Tcl_GetIntFromObj(interp, objv[3], &testval) != TCL_OK) { - result = IS_HELP(objv[3]); - return (result); - } - goto done; - case ENVTEST_COPY: - loc = &dbenv->test_copy; - break; - default: - Tcl_SetResult(interp, "Illegal store location", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * This must be the location portion of the command. 
- */ - if (Tcl_GetIndexFromObj(interp, objv[3], envtestat, "location", - TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(objv[3]); - return (result); - } - switch ((enum envtestat)optindex) { - case ENVTEST_ELECTINIT: - DB_ASSERT(loc == &dbenv->test_abort); - testval = DB_TEST_ELECTINIT; - break; - case ENVTEST_ELECTVOTE1: - DB_ASSERT(loc == &dbenv->test_abort); - testval = DB_TEST_ELECTVOTE1; - break; - case ENVTEST_NONE: - testval = 0; - break; - case ENVTEST_PREOPEN: - testval = DB_TEST_PREOPEN; - break; - case ENVTEST_PREDESTROY: - testval = DB_TEST_PREDESTROY; - break; - case ENVTEST_POSTLOG: - testval = DB_TEST_POSTLOG; - break; - case ENVTEST_POSTLOGMETA: - testval = DB_TEST_POSTLOGMETA; - break; - case ENVTEST_POSTOPEN: - testval = DB_TEST_POSTOPEN; - break; - case ENVTEST_POSTDESTROY: - testval = DB_TEST_POSTDESTROY; - break; - case ENVTEST_POSTSYNC: - testval = DB_TEST_POSTSYNC; - break; - case ENVTEST_SUBDB_LOCKS: - DB_ASSERT(loc == &dbenv->test_abort); - testval = DB_TEST_SUBDB_LOCKS; - break; - default: - Tcl_SetResult(interp, "Illegal test location", TCL_STATIC); - return (TCL_ERROR); - } -done: - *loc = testval; - Tcl_SetResult(interp, "0", TCL_STATIC); - return (result); -} -#endif - -/* - * env_DbRemove -- - * Implements the ENV->dbremove command. - */ -static int -env_DbRemove(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - static const char *envdbrem[] = { - "-auto_commit", - "-txn", - "--", - NULL - }; - enum envdbrem { - TCL_EDBREM_COMMIT, - TCL_EDBREM_TXN, - TCL_EDBREM_ENDARG - }; - DB_TXN *txn; - u_int32_t flag; - int endarg, i, optindex, result, ret, subdblen; - u_char *subdbtmp; - char *arg, *db, *subdb, msg[MSG_SIZE]; - - txn = NULL; - result = TCL_OK; - subdbtmp = NULL; - db = subdb = NULL; - endarg = 0; - flag = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename ?database?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. - */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], envdbrem, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum envdbrem)optindex) { - case TCL_EDBREM_COMMIT: - flag |= DB_AUTO_COMMIT; - break; - case TCL_EDBREM_TXN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "env dbremove: Invalid txn %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - break; - case TCL_EDBREM_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * Any args we have left, (better be 1 or 2 left) are - * file names. If there is 1, a db name, if 2 a db and subdb name. - */ - if ((i != (objc - 1)) || (i != (objc - 2))) { - /* - * Dbs must be NULL terminated file names, but subdbs can - * be anything. Use Strings for the db name and byte - * arrays for the subdb. - */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if (i != objc) { - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc( - dbenv, (size_t)subdblen + 1, &subdb)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(subdb, subdbtmp, (size_t)subdblen); - subdb[subdblen] = '\0'; - } - } else { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename ?database?"); - result = TCL_ERROR; - goto error; - } - ret = dbenv->dbremove(dbenv, txn, db, subdb, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env dbremove"); -error: - if (subdb) - __os_free(dbenv, subdb); - return (result); -} - -/* - * env_DbRename -- - * Implements the ENV->dbrename command. - */ -static int -env_DbRename(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - static const char *envdbmv[] = { - "-auto_commit", - "-txn", - "--", - NULL - }; - enum envdbmv { - TCL_EDBMV_COMMIT, - TCL_EDBMV_TXN, - TCL_EDBMV_ENDARG - }; - DB_TXN *txn; - u_int32_t flag; - int endarg, i, newlen, optindex, result, ret, subdblen; - u_char *subdbtmp; - char *arg, *db, *newname, *subdb, msg[MSG_SIZE]; - - txn = NULL; - result = TCL_OK; - subdbtmp = NULL; - db = newname = subdb = NULL; - endarg = 0; - flag = 0; - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 3, objv, - "?args? filename ?database? ?newname?"); - return (TCL_ERROR); - } - - /* - * We must first parse for the environment flag, since that - * is needed for db_create. Then create the db handle. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], envdbmv, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto error; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum envdbmv)optindex) { - case TCL_EDBMV_COMMIT: - flag |= DB_AUTO_COMMIT; - break; - case TCL_EDBMV_TXN: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "env dbrename: Invalid txn %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - break; - case TCL_EDBMV_ENDARG: - endarg = 1; - break; - } - /* - * If, at any time, parsing the args we get an error, - * bail out and return. - */ - if (result != TCL_OK) - goto error; - if (endarg) - break; - } - if (result != TCL_OK) - goto error; - /* - * Any args we have left, (better be 2 or 3 left) are - * file names. If there is 2, a db name, if 3 a db and subdb name. - */ - if ((i != (objc - 2)) || (i != (objc - 3))) { - /* - * Dbs must be NULL terminated file names, but subdbs can - * be anything. Use Strings for the db name and byte - * arrays for the subdb. 
- */ - db = Tcl_GetStringFromObj(objv[i++], NULL); - if (i == objc - 2) { - subdbtmp = - Tcl_GetByteArrayFromObj(objv[i++], &subdblen); - if ((ret = __os_malloc( - dbenv, (size_t)subdblen + 1, &subdb)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(subdb, subdbtmp, (size_t)subdblen); - subdb[subdblen] = '\0'; - } - subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &newlen); - if ((ret = __os_malloc( - dbenv, (size_t)newlen + 1, &newname)) != 0) { - Tcl_SetResult(interp, - db_strerror(ret), TCL_STATIC); - return (0); - } - memcpy(newname, subdbtmp, (size_t)newlen); - newname[newlen] = '\0'; - } else { - Tcl_WrongNumArgs(interp, 3, objv, - "?args? filename ?database? ?newname?"); - result = TCL_ERROR; - goto error; - } - ret = dbenv->dbrename(dbenv, txn, db, subdb, newname, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env dbrename"); -error: - if (subdb) - __os_free(dbenv, subdb); - if (newname) - __os_free(dbenv, newname); - return (result); -} - -/* - * env_GetFlags -- - * Implements the ENV->get_flags command. - */ -static int -env_GetFlags(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } open_flags[] = { - { DB_AUTO_COMMIT, "-auto_commit" }, - { DB_CDB_ALLDB, "-cdb_alldb" }, - { DB_DIRECT_DB, "-direct_db" }, - { DB_DIRECT_LOG, "-direct_log" }, - { DB_DSYNC_LOG, "-dsync_log" }, - { DB_LOG_AUTOREMOVE, "-log_remove" }, - { DB_LOG_INMEMORY, "-log_inmemory" }, - { DB_NOLOCKING, "-nolock" }, - { DB_NOMMAP, "-nommap" }, - { DB_NOPANIC, "-nopanic" }, - { DB_OVERWRITE, "-overwrite" }, - { DB_PANIC_ENVIRONMENT, "-panic" }, - { DB_REGION_INIT, "-region_init" }, - { DB_TXN_NOSYNC, "-nosync" }, - { DB_TXN_WRITE_NOSYNC, "-wrnosync" }, - { DB_YIELDCPU, "-yield" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = dbenv->get_flags(dbenv, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; open_flags[i].flag != 0; i++) - if (LF_ISSET(open_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, open_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * env_GetOpenFlag -- - * Implements the ENV->get_open_flags command. - */ -static int -env_GetOpenFlag(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } open_flags[] = { - { DB_INIT_CDB, "-cdb" }, - { DB_INIT_LOCK, "-lock" }, - { DB_INIT_LOG, "-log" }, - { DB_INIT_MPOOL, "-mpool" }, - { DB_INIT_TXN, "-txn" }, - { DB_RECOVER, "-recover" }, - { DB_RECOVER_FATAL, "-recover_fatal" }, - { DB_USE_ENVIRON, "-use_environ" }, - { DB_USE_ENVIRON_ROOT, "-use_environ_root" }, - { DB_CREATE, "-create" }, - { DB_LOCKDOWN, "-lockdown" }, - { DB_PRIVATE, "-private" }, - { DB_SYSTEM_MEM, "-system_mem" }, - { DB_THREAD, "-thread" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = dbenv->get_open_flags(dbenv, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_open_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; open_flags[i].flag != 0; i++) - if (LF_ISSET(open_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, open_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * PUBLIC: int tcl_EnvGetEncryptFlags __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - * PUBLIC: DB_ENV *)); - * - * tcl_EnvGetEncryptFlags -- - * Implements the ENV->get_encrypt_flags command. - */ -int -tcl_EnvGetEncryptFlags(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Database pointer */ -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } encrypt_flags[] = { - { DB_ENCRYPT_AES, "-encryptaes" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = dbenv->get_encrypt_flags(dbenv, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_encrypt_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; encrypt_flags[i].flag != 0; i++) - if (LF_ISSET(encrypt_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, encrypt_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * env_GetLockDetect -- - * Implements the ENV->get_lk_detect command. - */ -static int -env_GetLockDetect(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - int i, ret, result; - u_int32_t lk_detect; - const char *answer; - Tcl_Obj *res; - static const struct { - u_int32_t flag; - char *name; - } lk_detect_returns[] = { - { DB_LOCK_DEFAULT, "default" }, - { DB_LOCK_EXPIRE, "expire" }, - { DB_LOCK_MAXLOCKS, "maxlocks" }, - { DB_LOCK_MAXWRITE, "maxwrite" }, - { DB_LOCK_MINLOCKS, "minlocks" }, - { DB_LOCK_MINWRITE, "minwrite" }, - { DB_LOCK_OLDEST, "oldest" }, - { DB_LOCK_RANDOM, "random" }, - { DB_LOCK_YOUNGEST, "youngest" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = dbenv->get_lk_detect(dbenv, &lk_detect); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_lk_detect")) == TCL_OK) { - answer = "unknown"; - for (i = 0; lk_detect_returns[i].flag != 0; i++) - if (lk_detect == lk_detect_returns[i].flag) - answer = lk_detect_returns[i].name; - - res = NewStringObj(answer, strlen(answer)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * env_GetTimeout -- - * Implements the ENV->get_timeout command. - */ -static int -env_GetTimeout(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - static const struct { - u_int32_t flag; - char *arg; - } timeout_flags[] = { - { DB_SET_TXN_TIMEOUT, "txn" }, - { DB_SET_LOCK_TIMEOUT, "lock" }, - { 0, NULL } - }; - Tcl_Obj *res; - db_timeout_t timeout; - u_int32_t which; - int i, ret, result; - const char *arg; - - COMPQUIET(timeout, 0); - - if (objc != 3) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - arg = Tcl_GetStringFromObj(objv[2], NULL); - which = 0; - for (i = 0; timeout_flags[i].flag != 0; i++) - if (strcmp(arg, timeout_flags[i].arg) == 0) - which = timeout_flags[i].flag; - if (which == 0) { - ret = EINVAL; - goto err; - } - - ret = dbenv->get_timeout(dbenv, &timeout, which); -err: if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_timeout")) == TCL_OK) { - res = Tcl_NewLongObj((long)timeout); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * env_GetVerbose -- - * Implements the ENV->get_open_flags command. - */ -static int -env_GetVerbose(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - static const struct { - u_int32_t flag; - char *arg; - } verbose_flags[] = { - { DB_VERB_DEADLOCK, "deadlock" }, - { DB_VERB_RECOVERY, "recovery" }, - { DB_VERB_REPLICATION, "rep" }, - { DB_VERB_WAITSFOR, "wait" }, - { 0, NULL } - }; - Tcl_Obj *res; - u_int32_t which; - int i, onoff, ret, result; - const char *arg, *answer; - - COMPQUIET(onoff, 0); - - if (objc != 3) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - arg = Tcl_GetStringFromObj(objv[2], NULL); - which = 0; - for (i = 0; verbose_flags[i].flag != 0; i++) - if (strcmp(arg, verbose_flags[i].arg) == 0) - which = verbose_flags[i].flag; - if (which == 0) { - ret = EINVAL; - goto err; - } - - ret = dbenv->get_verbose(dbenv, which, &onoff); -err: if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env get_timeout")) == 0) { - answer = onoff ? "on" : "off"; - res = NewStringObj(answer, strlen(answer)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} - -/* - * PUBLIC: void tcl_EnvSetErrfile __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, - * PUBLIC: char *)); - * - * tcl_EnvSetErrfile -- - * Implements the ENV->set_errfile command. - */ -void -tcl_EnvSetErrfile(interp, dbenv, ip, errf) - Tcl_Interp *interp; /* Interpreter */ - DB_ENV *dbenv; /* Database pointer */ - DBTCL_INFO *ip; /* Our internal info */ - char *errf; -{ - COMPQUIET(interp, NULL); - /* - * If the user already set one, free it. 
- */ - if (ip->i_err != NULL && ip->i_err != stdout && - ip->i_err != stderr) - (void)fclose(ip->i_err); - if (strcmp(errf, "/dev/stdout") == 0) - ip->i_err = stdout; - else if (strcmp(errf, "/dev/stderr") == 0) - ip->i_err = stderr; - else - ip->i_err = fopen(errf, "a"); - if (ip->i_err != NULL) - dbenv->set_errfile(dbenv, ip->i_err); -} - -/* - * PUBLIC: int tcl_EnvSetErrpfx __P((Tcl_Interp *, DB_ENV *, DBTCL_INFO *, - * PUBLIC: char *)); - * - * tcl_EnvSetErrpfx -- - * Implements the ENV->set_errpfx command. - */ -int -tcl_EnvSetErrpfx(interp, dbenv, ip, pfx) - Tcl_Interp *interp; /* Interpreter */ - DB_ENV *dbenv; /* Database pointer */ - DBTCL_INFO *ip; /* Our internal info */ - char *pfx; -{ - int result, ret; - - /* - * Assume success. The only thing that can fail is - * the __os_strdup. - */ - result = TCL_OK; - Tcl_SetResult(interp, "0", TCL_STATIC); - /* - * If the user already set one, free it. - */ - if (ip->i_errpfx != NULL) - __os_free(dbenv, ip->i_errpfx); - if ((ret = __os_strdup(dbenv, pfx, &ip->i_errpfx)) != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "__os_strdup"); - ip->i_errpfx = NULL; - } - if (ip->i_errpfx != NULL) - dbenv->set_errpfx(dbenv, ip->i_errpfx); - return (result); -} diff --git a/storage/bdb/tcl/tcl_internal.c b/storage/bdb/tcl/tcl_internal.c deleted file mode 100644 index 6927b301893..00000000000 --- a/storage/bdb/tcl/tcl_internal.c +++ /dev/null @@ -1,713 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_internal.c,v 11.70 2004/10/25 18:04:44 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" -#include "dbinc/db_page.h" -#include "dbinc/db_am.h" - -/* - * - * internal.c -- - * - * This file contains internal functions we need to maintain - * state for our Tcl interface. 
- * - * NOTE: This all uses a linear linked list. If we end up with - * too many info structs such that this is a performance hit, it - * should be redone using hashes or a list per type. The assumption - * is that the user won't have more than a few dozen info structs - * in operation at any given point in time. Even a complicated - * application with a few environments, nested transactions, locking, - * and several databases open, using cursors should not have a - * negative performance impact, in terms of searching the list to - * get/manipulate the info structure. - */ - -#define GLOB_CHAR(c) ((c) == '*' || (c) == '?') - -/* - * PUBLIC: DBTCL_INFO *_NewInfo __P((Tcl_Interp *, - * PUBLIC: void *, char *, enum INFOTYPE)); - * - * _NewInfo -- - * - * This function will create a new info structure and fill it in - * with the name and pointer, id and type. - */ -DBTCL_INFO * -_NewInfo(interp, anyp, name, type) - Tcl_Interp *interp; - void *anyp; - char *name; - enum INFOTYPE type; -{ - DBTCL_INFO *p; - int i, ret; - - if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), &p)) != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - return (NULL); - } - - if ((ret = __os_strdup(NULL, name, &p->i_name)) != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - __os_free(NULL, p); - return (NULL); - } - p->i_interp = interp; - p->i_anyp = anyp; - p->i_data = 0; - p->i_data2 = 0; - p->i_type = type; - p->i_parent = NULL; - p->i_err = NULL; - p->i_errpfx = NULL; - p->i_lockobj.data = NULL; - p->i_btcompare = NULL; - p->i_dupcompare = NULL; - p->i_hashproc = NULL; - p->i_second_call = NULL; - p->i_rep_eid = NULL; - p->i_rep_send = NULL; - for (i = 0; i < MAX_ID; i++) - p->i_otherid[i] = 0; - - LIST_INSERT_HEAD(&__db_infohead, p, entries); - return (p); -} - -/* - * PUBLIC: void *_NameToPtr __P((CONST char *)); - */ -void * -_NameToPtr(name) - CONST char *name; -{ - DBTCL_INFO *p; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; - p = LIST_NEXT(p, entries)) - 
if (strcmp(name, p->i_name) == 0) - return (p->i_anyp); - return (NULL); -} - -/* - * PUBLIC: DBTCL_INFO *_PtrToInfo __P((CONST void *)); - */ -DBTCL_INFO * -_PtrToInfo(ptr) - CONST void *ptr; -{ - DBTCL_INFO *p; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; - p = LIST_NEXT(p, entries)) - if (p->i_anyp == ptr) - return (p); - return (NULL); -} - -/* - * PUBLIC: DBTCL_INFO *_NameToInfo __P((CONST char *)); - */ -DBTCL_INFO * -_NameToInfo(name) - CONST char *name; -{ - DBTCL_INFO *p; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; - p = LIST_NEXT(p, entries)) - if (strcmp(name, p->i_name) == 0) - return (p); - return (NULL); -} - -/* - * PUBLIC: void _SetInfoData __P((DBTCL_INFO *, void *)); - */ -void -_SetInfoData(p, data) - DBTCL_INFO *p; - void *data; -{ - if (p == NULL) - return; - p->i_anyp = data; - return; -} - -/* - * PUBLIC: void _DeleteInfo __P((DBTCL_INFO *)); - */ -void -_DeleteInfo(p) - DBTCL_INFO *p; -{ - if (p == NULL) - return; - LIST_REMOVE(p, entries); - if (p->i_lockobj.data != NULL) - __os_free(NULL, p->i_lockobj.data); - if (p->i_err != NULL && p->i_err != stderr) { - (void)fclose(p->i_err); - p->i_err = NULL; - } - if (p->i_errpfx != NULL) - __os_free(NULL, p->i_errpfx); - if (p->i_btcompare != NULL) - Tcl_DecrRefCount(p->i_btcompare); - if (p->i_dupcompare != NULL) - Tcl_DecrRefCount(p->i_dupcompare); - if (p->i_hashproc != NULL) - Tcl_DecrRefCount(p->i_hashproc); - if (p->i_second_call != NULL) - Tcl_DecrRefCount(p->i_second_call); - if (p->i_rep_eid != NULL) - Tcl_DecrRefCount(p->i_rep_eid); - if (p->i_rep_send != NULL) - Tcl_DecrRefCount(p->i_rep_send); - __os_free(NULL, p->i_name); - __os_free(NULL, p); - - return; -} - -/* - * PUBLIC: int _SetListElem __P((Tcl_Interp *, - * PUBLIC: Tcl_Obj *, void *, u_int32_t, void *, u_int32_t)); - */ -int -_SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt) - Tcl_Interp *interp; - Tcl_Obj *list; - void *elem1, *elem2; - u_int32_t e1cnt, e2cnt; -{ - Tcl_Obj *myobjv[2], *thislist; - int 
myobjc; - - myobjc = 2; - myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, (int)e1cnt); - myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, (int)e2cnt); - thislist = Tcl_NewListObj(myobjc, myobjv); - if (thislist == NULL) - return (TCL_ERROR); - return (Tcl_ListObjAppendElement(interp, list, thislist)); - -} - -/* - * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, long)); - */ -int -_SetListElemInt(interp, list, elem1, elem2) - Tcl_Interp *interp; - Tcl_Obj *list; - void *elem1; - long elem2; -{ - Tcl_Obj *myobjv[2], *thislist; - int myobjc; - - myobjc = 2; - myobjv[0] = - Tcl_NewByteArrayObj((u_char *)elem1, (int)strlen((char *)elem1)); - myobjv[1] = Tcl_NewLongObj(elem2); - thislist = Tcl_NewListObj(myobjc, myobjv); - if (thislist == NULL) - return (TCL_ERROR); - return (Tcl_ListObjAppendElement(interp, list, thislist)); -} - -/* - * Don't compile this code if we don't have sequences compiled into the DB - * library, it's likely because we don't have a 64-bit type, and trying to - * use int64_t is going to result in syntax errors. 
- */ -#ifdef HAVE_SEQUENCE -/* - * PUBLIC: int _SetListElemWideInt __P((Tcl_Interp *, - * PUBLIC: Tcl_Obj *, void *, int64_t)); - */ -int -_SetListElemWideInt(interp, list, elem1, elem2) - Tcl_Interp *interp; - Tcl_Obj *list; - void *elem1; - int64_t elem2; -{ - Tcl_Obj *myobjv[2], *thislist; - int myobjc; - - myobjc = 2; - myobjv[0] = - Tcl_NewByteArrayObj((u_char *)elem1, (int)strlen((char *)elem1)); - myobjv[1] = Tcl_NewWideIntObj(elem2); - thislist = Tcl_NewListObj(myobjc, myobjv); - if (thislist == NULL) - return (TCL_ERROR); - return (Tcl_ListObjAppendElement(interp, list, thislist)); -} -#endif /* HAVE_SEQUENCE */ - -/* - * PUBLIC: int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, - * PUBLIC: db_recno_t, u_char *, u_int32_t)); - */ -int -_SetListRecnoElem(interp, list, elem1, elem2, e2size) - Tcl_Interp *interp; - Tcl_Obj *list; - db_recno_t elem1; - u_char *elem2; - u_int32_t e2size; -{ - Tcl_Obj *myobjv[2], *thislist; - int myobjc; - - myobjc = 2; - myobjv[0] = Tcl_NewWideIntObj((Tcl_WideInt)elem1); - myobjv[1] = Tcl_NewByteArrayObj(elem2, (int)e2size); - thislist = Tcl_NewListObj(myobjc, myobjv); - if (thislist == NULL) - return (TCL_ERROR); - return (Tcl_ListObjAppendElement(interp, list, thislist)); - -} - -/* - * _Set3DBTList -- - * This is really analogous to both _SetListElem and - * _SetListRecnoElem--it's used for three-DBT lists returned by - * DB->pget and DBC->pget(). We'd need a family of four functions - * to handle all the recno/non-recno cases, however, so we make - * this a little more aware of the internals and do the logic inside. - * - * XXX - * One of these days all these functions should probably be cleaned up - * to eliminate redundancy and bring them into the standard DB - * function namespace. 
- * - * PUBLIC: int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int, - * PUBLIC: DBT *, int, DBT *)); - */ -int -_Set3DBTList(interp, list, elem1, is1recno, elem2, is2recno, elem3) - Tcl_Interp *interp; - Tcl_Obj *list; - DBT *elem1, *elem2, *elem3; - int is1recno, is2recno; -{ - - Tcl_Obj *myobjv[3], *thislist; - - if (is1recno) - myobjv[0] = Tcl_NewWideIntObj( - (Tcl_WideInt)*(db_recno_t *)elem1->data); - else - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)elem1->data, (int)elem1->size); - - if (is2recno) - myobjv[1] = Tcl_NewWideIntObj( - (Tcl_WideInt)*(db_recno_t *)elem2->data); - else - myobjv[1] = Tcl_NewByteArrayObj( - (u_char *)elem2->data, (int)elem2->size); - - myobjv[2] = Tcl_NewByteArrayObj( - (u_char *)elem3->data, (int)elem3->size); - - thislist = Tcl_NewListObj(3, myobjv); - - if (thislist == NULL) - return (TCL_ERROR); - return (Tcl_ListObjAppendElement(interp, list, thislist)); -} - -/* - * _SetMultiList -- build a list for return from multiple get. - * - * PUBLIC: int _SetMultiList __P((Tcl_Interp *, - * PUBLIC: Tcl_Obj *, DBT *, DBT*, DBTYPE, u_int32_t)); - */ -int -_SetMultiList(interp, list, key, data, type, flag) - Tcl_Interp *interp; - Tcl_Obj *list; - DBT *key, *data; - DBTYPE type; - u_int32_t flag; -{ - db_recno_t recno; - u_int32_t dlen, klen; - int result; - void *pointer, *dp, *kp; - - recno = 0; - dlen = 0; - kp = NULL; - - DB_MULTIPLE_INIT(pointer, data); - result = TCL_OK; - - if (type == DB_RECNO || type == DB_QUEUE) - recno = *(db_recno_t *) key->data; - else - kp = key->data; - klen = key->size; - do { - if (flag & DB_MULTIPLE_KEY) { - if (type == DB_RECNO || type == DB_QUEUE) - DB_MULTIPLE_RECNO_NEXT(pointer, - data, recno, dp, dlen); - else - DB_MULTIPLE_KEY_NEXT(pointer, - data, kp, klen, dp, dlen); - } else - DB_MULTIPLE_NEXT(pointer, data, dp, dlen); - - if (pointer == NULL) - break; - - if (type == DB_RECNO || type == DB_QUEUE) { - result = - _SetListRecnoElem(interp, list, recno, dp, dlen); - recno++; - /* Wrap around 
and skip zero. */ - if (recno == 0) - recno++; - } else - result = _SetListElem(interp, list, kp, klen, dp, dlen); - } while (result == TCL_OK); - - return (result); -} -/* - * PUBLIC: int _GetGlobPrefix __P((char *, char **)); - */ -int -_GetGlobPrefix(pattern, prefix) - char *pattern; - char **prefix; -{ - int i, j; - char *p; - - /* - * Duplicate it, we get enough space and most of the work is done. - */ - if (__os_strdup(NULL, pattern, prefix) != 0) - return (1); - - p = *prefix; - for (i = 0, j = 0; p[i] && !GLOB_CHAR(p[i]); i++, j++) - /* - * Check for an escaped character and adjust - */ - if (p[i] == '\\' && p[i+1]) { - p[j] = p[i+1]; - i++; - } else - p[j] = p[i]; - p[j] = 0; - return (0); -} - -/* - * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, int, char *)); - */ -int -_ReturnSetup(interp, ret, ok, errmsg) - Tcl_Interp *interp; - int ret, ok; - char *errmsg; -{ - char *msg; - - if (ret > 0) - return (_ErrorSetup(interp, ret, errmsg)); - - /* - * We either have success or a DB error. If a DB error, set up the - * string. We return an error if not one of the errors we catch. - * If anyone wants to reset the result to return anything different, - * then the calling function is responsible for doing so via - * Tcl_ResetResult or another Tcl_SetObjResult. 
- */ - if (ret == 0) { - Tcl_SetResult(interp, "0", TCL_STATIC); - return (TCL_OK); - } - - msg = db_strerror(ret); - Tcl_AppendResult(interp, msg, NULL); - - if (ok) - return (TCL_OK); - else { - Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL); - return (TCL_ERROR); - } -} - -/* - * PUBLIC: int _ErrorSetup __P((Tcl_Interp *, int, char *)); - */ -int -_ErrorSetup(interp, ret, errmsg) - Tcl_Interp *interp; - int ret; - char *errmsg; -{ - Tcl_SetErrno(ret); - Tcl_AppendResult(interp, errmsg, ":", Tcl_PosixError(interp), NULL); - return (TCL_ERROR); -} - -/* - * PUBLIC: void _ErrorFunc __P((const DB_ENV *, CONST char *, const char *)); - */ -void -_ErrorFunc(dbenv, pfx, msg) - const DB_ENV *dbenv; - CONST char *pfx; - const char *msg; -{ - DBTCL_INFO *p; - Tcl_Interp *interp; - size_t size; - char *err; - - COMPQUIET(dbenv, NULL); - - p = _NameToInfo(pfx); - if (p == NULL) - return; - interp = p->i_interp; - - size = strlen(pfx) + strlen(msg) + 4; - /* - * If we cannot allocate enough to put together the prefix - * and message then give them just the message. - */ - if (__os_malloc(NULL, size, &err) != 0) { - Tcl_AddErrorInfo(interp, msg); - Tcl_AppendResult(interp, msg, "\n", NULL); - return; - } - snprintf(err, size, "%s: %s", pfx, msg); - Tcl_AddErrorInfo(interp, err); - Tcl_AppendResult(interp, err, "\n", NULL); - __os_free(NULL, err); - return; -} - -#define INVALID_LSNMSG "Invalid LSN with %d parts. 
Should have 2.\n" - -/* - * PUBLIC: int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *)); - */ -int -_GetLsn(interp, obj, lsn) - Tcl_Interp *interp; - Tcl_Obj *obj; - DB_LSN *lsn; -{ - Tcl_Obj **myobjv; - char msg[MSG_SIZE]; - int myobjc, result; - u_int32_t tmp; - - result = Tcl_ListObjGetElements(interp, obj, &myobjc, &myobjv); - if (result == TCL_ERROR) - return (result); - if (myobjc != 2) { - result = TCL_ERROR; - snprintf(msg, MSG_SIZE, INVALID_LSNMSG, myobjc); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (result); - } - result = _GetUInt32(interp, myobjv[0], &tmp); - if (result == TCL_ERROR) - return (result); - lsn->file = tmp; - result = _GetUInt32(interp, myobjv[1], &tmp); - lsn->offset = tmp; - return (result); -} - -/* - * _GetUInt32 -- - * Get a u_int32_t from a Tcl object. Tcl_GetIntFromObj does the - * right thing most of the time, but on machines where a long is 8 bytes - * and an int is 4 bytes, it errors on integers between the maximum - * int32_t and the maximum u_int32_t. This is correct, but we generally - * want a u_int32_t in the end anyway, so we use Tcl_GetLongFromObj and do - * the bounds checking ourselves. - * - * This code looks much like Tcl_GetIntFromObj, only with a different - * bounds check. It's essentially Tcl_GetUnsignedIntFromObj, which - * unfortunately doesn't exist. 
- * - * PUBLIC: int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *)); - */ -int -_GetUInt32(interp, obj, resp) - Tcl_Interp *interp; - Tcl_Obj *obj; - u_int32_t *resp; -{ - int result; - long ltmp; - - result = Tcl_GetLongFromObj(interp, obj, <mp); - if (result != TCL_OK) - return (result); - - if ((unsigned long)ltmp != (u_int32_t)ltmp) { - if (interp != NULL) { - Tcl_ResetResult(interp); - Tcl_AppendToObj(Tcl_GetObjResult(interp), - "integer value too large for u_int32_t", -1); - } - return (TCL_ERROR); - } - - *resp = (u_int32_t)ltmp; - return (TCL_OK); -} - -/* - * _GetFlagsList -- - * Get a new Tcl object, containing a list of the string values - * associated with a particular set of flag values. - * - * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, const FN *)); - */ -Tcl_Obj * -_GetFlagsList(interp, flags, fnp) - Tcl_Interp *interp; - u_int32_t flags; - const FN *fnp; -{ - Tcl_Obj *newlist, *newobj; - int result; - - newlist = Tcl_NewObj(); - - /* - * Append a Tcl_Obj containing each pertinent flag string to the - * specified Tcl list. - */ - for (; fnp->mask != 0; ++fnp) - if (LF_ISSET(fnp->mask)) { - newobj = NewStringObj(fnp->name, strlen(fnp->name)); - result = - Tcl_ListObjAppendElement(interp, newlist, newobj); - - /* - * Tcl_ListObjAppendElement is defined to return TCL_OK - * unless newlist isn't actually a list (or convertible - * into one). If this is the case, we screwed up badly - * somehow. - */ - DB_ASSERT(result == TCL_OK); - } - - return (newlist); -} - -int __debug_stop, __debug_on, __debug_print, __debug_test; - -/* - * PUBLIC: void _debug_check __P((void)); - */ -void -_debug_check() -{ - if (__debug_on == 0) - return; - - if (__debug_print != 0) { - printf("\r%7d:", __debug_on); - (void)fflush(stdout); - } - if (__debug_on++ == __debug_test || __debug_stop) - __db_loadme(); -} - -/* - * XXX - * Tcl 8.1+ Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug. 
- * - * There is a bug in Tcl 8.1+ and byte arrays in that if it happens - * to use an object as both a byte array and something else like - * an int, and you've done a Tcl_GetByteArrayFromObj, then you - * do a Tcl_GetIntFromObj, your memory is deleted. - * - * Workaround is for all byte arrays we want to use, if it can be - * represented as an integer, we copy it so that we don't lose the - * memory. - */ -/* - * PUBLIC: int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **, - * PUBLIC: u_int32_t *, int *)); - */ -int -_CopyObjBytes(interp, obj, newp, sizep, freep) - Tcl_Interp *interp; - Tcl_Obj *obj; - void **newp; - u_int32_t *sizep; - int *freep; -{ - void *tmp, *new; - int i, len, ret; - - /* - * If the object is not an int, then just return the byte - * array because it won't be transformed out from under us. - * If it is a number, we need to copy it. - */ - *freep = 0; - ret = Tcl_GetIntFromObj(interp, obj, &i); - tmp = Tcl_GetByteArrayFromObj(obj, &len); - *sizep = (u_int32_t)len; - if (ret == TCL_ERROR) { - Tcl_ResetResult(interp); - *newp = tmp; - return (0); - } - - /* - * If we get here, we have an integer that might be reused - * at some other point so we cannot count on GetByteArray - * keeping our pointer valid. - */ - if ((ret = __os_malloc(NULL, (size_t)len, &new)) != 0) - return (ret); - memcpy(new, tmp, (size_t)len); - *newp = new; - *freep = 1; - return (0); -} diff --git a/storage/bdb/tcl/tcl_lock.c b/storage/bdb/tcl/tcl_lock.c deleted file mode 100644 index 4a3de36bd59..00000000000 --- a/storage/bdb/tcl/tcl_lock.c +++ /dev/null @@ -1,746 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_lock.c,v 11.59 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -#ifdef CONFIG_TEST -static int lock_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int _LockMode __P((Tcl_Interp *, Tcl_Obj *, db_lockmode_t *)); -static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t, - u_int32_t, DBT *, db_lockmode_t, char *)); -static void _LockPutInfo __P((Tcl_Interp *, db_lockop_t, DB_LOCK *, - u_int32_t, DBT *)); - -/* - * tcl_LockDetect -- - * - * PUBLIC: int tcl_LockDetect __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LockDetect(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - static const char *ldopts[] = { - "default", - "expire", - "maxlocks", - "maxwrites", - "minlocks", - "minwrites", - "oldest", - "random", - "youngest", - NULL - }; - enum ldopts { - LD_DEFAULT, - LD_EXPIRE, - LD_MAXLOCKS, - LD_MAXWRITES, - LD_MINLOCKS, - LD_MINWRITES, - LD_OLDEST, - LD_RANDOM, - LD_YOUNGEST - }; - u_int32_t flag, policy; - int i, optindex, result, ret; - - result = TCL_OK; - flag = policy = 0; - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - ldopts, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[i])); - i++; - switch ((enum ldopts)optindex) { - case LD_DEFAULT: - FLAG_CHECK(policy); - policy = DB_LOCK_DEFAULT; - break; - case LD_EXPIRE: - FLAG_CHECK(policy); - policy = DB_LOCK_EXPIRE; - break; - case LD_MAXLOCKS: - FLAG_CHECK(policy); - policy = DB_LOCK_MAXLOCKS; - break; - case LD_MAXWRITES: - FLAG_CHECK(policy); - policy = DB_LOCK_MAXWRITE; - break; - case LD_MINLOCKS: - FLAG_CHECK(policy); - policy 
= DB_LOCK_MINLOCKS; - break; - case LD_MINWRITES: - FLAG_CHECK(policy); - policy = DB_LOCK_MINWRITE; - break; - case LD_OLDEST: - FLAG_CHECK(policy); - policy = DB_LOCK_OLDEST; - break; - case LD_RANDOM: - FLAG_CHECK(policy); - policy = DB_LOCK_RANDOM; - break; - case LD_YOUNGEST: - FLAG_CHECK(policy); - policy = DB_LOCK_YOUNGEST; - break; - } - } - - _debug_check(); - ret = envp->lock_detect(envp, flag, policy, NULL); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock detect"); - return (result); -} - -/* - * tcl_LockGet -- - * - * PUBLIC: int tcl_LockGet __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LockGet(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - static const char *lgopts[] = { - "-nowait", - NULL - }; - enum lgopts { - LGNOWAIT - }; - DBT obj; - Tcl_Obj *res; - void *otmp; - db_lockmode_t mode; - u_int32_t flag, lockid; - int freeobj, optindex, result, ret; - char newname[MSG_SIZE]; - - result = TCL_OK; - freeobj = 0; - memset(newname, 0, MSG_SIZE); - if (objc != 5 && objc != 6) { - Tcl_WrongNumArgs(interp, 2, objv, "?-nowait? mode id obj"); - return (TCL_ERROR); - } - /* - * Work back from required args. - * Last arg is obj. - * Second last is lock id. - * Third last is lock mode. - */ - memset(&obj, 0, sizeof(obj)); - - if ((result = - _GetUInt32(interp, objv[objc-2], &lockid)) != TCL_OK) - return (result); - - ret = _CopyObjBytes(interp, objv[objc-1], &otmp, - &obj.size, &freeobj); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock get"); - return (result); - } - obj.data = otmp; - if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK) - goto out; - - /* - * Any left over arg is the flag. 
- */ - flag = 0; - if (objc == 6) { - if (Tcl_GetIndexFromObj(interp, objv[(objc - 4)], - lgopts, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[(objc - 4)])); - switch ((enum lgopts)optindex) { - case LGNOWAIT: - flag |= DB_LOCK_NOWAIT; - break; - } - } - - result = _GetThisLock(interp, envp, lockid, flag, &obj, mode, newname); - if (result == TCL_OK) { - res = NewStringObj(newname, strlen(newname)); - Tcl_SetObjResult(interp, res); - } -out: - if (freeobj) - __os_free(envp, otmp); - return (result); -} - -/* - * tcl_LockStat -- - * - * PUBLIC: int tcl_LockStat __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LockStat(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DB_LOCK_STAT *sp; - Tcl_Obj *res; - int result, ret; - - result = TCL_OK; - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->lock_stat(envp, &sp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock stat"); - if (result == TCL_ERROR) - return (result); - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - /* - * MAKE_STAT_LIST assumes 'res' and 'error' label. 
- */ - MAKE_STAT_LIST("Region size", sp->st_regsize); - MAKE_STAT_LIST("Last allocated locker ID", sp->st_id); - MAKE_STAT_LIST("Current maximum unused locker ID", sp->st_cur_maxid); - MAKE_STAT_LIST("Maximum locks", sp->st_maxlocks); - MAKE_STAT_LIST("Maximum lockers", sp->st_maxlockers); - MAKE_STAT_LIST("Maximum objects", sp->st_maxobjects); - MAKE_STAT_LIST("Lock modes", sp->st_nmodes); - MAKE_STAT_LIST("Current number of locks", sp->st_nlocks); - MAKE_STAT_LIST("Maximum number of locks so far", sp->st_maxnlocks); - MAKE_STAT_LIST("Current number of lockers", sp->st_nlockers); - MAKE_STAT_LIST("Maximum number of lockers so far", sp->st_maxnlockers); - MAKE_STAT_LIST("Current number of objects", sp->st_nobjects); - MAKE_STAT_LIST("Maximum number of objects so far", sp->st_maxnobjects); - MAKE_STAT_LIST("Number of conflicts", sp->st_nconflicts); - MAKE_STAT_LIST("Lock requests", sp->st_nrequests); - MAKE_STAT_LIST("Lock releases", sp->st_nreleases); - MAKE_STAT_LIST("Lock requests that would have waited", sp->st_nnowaits); - MAKE_STAT_LIST("Deadlocks detected", sp->st_ndeadlocks); - MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait); - MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait); - MAKE_STAT_LIST("Lock timeout value", sp->st_locktimeout); - MAKE_STAT_LIST("Number of lock timeouts", sp->st_nlocktimeouts); - MAKE_STAT_LIST("Transaction timeout value", sp->st_txntimeout); - MAKE_STAT_LIST("Number of transaction timeouts", sp->st_ntxntimeouts); - Tcl_SetObjResult(interp, res); -error: - __os_ufree(envp, sp); - return (result); -} - -/* - * tcl_LockTimeout -- - * - * PUBLIC: int tcl_LockTimeout __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LockTimeout(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - long timeout; - int result, ret; - - /* - * One arg, the timeout. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?timeout?"); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[2], &timeout); - if (result != TCL_OK) - return (result); - _debug_check(); - ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_LOCK_TIMEOUT); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock timeout"); - return (result); -} - -/* - * lock_Cmd -- - * Implements the "lock" widget. - */ -static int -lock_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Lock handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *lkcmds[] = { - "put", - NULL - }; - enum lkcmds { - LKPUT - }; - DB_ENV *env; - DB_LOCK *lock; - DBTCL_INFO *lkip; - int cmdindex, result, ret; - - Tcl_ResetResult(interp); - lock = (DB_LOCK *)clientData; - lkip = _PtrToInfo((void *)lock); - result = TCL_OK; - - if (lock == NULL) { - Tcl_SetResult(interp, "NULL lock", TCL_STATIC); - return (TCL_ERROR); - } - if (lkip == NULL) { - Tcl_SetResult(interp, "NULL lock info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - env = NAME_TO_ENV(lkip->i_parent->i_name); - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - /* - * Get the command name index from the object based on the dbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], lkcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - switch ((enum lkcmds)cmdindex) { - case LKPUT: - _debug_check(); - ret = env->lock_put(env, lock); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock put"); - (void)Tcl_DeleteCommand(interp, lkip->i_name); - _DeleteInfo(lkip); - __os_free(env, lock); - break; - } - return (result); -} - -/* - * tcl_LockVec -- - * - * PUBLIC: int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LockVec(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* environment pointer */ -{ - static const char *lvopts[] = { - "-nowait", - NULL - }; - enum lvopts { - LVNOWAIT - }; - static const char *lkops[] = { - "get", - "put", - "put_all", - "put_obj", - "timeout", - NULL - }; - enum lkops { - LKGET, - LKPUT, - LKPUTALL, - LKPUTOBJ, - LKTIMEOUT - }; - - DB_LOCK *lock; - DB_LOCKREQ list; - DBT obj; - Tcl_Obj **myobjv, *res, *thisop; - void *otmp; - u_int32_t flag, lockid; - int freeobj, i, myobjc, optindex, result, ret; - char *lockname, msg[MSG_SIZE], newname[MSG_SIZE]; - - result = TCL_OK; - memset(newname, 0, MSG_SIZE); - memset(&list, 0, sizeof(DB_LOCKREQ)); - flag = 0; - freeobj = 0; - otmp = NULL; - - /* - * If -nowait is given, it MUST be first arg. - */ - if (Tcl_GetIndexFromObj(interp, objv[2], - lvopts, "option", TCL_EXACT, &optindex) == TCL_OK) { - switch ((enum lvopts)optindex) { - case LVNOWAIT: - flag |= DB_LOCK_NOWAIT; - break; - } - i = 3; - } else { - if (IS_HELP(objv[2]) == TCL_OK) - return (TCL_OK); - Tcl_ResetResult(interp); - i = 2; - } - - /* - * Our next arg MUST be the locker ID. - */ - result = _GetUInt32(interp, objv[i++], &lockid); - if (result != TCL_OK) - return (result); - - /* - * All other remaining args are operation tuples. 
- * Go through sequentially to decode, execute and build - * up list of return values. - */ - res = Tcl_NewListObj(0, NULL); - while (i < objc) { - /* - * Get the list of the tuple. - */ - lock = NULL; - result = Tcl_ListObjGetElements(interp, objv[i], - &myobjc, &myobjv); - if (result == TCL_OK) - i++; - else - break; - /* - * First we will set up the list of requests. - * We will make a "second pass" after we get back - * the results from the lock_vec call to create - * the return list. - */ - if (Tcl_GetIndexFromObj(interp, myobjv[0], - lkops, "option", TCL_EXACT, &optindex) != TCL_OK) { - result = IS_HELP(myobjv[0]); - goto error; - } - switch ((enum lkops)optindex) { - case LKGET: - if (myobjc != 3) { - Tcl_WrongNumArgs(interp, 1, myobjv, - "{get obj mode}"); - result = TCL_ERROR; - goto error; - } - result = _LockMode(interp, myobjv[2], &list.mode); - if (result != TCL_OK) - goto error; - ret = _CopyObjBytes(interp, myobjv[1], &otmp, - &obj.size, &freeobj); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock vec"); - return (result); - } - obj.data = otmp; - ret = _GetThisLock(interp, envp, lockid, flag, - &obj, list.mode, newname); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock vec"); - thisop = Tcl_NewIntObj(ret); - (void)Tcl_ListObjAppendElement(interp, res, - thisop); - goto error; - } - thisop = NewStringObj(newname, strlen(newname)); - (void)Tcl_ListObjAppendElement(interp, res, thisop); - if (freeobj && otmp != NULL) { - __os_free(envp, otmp); - freeobj = 0; - } - continue; - case LKPUT: - if (myobjc != 2) { - Tcl_WrongNumArgs(interp, 1, myobjv, - "{put lock}"); - result = TCL_ERROR; - goto error; - } - list.op = DB_LOCK_PUT; - lockname = Tcl_GetStringFromObj(myobjv[1], NULL); - lock = NAME_TO_LOCK(lockname); - if (lock == NULL) { - snprintf(msg, MSG_SIZE, "Invalid lock: %s\n", - lockname); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - goto error; - } - list.lock = 
*lock; - break; - case LKPUTALL: - if (myobjc != 1) { - Tcl_WrongNumArgs(interp, 1, myobjv, - "{put_all}"); - result = TCL_ERROR; - goto error; - } - list.op = DB_LOCK_PUT_ALL; - break; - case LKPUTOBJ: - if (myobjc != 2) { - Tcl_WrongNumArgs(interp, 1, myobjv, - "{put_obj obj}"); - result = TCL_ERROR; - goto error; - } - list.op = DB_LOCK_PUT_OBJ; - ret = _CopyObjBytes(interp, myobjv[1], &otmp, - &obj.size, &freeobj); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock vec"); - return (result); - } - obj.data = otmp; - list.obj = &obj; - break; - case LKTIMEOUT: - list.op = DB_LOCK_TIMEOUT; - break; - - } - /* - * We get here, we have set up our request, now call - * lock_vec. - */ - _debug_check(); - ret = envp->lock_vec(envp, lockid, flag, &list, 1, NULL); - /* - * Now deal with whether or not the operation succeeded. - * Get's were done above, all these are only puts. - */ - thisop = Tcl_NewIntObj(ret); - result = Tcl_ListObjAppendElement(interp, res, thisop); - if (ret != 0 && result == TCL_OK) - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "lock put"); - if (freeobj && otmp != NULL) { - __os_free(envp, otmp); - freeobj = 0; - } - /* - * We did a put of some kind. Since we did that, - * we have to delete the commands associated with - * any of the locks we just put. 
- */ - _LockPutInfo(interp, list.op, lock, lockid, &obj); - } - - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); -error: - return (result); -} - -static int -_LockMode(interp, obj, mode) - Tcl_Interp *interp; - Tcl_Obj *obj; - db_lockmode_t *mode; -{ - static const char *lkmode[] = { - "ng", - "read", - "write", - "iwrite", - "iread", - "iwr", - NULL - }; - enum lkmode { - LK_NG, - LK_READ, - LK_WRITE, - LK_IWRITE, - LK_IREAD, - LK_IWR - }; - int optindex; - - if (Tcl_GetIndexFromObj(interp, obj, lkmode, "option", - TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(obj)); - switch ((enum lkmode)optindex) { - case LK_NG: - *mode = DB_LOCK_NG; - break; - case LK_READ: - *mode = DB_LOCK_READ; - break; - case LK_WRITE: - *mode = DB_LOCK_WRITE; - break; - case LK_IREAD: - *mode = DB_LOCK_IREAD; - break; - case LK_IWRITE: - *mode = DB_LOCK_IWRITE; - break; - case LK_IWR: - *mode = DB_LOCK_IWR; - break; - } - return (TCL_OK); -} - -static void -_LockPutInfo(interp, op, lock, lockid, objp) - Tcl_Interp *interp; - db_lockop_t op; - DB_LOCK *lock; - u_int32_t lockid; - DBT *objp; -{ - DBTCL_INFO *p, *nextp; - int found; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) { - found = 0; - nextp = LIST_NEXT(p, entries); - if ((op == DB_LOCK_PUT && (p->i_lock == lock)) || - (op == DB_LOCK_PUT_ALL && p->i_locker == lockid) || - (op == DB_LOCK_PUT_OBJ && p->i_lockobj.data && - memcmp(p->i_lockobj.data, objp->data, objp->size) == 0)) - found = 1; - if (found) { - (void)Tcl_DeleteCommand(interp, p->i_name); - __os_free(NULL, p->i_lock); - _DeleteInfo(p); - } - } -} - -static int -_GetThisLock(interp, envp, lockid, flag, objp, mode, newname) - Tcl_Interp *interp; /* Interpreter */ - DB_ENV *envp; /* Env handle */ - u_int32_t lockid; /* Locker ID */ - u_int32_t flag; /* Lock flag */ - DBT *objp; /* Object to lock */ - db_lockmode_t mode; /* Lock mode */ - char *newname; /* New command name */ -{ - DB_LOCK *lock; - DBTCL_INFO *envip, *ip; - int result, ret; 
- - result = TCL_OK; - envip = _PtrToInfo((void *)envp); - if (envip == NULL) { - Tcl_SetResult(interp, "Could not find env info\n", TCL_STATIC); - return (TCL_ERROR); - } - snprintf(newname, MSG_SIZE, "%s.lock%d", - envip->i_name, envip->i_envlockid); - ip = _NewInfo(interp, NULL, newname, I_LOCK); - if (ip == NULL) { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - return (TCL_ERROR); - } - ret = __os_malloc(envp, sizeof(DB_LOCK), &lock); - if (ret != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->lock_get(envp, lockid, flag, objp, mode, lock); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock get"); - if (result == TCL_ERROR) { - __os_free(envp, lock); - _DeleteInfo(ip); - return (result); - } - /* - * Success. Set up return. Set up new info - * and command widget for this lock. - */ - ret = __os_malloc(envp, objp->size, &ip->i_lockobj.data); - if (ret != 0) { - Tcl_SetResult(interp, "Could not duplicate obj", - TCL_STATIC); - (void)envp->lock_put(envp, lock); - __os_free(envp, lock); - _DeleteInfo(ip); - result = TCL_ERROR; - goto error; - } - memcpy(ip->i_lockobj.data, objp->data, objp->size); - ip->i_lockobj.size = objp->size; - envip->i_envlockid++; - ip->i_parent = envip; - ip->i_locker = lockid; - _SetInfoData(ip, lock); - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)lock_Cmd, (ClientData)lock, NULL); -error: - return (result); -} -#endif diff --git a/storage/bdb/tcl/tcl_log.c b/storage/bdb/tcl/tcl_log.c deleted file mode 100644 index 68c678101fe..00000000000 --- a/storage/bdb/tcl/tcl_log.c +++ /dev/null @@ -1,611 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_log.c,v 11.61 2004/04/05 20:18:32 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/log.h" -#include "dbinc/tcl_db.h" -#include "dbinc/txn.h" - -#ifdef CONFIG_TEST -static int tcl_LogcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_LOGC *)); - -/* - * tcl_LogArchive -- - * - * PUBLIC: int tcl_LogArchive __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogArchive(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - static const char *archopts[] = { - "-arch_abs", "-arch_data", "-arch_log", "-arch_remove", - NULL - }; - enum archopts { - ARCH_ABS, ARCH_DATA, ARCH_LOG, ARCH_REMOVE - }; - Tcl_Obj *fileobj, *res; - u_int32_t flag; - int i, optindex, result, ret; - char **file, **list; - - result = TCL_OK; - flag = 0; - /* - * Get the flag index from the object based on the options - * defined above. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - archopts, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[i])); - i++; - switch ((enum archopts)optindex) { - case ARCH_ABS: - flag |= DB_ARCH_ABS; - break; - case ARCH_DATA: - flag |= DB_ARCH_DATA; - break; - case ARCH_LOG: - flag |= DB_ARCH_LOG; - break; - case ARCH_REMOVE: - flag |= DB_ARCH_REMOVE; - break; - } - } - _debug_check(); - list = NULL; - ret = envp->log_archive(envp, &list, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log archive"); - if (result == TCL_OK) { - res = Tcl_NewListObj(0, NULL); - for (file = list; file != NULL && *file != NULL; file++) { - fileobj = NewStringObj(*file, strlen(*file)); - result = Tcl_ListObjAppendElement(interp, res, fileobj); - if (result != TCL_OK) - break; - } - Tcl_SetObjResult(interp, res); - } - if (list != NULL) - __os_ufree(envp, list); - return (result); -} - -/* - * tcl_LogCompare -- - * - * PUBLIC: int tcl_LogCompare __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*)); - */ -int -tcl_LogCompare(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - DB_LSN lsn0, lsn1; - Tcl_Obj *res; - int result, ret; - - result = TCL_OK; - /* - * No flags, must be 4 args. 
- */ - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "lsn1 lsn2"); - return (TCL_ERROR); - } - - result = _GetLsn(interp, objv[2], &lsn0); - if (result == TCL_ERROR) - return (result); - result = _GetLsn(interp, objv[3], &lsn1); - if (result == TCL_ERROR) - return (result); - - _debug_check(); - ret = log_compare(&lsn0, &lsn1); - res = Tcl_NewIntObj(ret); - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * tcl_LogFile -- - * - * PUBLIC: int tcl_LogFile __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogFile(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DB_LSN lsn; - Tcl_Obj *res; - size_t len; - int result, ret; - char *name; - - result = TCL_OK; - /* - * No flags, must be 3 args. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "lsn"); - return (TCL_ERROR); - } - - result = _GetLsn(interp, objv[2], &lsn); - if (result == TCL_ERROR) - return (result); - - len = MSG_SIZE; - ret = ENOMEM; - name = NULL; - while (ret == ENOMEM) { - if (name != NULL) - __os_free(envp, name); - ret = __os_malloc(envp, len, &name); - if (ret != 0) { - Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); - break; - } - _debug_check(); - ret = envp->log_file(envp, &lsn, name, len); - len *= 2; - } - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_file"); - if (ret == 0) { - res = NewStringObj(name, strlen(name)); - Tcl_SetObjResult(interp, res); - } - - if (name != NULL) - __os_free(envp, name); - - return (result); -} - -/* - * tcl_LogFlush -- - * - * PUBLIC: int tcl_LogFlush __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogFlush(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DB_LSN lsn, *lsnp; - int result, ret; - - result = TCL_OK; - /* - * No flags, must be 2 or 3 args. - */ - if (objc > 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?lsn?"); - return (TCL_ERROR); - } - - if (objc == 3) { - lsnp = &lsn; - result = _GetLsn(interp, objv[2], &lsn); - if (result == TCL_ERROR) - return (result); - } else - lsnp = NULL; - - _debug_check(); - ret = envp->log_flush(envp, lsnp); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_flush"); - return (result); -} - -/* - * tcl_LogGet -- - * - * PUBLIC: int tcl_LogGet __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogGet(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - - COMPQUIET(objv, NULL); - COMPQUIET(objc, 0); - COMPQUIET(envp, NULL); - - Tcl_SetResult(interp, "FAIL: log_get deprecated\n", TCL_STATIC); - return (TCL_ERROR); -} - -/* - * tcl_LogPut -- - * - * PUBLIC: int tcl_LogPut __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogPut(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - static const char *logputopts[] = { - "-flush", - NULL - }; - enum logputopts { - LOGPUT_FLUSH - }; - DB_LSN lsn; - DBT data; - Tcl_Obj *intobj, *res; - void *dtmp; - u_int32_t flag; - int freedata, optindex, result, ret; - - result = TCL_OK; - flag = 0; - freedata = 0; - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? record"); - return (TCL_ERROR); - } - - /* - * Data/record must be the last arg. 
- */ - memset(&data, 0, sizeof(data)); - ret = _CopyObjBytes(interp, objv[objc-1], &dtmp, - &data.size, &freedata); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "log put"); - return (result); - } - data.data = dtmp; - - /* - * Get the command name index from the object based on the options - * defined above. - */ - if (objc == 4) { - if (Tcl_GetIndexFromObj(interp, objv[2], - logputopts, "option", TCL_EXACT, &optindex) != TCL_OK) { - return (IS_HELP(objv[2])); - } - switch ((enum logputopts)optindex) { - case LOGPUT_FLUSH: - flag = DB_FLUSH; - break; - } - } - - if (result == TCL_ERROR) - return (result); - - _debug_check(); - ret = envp->log_put(envp, &lsn, &data, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_put"); - if (result == TCL_ERROR) - return (result); - res = Tcl_NewListObj(0, NULL); - intobj = Tcl_NewWideIntObj((Tcl_WideInt)lsn.file); - result = Tcl_ListObjAppendElement(interp, res, intobj); - intobj = Tcl_NewWideIntObj((Tcl_WideInt)lsn.offset); - result = Tcl_ListObjAppendElement(interp, res, intobj); - Tcl_SetObjResult(interp, res); - if (freedata) - __os_free(NULL, dtmp); - return (result); -} -/* - * tcl_LogStat -- - * - * PUBLIC: int tcl_LogStat __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_LogStat(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DB_LOG_STAT *sp; - Tcl_Obj *res; - int result, ret; - - result = TCL_OK; - /* - * No args for this. Error if there are some. 
- */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->log_stat(envp, &sp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log stat"); - if (result == TCL_ERROR) - return (result); - - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - /* - * MAKE_STAT_LIST assumes 'res' and 'error' label. - */ - MAKE_STAT_LIST("Magic", sp->st_magic); - MAKE_STAT_LIST("Log file Version", sp->st_version); - MAKE_STAT_LIST("Region size", sp->st_regsize); - MAKE_STAT_LIST("Log file mode", sp->st_mode); - MAKE_STAT_LIST("Log record cache size", sp->st_lg_bsize); - MAKE_STAT_LIST("Current log file size", sp->st_lg_size); - MAKE_STAT_LIST("Mbytes written", sp->st_w_mbytes); - MAKE_STAT_LIST("Bytes written (over Mb)", sp->st_w_bytes); - MAKE_STAT_LIST("Mbytes written since checkpoint", sp->st_wc_mbytes); - MAKE_STAT_LIST("Bytes written (over Mb) since checkpoint", - sp->st_wc_bytes); - MAKE_STAT_LIST("Times log written", sp->st_wcount); - MAKE_STAT_LIST("Times log written because cache filled up", - sp->st_wcount_fill); - MAKE_STAT_LIST("Times log flushed", sp->st_scount); - MAKE_STAT_LIST("Current log file number", sp->st_cur_file); - MAKE_STAT_LIST("Current log file offset", sp->st_cur_offset); - MAKE_STAT_LIST("On-disk log file number", sp->st_disk_file); - MAKE_STAT_LIST("On-disk log file offset", sp->st_disk_offset); - MAKE_STAT_LIST("Max commits in a log flush", sp->st_maxcommitperflush); - MAKE_STAT_LIST("Min commits in a log flush", sp->st_mincommitperflush); - MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait); - MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait); - Tcl_SetObjResult(interp, res); -error: - __os_ufree(envp, sp); - return (result); -} - -/* - * logc_Cmd -- - * Implements the log cursor command. 
- * - * PUBLIC: int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - */ -int -logc_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Cursor handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *logccmds[] = { - "close", - "get", - NULL - }; - enum logccmds { - LOGCCLOSE, - LOGCGET - }; - DB_LOGC *logc; - DBTCL_INFO *logcip; - int cmdindex, result, ret; - - Tcl_ResetResult(interp); - logc = (DB_LOGC *)clientData; - logcip = _PtrToInfo((void *)logc); - result = TCL_OK; - - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (logc == NULL) { - Tcl_SetResult(interp, "NULL logc pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (logcip == NULL) { - Tcl_SetResult(interp, "NULL logc info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the berkdbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, objv[1], logccmds, "command", - TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - switch ((enum logccmds)cmdindex) { - case LOGCCLOSE: - /* - * No args for this. Error if there are some. 
- */ - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = logc->close(logc, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "logc close"); - if (result == TCL_OK) { - (void)Tcl_DeleteCommand(interp, logcip->i_name); - _DeleteInfo(logcip); - } - break; - case LOGCGET: - result = tcl_LogcGet(interp, objc, objv, logc); - break; - } - return (result); -} - -static int -tcl_LogcGet(interp, objc, objv, logc) - Tcl_Interp *interp; - int objc; - Tcl_Obj * CONST *objv; - DB_LOGC *logc; -{ - static const char *logcgetopts[] = { - "-current", - "-first", - "-last", - "-next", - "-prev", - "-set", - NULL - }; - enum logcgetopts { - LOGCGET_CURRENT, - LOGCGET_FIRST, - LOGCGET_LAST, - LOGCGET_NEXT, - LOGCGET_PREV, - LOGCGET_SET - }; - DB_LSN lsn; - DBT data; - Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res; - u_int32_t flag; - int i, myobjc, optindex, result, ret; - - result = TCL_OK; - res = NULL; - flag = 0; - - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn"); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the options - * defined above. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - logcgetopts, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[i])); - i++; - switch ((enum logcgetopts)optindex) { - case LOGCGET_CURRENT: - FLAG_CHECK(flag); - flag |= DB_CURRENT; - break; - case LOGCGET_FIRST: - FLAG_CHECK(flag); - flag |= DB_FIRST; - break; - case LOGCGET_LAST: - FLAG_CHECK(flag); - flag |= DB_LAST; - break; - case LOGCGET_NEXT: - FLAG_CHECK(flag); - flag |= DB_NEXT; - break; - case LOGCGET_PREV: - FLAG_CHECK(flag); - flag |= DB_PREV; - break; - case LOGCGET_SET: - FLAG_CHECK(flag); - flag |= DB_SET; - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?"); - result = TCL_ERROR; - break; - } - result = _GetLsn(interp, objv[i++], &lsn); - break; - } - } - - if (result == TCL_ERROR) - return (result); - - memset(&data, 0, sizeof(data)); - - _debug_check(); - ret = logc->get(logc, &lsn, &data, flag); - - res = Tcl_NewListObj(0, NULL); - if (res == NULL) - goto memerr; - - if (ret == 0) { - /* - * Success. Set up return list as {LSN data} where LSN - * is a sublist {file offset}. 
- */ - myobjc = 2; - myobjv[0] = Tcl_NewWideIntObj((Tcl_WideInt)lsn.file); - myobjv[1] = Tcl_NewWideIntObj((Tcl_WideInt)lsn.offset); - lsnlist = Tcl_NewListObj(myobjc, myobjv); - if (lsnlist == NULL) - goto memerr; - - result = Tcl_ListObjAppendElement(interp, res, lsnlist); - dataobj = NewStringObj(data.data, data.size); - if (dataobj == NULL) { - goto memerr; - } - result = Tcl_ListObjAppendElement(interp, res, dataobj); - } else - result = _ReturnSetup(interp, ret, DB_RETOK_LGGET(ret), - "DB_LOGC->get"); - - Tcl_SetObjResult(interp, res); - - if (0) { -memerr: if (res != NULL) - Tcl_DecrRefCount(res); - Tcl_SetResult(interp, "allocation failed", TCL_STATIC); - } - - return (result); -} -#endif diff --git a/storage/bdb/tcl/tcl_mp.c b/storage/bdb/tcl/tcl_mp.c deleted file mode 100644 index 29691a31c50..00000000000 --- a/storage/bdb/tcl/tcl_mp.c +++ /dev/null @@ -1,929 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_mp.c,v 11.58 2004/10/07 16:48:39 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -#ifdef CONFIG_TEST -static int mp_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int pg_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); -static int tcl_MpGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - DB_MPOOLFILE *, DBTCL_INFO *)); -static int tcl_Pg __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - void *, DB_MPOOLFILE *, DBTCL_INFO *, int)); -static int tcl_PgInit __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - void *, DBTCL_INFO *)); -static int tcl_PgIsset __P((Tcl_Interp *, int, Tcl_Obj * CONST*, - void *, DBTCL_INFO *)); -#endif - -/* - * _MpInfoDelete -- - * Removes "sub" mp page info structures that are children - * of this mp. 
- * - * PUBLIC: void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *)); - */ -void -_MpInfoDelete(interp, mpip) - Tcl_Interp *interp; /* Interpreter */ - DBTCL_INFO *mpip; /* Info for mp */ -{ - DBTCL_INFO *nextp, *p; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) { - /* - * Check if this info structure "belongs" to this - * mp. Remove its commands and info structure. - */ - nextp = LIST_NEXT(p, entries); - if (p->i_parent == mpip && p->i_type == I_PG) { - (void)Tcl_DeleteCommand(interp, p->i_name); - _DeleteInfo(p); - } - } -} - -#ifdef CONFIG_TEST -/* - * tcl_MpSync -- - * - * PUBLIC: int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_MpSync(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - - DB_LSN lsn, *lsnp; - int result, ret; - - result = TCL_OK; - lsnp = NULL; - /* - * No flags, must be 3 args. - */ - if (objc == 3) { - result = _GetLsn(interp, objv[2], &lsn); - if (result == TCL_ERROR) - return (result); - lsnp = &lsn; - } - else if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, "lsn"); - return (TCL_ERROR); - } - - _debug_check(); - ret = envp->memp_sync(envp, lsnp); - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync")); -} - -/* - * tcl_MpTrickle -- - * - * PUBLIC: int tcl_MpTrickle __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_MpTrickle(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - - Tcl_Obj *res; - int pages, percent, result, ret; - - result = TCL_OK; - /* - * No flags, must be 3 args. 
- */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "percent"); - return (TCL_ERROR); - } - - result = Tcl_GetIntFromObj(interp, objv[2], &percent); - if (result == TCL_ERROR) - return (result); - - _debug_check(); - ret = envp->memp_trickle(envp, percent, &pages); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp trickle"); - if (result == TCL_ERROR) - return (result); - - res = Tcl_NewIntObj(pages); - Tcl_SetObjResult(interp, res); - return (result); - -} - -/* - * tcl_Mp -- - * - * PUBLIC: int tcl_Mp __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *)); - */ -int -tcl_Mp(interp, objc, objv, envp, envip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ - DBTCL_INFO *envip; /* Info pointer */ -{ - static const char *mpopts[] = { - "-create", - "-mode", - "-nommap", - "-pagesize", - "-rdonly", - NULL - }; - enum mpopts { - MPCREATE, - MPMODE, - MPNOMMAP, - MPPAGE, - MPRDONLY - }; - DBTCL_INFO *ip; - DB_MPOOLFILE *mpf; - Tcl_Obj *res; - u_int32_t flag; - int i, pgsize, mode, optindex, result, ret; - char *file, newname[MSG_SIZE]; - - result = TCL_OK; - i = 2; - flag = 0; - mode = 0; - pgsize = 0; - memset(newname, 0, MSG_SIZE); - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - mpopts, "option", TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get an errant - * error message if there is another error. - * This arg is the file name. 
- */ - if (IS_HELP(objv[i]) == TCL_OK) - return (TCL_OK); - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum mpopts)optindex) { - case MPCREATE: - flag |= DB_CREATE; - break; - case MPNOMMAP: - flag |= DB_NOMMAP; - break; - case MPPAGE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-pagesize size?"); - result = TCL_ERROR; - break; - } - /* - * Don't need to check result here because - * if TCL_ERROR, the error message is already - * set up, and we'll bail out below. If ok, - * the mode is set and we go on. - */ - result = Tcl_GetIntFromObj(interp, objv[i++], &pgsize); - break; - case MPRDONLY: - flag |= DB_RDONLY; - break; - case MPMODE: - if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-mode mode?"); - result = TCL_ERROR; - break; - } - /* - * Don't need to check result here because - * if TCL_ERROR, the error message is already - * set up, and we'll bail out below. If ok, - * the mode is set and we go on. - */ - result = Tcl_GetIntFromObj(interp, objv[i++], &mode); - break; - } - if (result != TCL_OK) - goto error; - } - /* - * Any left over arg is a file name. It better be the last arg. - */ - file = NULL; - if (i != objc) { - if (i != objc - 1) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?"); - result = TCL_ERROR; - goto error; - } - file = Tcl_GetStringFromObj(objv[i++], NULL); - } - - snprintf(newname, sizeof(newname), "%s.mp%d", - envip->i_name, envip->i_envmpid); - ip = _NewInfo(interp, NULL, newname, I_MP); - if (ip == NULL) { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - return (TCL_ERROR); - } - - _debug_check(); - if ((ret = envp->memp_fcreate(envp, &mpf, 0)) != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool"); - _DeleteInfo(ip); - goto error; - } - - /* - * XXX - * Interface doesn't currently support DB_MPOOLFILE configuration. 
- */ - if ((ret = mpf->open(mpf, file, flag, mode, (size_t)pgsize)) != 0) { - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool"); - _DeleteInfo(ip); - - (void)mpf->close(mpf, 0); - goto error; - } - - /* - * Success. Set up return. Set up new info and command widget for - * this mpool. - */ - envip->i_envmpid++; - ip->i_parent = envip; - ip->i_pgsz = pgsize; - _SetInfoData(ip, mpf); - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL); - res = NewStringObj(newname, strlen(newname)); - Tcl_SetObjResult(interp, res); - -error: - return (result); -} - -/* - * tcl_MpStat -- - * - * PUBLIC: int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_MpStat(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DB_MPOOL_STAT *sp; - DB_MPOOL_FSTAT **fsp, **savefsp; - int result; - int ret; - Tcl_Obj *res; - Tcl_Obj *res1; - - result = TCL_OK; - savefsp = NULL; - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->memp_stat(envp, &sp, &fsp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp stat"); - if (result == TCL_ERROR) - return (result); - - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - /* - * MAKE_STAT_LIST assumes 'res' and 'error' label. 
- */ - MAKE_STAT_LIST("Cache size (gbytes)", sp->st_gbytes); - MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes); - MAKE_STAT_LIST("Number of caches", sp->st_ncache); - MAKE_STAT_LIST("Region size", sp->st_regsize); - MAKE_STAT_LIST("Maximum memory-mapped file size", sp->st_mmapsize); - MAKE_STAT_LIST("Maximum open file descriptors", sp->st_maxopenfd); - MAKE_STAT_LIST("Maximum sequential buffer writes", sp->st_maxwrite); - MAKE_STAT_LIST( - "Sleep after writing maximum buffers", sp->st_maxwrite_sleep); - MAKE_STAT_LIST("Pages mapped into address space", sp->st_map); - MAKE_STAT_LIST("Cache hits", sp->st_cache_hit); - MAKE_STAT_LIST("Cache misses", sp->st_cache_miss); - MAKE_STAT_LIST("Pages created", sp->st_page_create); - MAKE_STAT_LIST("Pages read in", sp->st_page_in); - MAKE_STAT_LIST("Pages written", sp->st_page_out); - MAKE_STAT_LIST("Clean page evictions", sp->st_ro_evict); - MAKE_STAT_LIST("Dirty page evictions", sp->st_rw_evict); - MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle); - MAKE_STAT_LIST("Cached pages", sp->st_pages); - MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean); - MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty); - MAKE_STAT_LIST("Hash buckets", sp->st_hash_buckets); - MAKE_STAT_LIST("Hash lookups", sp->st_hash_searches); - MAKE_STAT_LIST("Longest hash chain found", sp->st_hash_longest); - MAKE_STAT_LIST("Hash elements examined", sp->st_hash_examined); - MAKE_STAT_LIST("Number of hash bucket nowaits", sp->st_hash_nowait); - MAKE_STAT_LIST("Number of hash bucket waits", sp->st_hash_wait); - MAKE_STAT_LIST("Maximum number of hash bucket waits", - sp->st_hash_max_wait); - MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait); - MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait); - MAKE_STAT_LIST("Page allocations", sp->st_alloc); - MAKE_STAT_LIST("Buckets examined during allocation", - sp->st_alloc_buckets); - MAKE_STAT_LIST("Maximum buckets examined during allocation", - 
sp->st_alloc_max_buckets); - MAKE_STAT_LIST("Pages examined during allocation", sp->st_alloc_pages); - MAKE_STAT_LIST("Maximum pages examined during allocation", - sp->st_alloc_max_pages); - - /* - * Save global stat list as res1. The MAKE_STAT_LIST - * macro assumes 'res' so we'll use that to build up - * our per-file sublist. - */ - res1 = res; - for (savefsp = fsp; fsp != NULL && *fsp != NULL; fsp++) { - res = Tcl_NewObj(); - result = _SetListElem(interp, res, "File Name", - strlen("File Name"), (*fsp)->file_name, - strlen((*fsp)->file_name)); - if (result != TCL_OK) - goto error; - MAKE_STAT_LIST("Page size", (*fsp)->st_pagesize); - MAKE_STAT_LIST("Pages mapped into address space", - (*fsp)->st_map); - MAKE_STAT_LIST("Cache hits", (*fsp)->st_cache_hit); - MAKE_STAT_LIST("Cache misses", (*fsp)->st_cache_miss); - MAKE_STAT_LIST("Pages created", (*fsp)->st_page_create); - MAKE_STAT_LIST("Pages read in", (*fsp)->st_page_in); - MAKE_STAT_LIST("Pages written", (*fsp)->st_page_out); - /* - * Now that we have a complete "per-file" stat list, append - * that to the other list. - */ - result = Tcl_ListObjAppendElement(interp, res1, res); - if (result != TCL_OK) - goto error; - } - Tcl_SetObjResult(interp, res1); -error: - __os_ufree(envp, sp); - if (savefsp != NULL) - __os_ufree(envp, savefsp); - return (result); -} - -/* - * mp_Cmd -- - * Implements the "mp" widget. - */ -static int -mp_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Mp handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *mpcmds[] = { - "close", - "fsync", - "get", - "get_clear_len", - "get_fileid", - "get_ftype", - "get_lsn_offset", - "get_pgcookie", - NULL - }; - enum mpcmds { - MPCLOSE, - MPFSYNC, - MPGET, - MPGETCLEARLEN, - MPGETFILEID, - MPGETFTYPE, - MPGETLSNOFFSET, - MPGETPGCOOKIE - }; - DB_MPOOLFILE *mp; - int cmdindex, ftype, length, result, ret; - DBTCL_INFO *mpip; - Tcl_Obj *res; - char *obj_name; - u_int32_t value; - int32_t intval; - u_int8_t fileid[DB_FILE_ID_LEN]; - DBT cookie; - - Tcl_ResetResult(interp); - mp = (DB_MPOOLFILE *)clientData; - obj_name = Tcl_GetStringFromObj(objv[0], &length); - mpip = _NameToInfo(obj_name); - result = TCL_OK; - - if (mp == NULL) { - Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (mpip == NULL) { - Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], mpcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum mpcmds)cmdindex) { - case MPCLOSE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = mp->close(mp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp close"); - _MpInfoDelete(interp, mpip); - (void)Tcl_DeleteCommand(interp, mpip->i_name); - _DeleteInfo(mpip); - break; - case MPFSYNC: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = mp->sync(mp); - res = Tcl_NewIntObj(ret); - break; - case MPGET: - result = tcl_MpGet(interp, objc, objv, mp, mpip); - break; - case MPGETCLEARLEN: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = mp->get_clear_len(mp, &value); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp get_clear_len")) == TCL_OK) - res = Tcl_NewIntObj((int)value); - break; - case MPGETFILEID: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = mp->get_fileid(mp, fileid); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp get_fileid")) == TCL_OK) - res = NewStringObj((char *)fileid, DB_FILE_ID_LEN); - break; - case MPGETFTYPE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = mp->get_ftype(mp, &ftype); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp get_ftype")) == TCL_OK) - res = Tcl_NewIntObj(ftype); - break; - case MPGETLSNOFFSET: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = mp->get_lsn_offset(mp, &intval); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp get_lsn_offset")) == TCL_OK) - res = Tcl_NewIntObj(intval); - break; - case MPGETPGCOOKIE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - 
return (TCL_ERROR); - } - memset(&cookie, 0, sizeof(DBT)); - ret = mp->get_pgcookie(mp, &cookie); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "mp get_pgcookie")) == TCL_OK) - res = Tcl_NewByteArrayObj((u_char *)cookie.data, - (int)cookie.size); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * tcl_MpGet -- - */ -static int -tcl_MpGet(interp, objc, objv, mp, mpip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_MPOOLFILE *mp; /* mp pointer */ - DBTCL_INFO *mpip; /* mp info pointer */ -{ - static const char *mpget[] = { - "-create", - "-last", - "-new", - NULL - }; - enum mpget { - MPGET_CREATE, - MPGET_LAST, - MPGET_NEW - }; - - DBTCL_INFO *ip; - Tcl_Obj *res; - db_pgno_t pgno; - u_int32_t flag; - int i, ipgno, optindex, result, ret; - char newname[MSG_SIZE]; - void *page; - - result = TCL_OK; - memset(newname, 0, MSG_SIZE); - i = 2; - flag = 0; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - mpget, "option", TCL_EXACT, &optindex) != TCL_OK) { - /* - * Reset the result so we don't get an errant - * error message if there is another error. - * This arg is the page number. - */ - if (IS_HELP(objv[i]) == TCL_OK) - return (TCL_OK); - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum mpget)optindex) { - case MPGET_CREATE: - flag |= DB_MPOOL_CREATE; - break; - case MPGET_LAST: - flag |= DB_MPOOL_LAST; - break; - case MPGET_NEW: - flag |= DB_MPOOL_NEW; - break; - } - if (result != TCL_OK) - goto error; - } - /* - * Any left over arg is a page number. It better be the last arg. - */ - ipgno = 0; - if (i != objc) { - if (i != objc - 1) { - Tcl_WrongNumArgs(interp, 2, objv, "?args? 
?pgno?"); - result = TCL_ERROR; - goto error; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &ipgno); - if (result != TCL_OK) - goto error; - } - - snprintf(newname, sizeof(newname), "%s.pg%d", - mpip->i_name, mpip->i_mppgid); - ip = _NewInfo(interp, NULL, newname, I_PG); - if (ip == NULL) { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - return (TCL_ERROR); - } - _debug_check(); - pgno = (db_pgno_t)ipgno; - ret = mp->get(mp, &pgno, flag, &page); - result = _ReturnSetup(interp, ret, DB_RETOK_MPGET(ret), "mpool get"); - if (result == TCL_ERROR) - _DeleteInfo(ip); - else { - /* - * Success. Set up return. Set up new info - * and command widget for this mpool. - */ - mpip->i_mppgid++; - ip->i_parent = mpip; - ip->i_pgno = pgno; - ip->i_pgsz = mpip->i_pgsz; - _SetInfoData(ip, page); - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)pg_Cmd, (ClientData)page, NULL); - res = NewStringObj(newname, strlen(newname)); - Tcl_SetObjResult(interp, res); - } -error: - return (result); -} - -/* - * pg_Cmd -- - * Implements the "pg" widget. - */ -static int -pg_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Page handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *pgcmds[] = { - "init", - "is_setto", - "pgnum", - "pgsize", - "put", - "set", - NULL - }; - enum pgcmds { - PGINIT, - PGISSET, - PGNUM, - PGSIZE, - PGPUT, - PGSET - }; - DB_MPOOLFILE *mp; - int cmdindex, length, result; - char *obj_name; - void *page; - DBTCL_INFO *pgip; - Tcl_Obj *res; - - Tcl_ResetResult(interp); - page = (void *)clientData; - obj_name = Tcl_GetStringFromObj(objv[0], &length); - pgip = _NameToInfo(obj_name); - mp = NAME_TO_MP(pgip->i_parent->i_name); - result = TCL_OK; - - if (page == NULL) { - Tcl_SetResult(interp, "NULL page pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (mp == NULL) { - Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (pgip == NULL) { - Tcl_SetResult(interp, "NULL page info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. - */ - if (Tcl_GetIndexFromObj(interp, - objv[1], pgcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum pgcmds)cmdindex) { - case PGNUM: - res = Tcl_NewWideIntObj((Tcl_WideInt)pgip->i_pgno); - break; - case PGSIZE: - res = Tcl_NewWideIntObj((Tcl_WideInt)pgip->i_pgsz); - break; - case PGSET: - case PGPUT: - result = tcl_Pg(interp, objc, objv, page, mp, pgip, - (enum pgcmds)cmdindex == PGSET ? 0 : 1); - break; - case PGINIT: - result = tcl_PgInit(interp, objc, objv, page, pgip); - break; - case PGISSET: - result = tcl_PgIsset(interp, objc, objv, page, pgip); - break; - } - - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. - */ - if (result == TCL_OK && res != NULL) - Tcl_SetObjResult(interp, res); - return (result); -} - -static int -tcl_Pg(interp, objc, objv, page, mp, pgip, putop) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - void *page; /* Page pointer */ - DB_MPOOLFILE *mp; /* Mpool pointer */ - DBTCL_INFO *pgip; /* Info pointer */ - int putop; /* Operation */ -{ - static const char *pgopt[] = { - "-clean", - "-dirty", - "-discard", - NULL - }; - enum pgopt { - PGCLEAN, - PGDIRTY, - PGDISCARD - }; - u_int32_t flag; - int i, optindex, result, ret; - - result = TCL_OK; - i = 2; - flag = 0; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - pgopt, "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[i])); - i++; - switch ((enum pgopt)optindex) { - case PGCLEAN: - flag |= DB_MPOOL_CLEAN; - break; - case PGDIRTY: - flag |= DB_MPOOL_DIRTY; - break; - case PGDISCARD: - flag |= DB_MPOOL_DISCARD; - break; - } - } - - _debug_check(); - if (putop) - ret = mp->put(mp, page, flag); - else - ret = mp->set(mp, page, flag); - - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "page"); - - if (putop) { - (void)Tcl_DeleteCommand(interp, pgip->i_name); - _DeleteInfo(pgip); - } - return (result); -} - -static int -tcl_PgInit(interp, objc, objv, page, pgip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - void *page; /* Page pointer */ - DBTCL_INFO *pgip; /* Info pointer */ -{ - Tcl_Obj *res; - long *p, *endp, newval; - int length, pgsz, result; - u_char *s; - - result = TCL_OK; - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "val"); - return (TCL_ERROR); - } - - pgsz = pgip->i_pgsz; - result = Tcl_GetLongFromObj(interp, objv[2], &newval); - if (result != TCL_OK) { - s = Tcl_GetByteArrayFromObj(objv[2], &length); - if (s == NULL) - return (TCL_ERROR); - memcpy(page, s, (size_t)((length < pgsz) ? 
length : pgsz)); - result = TCL_OK; - } else { - p = (long *)page; - for (endp = p + ((u_int)pgsz / sizeof(long)); p < endp; p++) - *p = newval; - } - res = Tcl_NewIntObj(0); - Tcl_SetObjResult(interp, res); - return (result); -} - -static int -tcl_PgIsset(interp, objc, objv, page, pgip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - void *page; /* Page pointer */ - DBTCL_INFO *pgip; /* Info pointer */ -{ - Tcl_Obj *res; - long *p, *endp, newval; - int length, pgsz, result; - u_char *s; - - result = TCL_OK; - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "val"); - return (TCL_ERROR); - } - - pgsz = pgip->i_pgsz; - result = Tcl_GetLongFromObj(interp, objv[2], &newval); - if (result != TCL_OK) { - if ((s = Tcl_GetByteArrayFromObj(objv[2], &length)) == NULL) - return (TCL_ERROR); - result = TCL_OK; - - if (memcmp(page, s, - (size_t)((length < pgsz) ? length : pgsz)) != 0) { - res = Tcl_NewIntObj(0); - Tcl_SetObjResult(interp, res); - return (result); - } - } else { - p = (long *)page; - /* - * If any value is not the same, return 0 (is not set to - * this value). Otherwise, if we finish the loop, we return 1 - * (is set to this value). - */ - for (endp = p + ((u_int)pgsz / sizeof(long)); p < endp; p++) - if (*p != newval) { - res = Tcl_NewIntObj(0); - Tcl_SetObjResult(interp, res); - return (result); - } - } - - res = Tcl_NewIntObj(1); - Tcl_SetObjResult(interp, res); - return (result); -} -#endif diff --git a/storage/bdb/tcl/tcl_rep.c b/storage/bdb/tcl/tcl_rep.c deleted file mode 100644 index e0e6a1a883c..00000000000 --- a/storage/bdb/tcl/tcl_rep.c +++ /dev/null @@ -1,489 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_rep.c,v 11.106 2004/10/14 18:09:00 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -#ifdef CONFIG_TEST -/* - * tcl_RepElect -- - * Call DB_ENV->rep_elect(). - * - * PUBLIC: int tcl_RepElect - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepElect(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Environment pointer */ -{ - int eid, nsites, nvotes, pri, result, ret; - u_int32_t timeout; - - if (objc != 6) { - Tcl_WrongNumArgs(interp, 6, objv, "nsites pri timeout"); - return (TCL_ERROR); - } - - if ((result = Tcl_GetIntFromObj(interp, objv[2], &nsites)) != TCL_OK) - return (result); - if ((result = Tcl_GetIntFromObj(interp, objv[3], &nvotes)) != TCL_OK) - return (result); - if ((result = Tcl_GetIntFromObj(interp, objv[4], &pri)) != TCL_OK) - return (result); - if ((result = _GetUInt32(interp, objv[5], &timeout)) != TCL_OK) - return (result); - - _debug_check(); - if ((ret = dbenv->rep_elect(dbenv, nsites, nvotes, - pri, timeout, &eid, 0)) != 0) - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env rep_elect")); - - Tcl_SetObjResult(interp, Tcl_NewIntObj(eid)); - - return (TCL_OK); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_RepFlush -- - * Call DB_ENV->rep_flush(). 
- * - * PUBLIC: int tcl_RepFlush - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepFlush(interp, objc, objv, dbenv) - Tcl_Interp *interp; - int objc; - Tcl_Obj *CONST objv[]; - DB_ENV *dbenv; -{ - int ret; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, ""); - return TCL_ERROR; - } - - _debug_check(); - ret = dbenv->rep_flush(dbenv); - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_flush")); -} -#endif -#ifdef CONFIG_TEST -/* - * tcl_RepLimit -- - * Call DB_ENV->set_rep_limit(). - * - * PUBLIC: int tcl_RepLimit - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepLimit(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Environment pointer */ -{ - int result, ret; - u_int32_t bytes, gbytes; - - if (objc != 4) { - Tcl_WrongNumArgs(interp, 4, objv, "gbytes bytes"); - return (TCL_ERROR); - } - - if ((result = _GetUInt32(interp, objv[2], &gbytes)) != TCL_OK) - return (result); - if ((result = _GetUInt32(interp, objv[3], &bytes)) != TCL_OK) - return (result); - - _debug_check(); - if ((ret = dbenv->set_rep_limit(dbenv, gbytes, bytes)) != 0) - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env set_rep_limit")); - - return (_ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "env set_rep_limit")); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_RepRequest -- - * Call DB_ENV->set_rep_request(). - * - * PUBLIC: int tcl_RepRequest - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepRequest(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Environment pointer */ -{ - int result, ret; - u_int32_t min, max; - - if (objc != 4) { - Tcl_WrongNumArgs(interp, 4, objv, "min max"); - return (TCL_ERROR); - } - - if ((result = _GetUInt32(interp, objv[2], &min)) != TCL_OK) - return (result); - if ((result = _GetUInt32(interp, objv[3], &max)) != TCL_OK) - return (result); - - _debug_check(); - if ((ret = dbenv->set_rep_request(dbenv, min, max)) != 0) - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "env set_rep_request")); - - return (_ReturnSetup(interp, - ret, DB_RETOK_STD(ret), "env set_rep_request")); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_RepStart -- - * Call DB_ENV->rep_start(). - * - * PUBLIC: int tcl_RepStart - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - * - * Note that this normally can/should be achieved as an argument to - * berkdb env, but we need to test forcible upgrading of clients, which - * involves calling this on an open environment handle. - */ -int -tcl_RepStart(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - static const char *tclrpstrt[] = { - "-client", - "-master", - NULL - }; - enum tclrpstrt { - TCL_RPSTRT_CLIENT, - TCL_RPSTRT_MASTER - }; - char *arg; - int i, optindex, ret; - u_int32_t flag; - - flag = 0; - - if (objc != 3) { - Tcl_WrongNumArgs(interp, 3, objv, "[-master/-client]"); - return (TCL_ERROR); - } - - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], tclrpstrt, - "option", TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') - return (IS_HELP(objv[i])); - else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum tclrpstrt)optindex) { - case TCL_RPSTRT_CLIENT: - flag |= DB_REP_CLIENT; - break; - case TCL_RPSTRT_MASTER: - flag |= DB_REP_MASTER; - break; - } - } - - _debug_check(); - ret = dbenv->rep_start(dbenv, NULL, flag); - return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_start")); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_RepProcessMessage -- - * Call DB_ENV->rep_process_message(). - * - * PUBLIC: int tcl_RepProcessMessage - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepProcessMessage(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; /* Environment pointer */ -{ - DBT control, rec; - DB_LSN permlsn; - Tcl_Obj *lsnlist, *myobjv[2], *res; - void *ctmp, *rtmp; - char *msg; - int eid; - int freectl, freerec, myobjc, result, ret; - - if (objc != 5) { - Tcl_WrongNumArgs(interp, 5, objv, "id control rec"); - return (TCL_ERROR); - } - freectl = freerec = 0; - - memset(&control, 0, sizeof(control)); - memset(&rec, 0, sizeof(rec)); - - if ((result = Tcl_GetIntFromObj(interp, objv[2], &eid)) != TCL_OK) - return (result); - - ret = _CopyObjBytes(interp, objv[3], &ctmp, - &control.size, &freectl); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_REPPMSG(ret), "rep_proc_msg"); - return (result); - } - control.data = ctmp; - ret = _CopyObjBytes(interp, objv[4], &rtmp, - &rec.size, &freerec); - if (ret != 0) { - result = _ReturnSetup(interp, ret, - DB_RETOK_REPPMSG(ret), "rep_proc_msg"); - goto out; - } - rec.data = rtmp; - _debug_check(); - ret = dbenv->rep_process_message(dbenv, &control, &rec, &eid, &permlsn); - /* - * !!! - * The TCL API diverges from the C++/Java APIs here. For us, it - * is OK to get DUPMASTER and HOLDELECTION for testing purposes. - */ - result = _ReturnSetup(interp, ret, - DB_RETOK_REPPMSG(ret) || ret == DB_REP_DUPMASTER || - ret == DB_REP_HOLDELECTION, - "env rep_process_message"); - - if (result != TCL_OK) - goto out; - - /* - * We have a valid return. We need to return a variety of information. - * It will be one of the following: - * {0 0} - Make a 0 return a list for consistent return structure. - * {DUPMASTER 0} - DUPMASTER, no other info needed. - * {HOLDELECTION 0} - HOLDELECTION, no other info needed. - * {NEWMASTER #} - NEWMASTER and its ID. - * {NEWSITE 0} - NEWSITE, no other info needed. - * {STARTUPDONE 0} - STARTUPDONE, no other info needed. - * {ISPERM {LSN list}} - ISPERM and the perm LSN. - * {NOTPERM {LSN list}} - NOTPERM and this msg's LSN. 
- */ - myobjc = 2; - switch (ret) { - case 0: - myobjv[0] = Tcl_NewIntObj(0); - myobjv[1] = Tcl_NewIntObj(0); - break; - case DB_REP_DUPMASTER: - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"DUPMASTER", (int)strlen("DUPMASTER")); - myobjv[1] = Tcl_NewIntObj(0); - break; - case DB_REP_HOLDELECTION: - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"HOLDELECTION", (int)strlen("HOLDELECTION")); - myobjv[1] = Tcl_NewIntObj(0); - break; - case DB_REP_ISPERM: - myobjv[0] = Tcl_NewLongObj((long)permlsn.file); - myobjv[1] = Tcl_NewLongObj((long)permlsn.offset); - lsnlist = Tcl_NewListObj(myobjc, myobjv); - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"ISPERM", (int)strlen("ISPERM")); - myobjv[1] = lsnlist; - break; - case DB_REP_NEWMASTER: - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"NEWMASTER", (int)strlen("NEWMASTER")); - myobjv[1] = Tcl_NewIntObj(eid); - break; - case DB_REP_NEWSITE: - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"NEWSITE", (int)strlen("NEWSITE")); - myobjv[1] = Tcl_NewIntObj(0); - break; - case DB_REP_NOTPERM: - myobjv[0] = Tcl_NewLongObj((long)permlsn.file); - myobjv[1] = Tcl_NewLongObj((long)permlsn.offset); - lsnlist = Tcl_NewListObj(myobjc, myobjv); - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"NOTPERM", (int)strlen("NOTPERM")); - myobjv[1] = lsnlist; - break; - case DB_REP_STARTUPDONE: - myobjv[0] = Tcl_NewByteArrayObj( - (u_char *)"STARTUPDONE", (int)strlen("STARTUPDONE")); - myobjv[1] = Tcl_NewIntObj(0); - break; - default: - msg = db_strerror(ret); - Tcl_AppendResult(interp, msg, NULL); - Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL); - result = TCL_ERROR; - goto out; - } - res = Tcl_NewListObj(myobjc, myobjv); - if (res != NULL) - Tcl_SetObjResult(interp, res); -out: - if (freectl) - __os_free(NULL, ctmp); - if (freerec) - __os_free(NULL, rtmp); - - return (result); -} -#endif - -#ifdef CONFIG_TEST -/* - * tcl_RepStat -- - * Call DB_ENV->rep_stat(). 
- * - * PUBLIC: int tcl_RepStat - * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *)); - */ -int -tcl_RepStat(interp, objc, objv, dbenv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *dbenv; -{ - DB_REP_STAT *sp; - Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist; - u_int32_t flag; - int myobjc, result, ret; - char *arg; - - result = TCL_OK; - flag = 0; - - if (objc > 3) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - if (objc == 3) { - arg = Tcl_GetStringFromObj(objv[2], NULL); - if (strcmp(arg, "-clear") == 0) - flag = DB_STAT_CLEAR; - else { - Tcl_SetResult(interp, - "db stat: unknown arg", TCL_STATIC); - return (TCL_ERROR); - } - } - - _debug_check(); - ret = dbenv->rep_stat(dbenv, &sp, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "rep stat"); - if (result == TCL_ERROR) - return (result); - - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - /* - * MAKE_STAT_* assumes 'res' and 'error' label. 
- */ - if (sp->st_status == DB_REP_MASTER) - MAKE_STAT_LIST("Master", 1); - else - MAKE_STAT_LIST("Client", 1); - MAKE_STAT_LSN("Next LSN expected", &sp->st_next_lsn); - MAKE_STAT_LSN("First missed LSN", &sp->st_waiting_lsn); - MAKE_STAT_LIST("Duplicate master conditions", sp->st_dupmasters); - MAKE_STAT_LIST("Environment ID", sp->st_env_id); - MAKE_STAT_LIST("Environment priority", sp->st_env_priority); - MAKE_STAT_LIST("Generation number", sp->st_gen); - MAKE_STAT_LIST("Election generation number", sp->st_egen); - MAKE_STAT_LIST("Startup complete", sp->st_startup_complete); - MAKE_STAT_LIST("Duplicate log records received", sp->st_log_duplicated); - MAKE_STAT_LIST("Current log records queued", sp->st_log_queued); - MAKE_STAT_LIST("Maximum log records queued", sp->st_log_queued_max); - MAKE_STAT_LIST("Total log records queued", sp->st_log_queued_total); - MAKE_STAT_LIST("Log records received", sp->st_log_records); - MAKE_STAT_LIST("Log records requested", sp->st_log_requested); - MAKE_STAT_LIST("Master environment ID", sp->st_master); - MAKE_STAT_LIST("Master changes", sp->st_master_changes); - MAKE_STAT_LIST("Messages with bad generation number", - sp->st_msgs_badgen); - MAKE_STAT_LIST("Messages processed", sp->st_msgs_processed); - MAKE_STAT_LIST("Messages ignored for recovery", sp->st_msgs_recover); - MAKE_STAT_LIST("Message send failures", sp->st_msgs_send_failures); - MAKE_STAT_LIST("Messages sent", sp->st_msgs_sent); - MAKE_STAT_LIST("New site messages", sp->st_newsites); - MAKE_STAT_LIST("Number of sites in replication group", sp->st_nsites); - MAKE_STAT_LIST("Transmission limited", sp->st_nthrottles); - MAKE_STAT_LIST("Outdated conditions", sp->st_outdated); - MAKE_STAT_LIST("Transactions applied", sp->st_txns_applied); - MAKE_STAT_LIST("Next page expected", sp->st_next_pg); - MAKE_STAT_LIST("First missed page", sp->st_waiting_pg); - MAKE_STAT_LIST("Duplicate pages received", sp->st_pg_duplicated); - MAKE_STAT_LIST("Pages received", sp->st_pg_records); - 
MAKE_STAT_LIST("Pages requested", sp->st_pg_requested); - MAKE_STAT_LIST("Elections held", sp->st_elections); - MAKE_STAT_LIST("Elections won", sp->st_elections_won); - MAKE_STAT_LIST("Election phase", sp->st_election_status); - MAKE_STAT_LIST("Election winner", sp->st_election_cur_winner); - MAKE_STAT_LIST("Election generation number", sp->st_election_gen); - MAKE_STAT_LSN("Election max LSN", &sp->st_election_lsn); - MAKE_STAT_LIST("Election sites", sp->st_election_nsites); - MAKE_STAT_LIST("Election votes", sp->st_election_nvotes); - MAKE_STAT_LIST("Election priority", sp->st_election_priority); - MAKE_STAT_LIST("Election tiebreaker", sp->st_election_tiebreaker); - MAKE_STAT_LIST("Election votes", sp->st_election_votes); - - Tcl_SetObjResult(interp, res); -error: - __os_ufree(dbenv, sp); - return (result); -} -#endif diff --git a/storage/bdb/tcl/tcl_seq.c b/storage/bdb/tcl/tcl_seq.c deleted file mode 100644 index de3e4dd6161..00000000000 --- a/storage/bdb/tcl/tcl_seq.c +++ /dev/null @@ -1,526 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: tcl_seq.c,v 11.12 2004/10/25 18:02:56 bostic Exp $ - */ - -#include "db_config.h" - -#ifdef HAVE_SEQUENCE -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" -#include "dbinc_auto/sequence_ext.h" - -/* - * Prototypes for procedures defined later in this file: - */ -static int tcl_SeqClose __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB_SEQUENCE *, DBTCL_INFO *)); -static int tcl_SeqGet __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB_SEQUENCE *)); -static int tcl_SeqRemove __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB_SEQUENCE *, DBTCL_INFO *)); -static int tcl_SeqStat __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB_SEQUENCE *)); -static int tcl_SeqGetFlags __P((Tcl_Interp *, - int, Tcl_Obj * CONST*, DB_SEQUENCE *)); - -/* - * - * PUBLIC: int seq_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - * - * seq_Cmd -- - * Implements the "seq" widget. - */ -int -seq_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* SEQ handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *seqcmds[] = { - "close", - "get", - "get_cachesize", - "get_db", - "get_flags", - "get_key", - "get_range", - "remove", - "stat", - NULL - }; - enum seqcmds { - SEQCLOSE, - SEQGET, - SEQGETCACHESIZE, - SEQGETDB, - SEQGETFLAGS, - SEQGETKEY, - SEQGETRANGE, - SEQREMOVE, - SEQSTAT - }; - DB *dbp; - DBT key; - DBTCL_INFO *dbip, *ip; - DB_SEQUENCE *seq; - Tcl_Obj *myobjv[2], *res; - db_seq_t min, max; - int cmdindex, ncache, result, ret; - - Tcl_ResetResult(interp); - seq = (DB_SEQUENCE *)clientData; - result = TCL_OK; - dbip = NULL; - if (objc <= 1) { - Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs"); - return (TCL_ERROR); - } - if (seq == NULL) { - Tcl_SetResult(interp, "NULL sequence pointer", TCL_STATIC); - return (TCL_ERROR); - } - - ip = _PtrToInfo((void *)seq); - if (ip == NULL) { - Tcl_SetResult(interp, "NULL info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], seqcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum seqcmds)cmdindex) { - case SEQGETRANGE: - ret = seq->get_range(seq, &min, &max); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "sequence get_range")) == TCL_OK) { - myobjv[0] = Tcl_NewWideIntObj(min); - myobjv[1] = Tcl_NewWideIntObj(max); - res = Tcl_NewListObj(2, myobjv); - } - break; - case SEQCLOSE: - result = tcl_SeqClose(interp, objc, objv, seq, ip); - break; - case SEQREMOVE: - result = tcl_SeqRemove(interp, objc, objv, seq, ip); - break; - case SEQGET: - result = tcl_SeqGet(interp, objc, objv, seq); - break; - case SEQSTAT: - result = tcl_SeqStat(interp, objc, objv, seq); - break; - case SEQGETCACHESIZE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = seq->get_cachesize(seq, &ncache); - if ((result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "sequence get_cachesize")) == TCL_OK) - res = Tcl_NewIntObj(ncache); - break; - case SEQGETDB: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = seq->get_db(seq, &dbp); - if (ret == 0 && (dbip = _PtrToInfo((void *)dbp)) == NULL) { - Tcl_SetResult(interp, - "NULL db info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - if ((result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "sequence get_db")) == TCL_OK) - res = NewStringObj(dbip->i_name, strlen(dbip->i_name)); - break; - case SEQGETKEY: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - ret = seq->get_key(seq, &key); - if ((result = _ReturnSetup(interp, ret, - DB_RETOK_STD(ret), "sequence get_key")) == TCL_OK) - res = Tcl_NewByteArrayObj( - (u_char *)key.data, (int)key.size); - break; - case SEQGETFLAGS: - result = tcl_SeqGetFlags(interp, objc, objv, seq); - break; - } - - /* - * Only set result if we have a res. 
Otherwise, lower functions have - * already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * tcl_db_stat -- - */ -static int -tcl_SeqStat(interp, objc, objv, seq) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_SEQUENCE *seq; /* Database pointer */ -{ - DB_SEQUENCE_STAT *sp; - u_int32_t flag; - Tcl_Obj *res, *flaglist, *myobjv[2]; - int result, ret; - char *arg; - - result = TCL_OK; - flag = 0; - - if (objc > 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-clear?"); - return (TCL_ERROR); - } - - if (objc == 3) { - arg = Tcl_GetStringFromObj(objv[2], NULL); - if (strcmp(arg, "-clear") == 0) - flag = DB_STAT_CLEAR; - else { - Tcl_SetResult(interp, - "db stat: unknown arg", TCL_STATIC); - return (TCL_ERROR); - } - } - - _debug_check(); - ret = seq->stat(seq, &sp, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat"); - if (result == TCL_ERROR) - return (result); - - res = Tcl_NewObj(); - MAKE_STAT_LIST("Wait", sp->st_wait); - MAKE_STAT_LIST("No wait", sp->st_nowait); - MAKE_WSTAT_LIST("Current", sp->st_current); - MAKE_WSTAT_LIST("Cached", sp->st_value); - MAKE_WSTAT_LIST("Max Cached", sp->st_last_value); - MAKE_WSTAT_LIST("Min", sp->st_min); - MAKE_WSTAT_LIST("Max", sp->st_max); - MAKE_STAT_LIST("Cache size", sp->st_cache_size); - /* - * Construct a {name {flag1 flag2 ... flagN}} list for the - * seq flags. 
- */ - myobjv[0] = NewStringObj("Flags", strlen("Flags")); - myobjv[1] = - _GetFlagsList(interp, sp->st_flags, __db_get_seq_flags_fn()); - flaglist = Tcl_NewListObj(2, myobjv); - if (flaglist == NULL) { - result = TCL_ERROR; - goto error; - } - if ((result = - Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK) - goto error; - - Tcl_SetObjResult(interp, res); - -error: __os_ufree(seq->seq_dbp->dbenv, sp); - return (result); -} - -/* - * tcl_db_close -- - */ -static int -tcl_SeqClose(interp, objc, objv, seq, ip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_SEQUENCE *seq; /* Database pointer */ - DBTCL_INFO *ip; /* Info pointer */ -{ - int result, ret; - - result = TCL_OK; - if (objc > 2) { - Tcl_WrongNumArgs(interp, 2, objv, ""); - return (TCL_ERROR); - } - - _DeleteInfo(ip); - _debug_check(); - - ret = seq->close(seq, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "sequence close"); - return (result); -} - -/* - * tcl_SeqGet -- - */ -static int -tcl_SeqGet(interp, objc, objv, seq) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_SEQUENCE *seq; /* Sequence pointer */ -{ - static const char *seqgetopts[] = { - "-auto_commit", - "-nosync", - "-txn", - NULL - }; - enum seqgetopts { - SEQGET_AUTO_COMMIT, - SEQGET_NOSYNC, - SEQGET_TXN - }; - DB_TXN *txn; - Tcl_Obj *res; - db_seq_t value; - u_int32_t aflag, delta; - int i, end, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - result = TCL_OK; - txn = NULL; - aflag = 0; - - if (objc < 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args? delta"); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the options - * defined above. 
- */ - i = 2; - end = objc; - while (i < end) { - if (Tcl_GetIndexFromObj(interp, objv[i], seqgetopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto out; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum seqgetopts)optindex) { - case SEQGET_AUTO_COMMIT: - aflag |= DB_AUTO_COMMIT; - break; - case SEQGET_NOSYNC: - aflag |= DB_TXN_NOSYNC; - break; - case SEQGET_TXN: - if (i >= end) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Get: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } /* switch */ - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - if (i != objc - 1) { - Tcl_SetResult(interp, - "Wrong number of key/data given\n", TCL_STATIC); - result = TCL_ERROR; - goto out; - } - - if ((result = _GetUInt32(interp, objv[objc - 1], &delta)) != TCL_OK) - goto out; - - ret = seq->get(seq, txn, (int32_t)delta, &value, aflag); - result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret), "sequence get"); - if (ret == 0) { - res = Tcl_NewWideIntObj((Tcl_WideInt)value); - Tcl_SetObjResult(interp, res); - } -out: - return (result); -} -/* - */ -static int -tcl_SeqRemove(interp, objc, objv, seq, ip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_SEQUENCE *seq; /* Sequence pointer */ - DBTCL_INFO *ip; /* Info pointer */ -{ - static const char *seqgetopts[] = { - "-auto_commit", - "-nosync", - "-txn", - NULL - }; - enum seqgetopts { - SEQGET_AUTO_COMMIT, - SEQGET_NOSYNC, - SEQGET_TXN - }; - DB_TXN *txn; - u_int32_t aflag; - int i, end, optindex, result, ret; - char *arg, msg[MSG_SIZE]; - - result = TCL_OK; - txn = NULL; - aflag = 0; - - _DeleteInfo(ip); - - if (objc < 2) { - Tcl_WrongNumArgs(interp, 2, objv, "?-args?"); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the options - * defined above. - */ - i = 2; - end = objc; - while (i < end) { - if (Tcl_GetIndexFromObj(interp, objv[i], seqgetopts, "option", - TCL_EXACT, &optindex) != TCL_OK) { - arg = Tcl_GetStringFromObj(objv[i], NULL); - if (arg[0] == '-') { - result = IS_HELP(objv[i]); - goto out; - } else - Tcl_ResetResult(interp); - break; - } - i++; - switch ((enum seqgetopts)optindex) { - case SEQGET_AUTO_COMMIT: - aflag |= DB_AUTO_COMMIT; - break; - case SEQGET_NOSYNC: - aflag |= DB_TXN_NOSYNC; - break; - case SEQGET_TXN: - if (i >= end) { - Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - txn = NAME_TO_TXN(arg); - if (txn == NULL) { - snprintf(msg, MSG_SIZE, - "Remove: Invalid txn: %s\n", arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - result = TCL_ERROR; - } - break; - } /* switch */ - if (result != TCL_OK) - break; - } - if (result != TCL_OK) - goto out; - - ret = seq->remove(seq, txn, aflag); - result = _ReturnSetup(interp, - ret, DB_RETOK_DBGET(ret), "sequence remove"); -out: - return (result); -} - -/* - * tcl_SeqGetFlags -- - */ -static int -tcl_SeqGetFlags(interp, objc, objv, seq) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_SEQUENCE *seq; /* Sequence pointer */ -{ - int i, ret, result; - u_int32_t flags; - char buf[512]; - Tcl_Obj *res; - - static const struct { - u_int32_t flag; - char *arg; - } seq_flags[] = { - { DB_SEQ_INC, "-inc" }, - { DB_SEQ_DEC, "-dec" }, - { DB_SEQ_WRAP, "-wrap" }, - { 0, NULL } - }; - - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - - ret = seq->get_flags(seq, &flags); - if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "db get_flags")) == TCL_OK) { - buf[0] = '\0'; - - for (i = 0; seq_flags[i].flag != 0; i++) - if (LF_ISSET(seq_flags[i].flag)) { - if (strlen(buf) > 0) - (void)strncat(buf, " ", sizeof(buf)); - (void)strncat( - buf, seq_flags[i].arg, sizeof(buf)); - } - - res = NewStringObj(buf, strlen(buf)); - Tcl_SetObjResult(interp, res); - } - - return (result); -} -#endif /* HAVE_SEQUENCE */ diff --git a/storage/bdb/tcl/tcl_txn.c b/storage/bdb/tcl/tcl_txn.c deleted file mode 100644 index b819c24b788..00000000000 --- a/storage/bdb/tcl/tcl_txn.c +++ /dev/null @@ -1,678 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_txn.c,v 11.70 2004/10/27 16:48:32 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -static int tcl_TxnCommit __P((Tcl_Interp *, - int, Tcl_Obj * CONST *, DB_TXN *, DBTCL_INFO *)); -static int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST *)); - -/* - * _TxnInfoDelete -- - * Removes nested txn info structures that are children - * of this txn. - * RECURSIVE: Transactions can be arbitrarily nested, so we - * must recurse down until we get them all. 
- * - * PUBLIC: void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *)); - */ -void -_TxnInfoDelete(interp, txnip) - Tcl_Interp *interp; /* Interpreter */ - DBTCL_INFO *txnip; /* Info for txn */ -{ - DBTCL_INFO *nextp, *p; - - for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) { - /* - * Check if this info structure "belongs" to this - * txn. Remove its commands and info structure. - */ - nextp = LIST_NEXT(p, entries); - if (p->i_parent == txnip && p->i_type == I_TXN) { - _TxnInfoDelete(interp, p); - (void)Tcl_DeleteCommand(interp, p->i_name); - _DeleteInfo(p); - } - } -} - -/* - * tcl_TxnCheckpoint -- - * - * PUBLIC: int tcl_TxnCheckpoint __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_TxnCheckpoint(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - static const char *txnckpopts[] = { - "-force", - "-kbyte", - "-min", - NULL - }; - enum txnckpopts { - TXNCKP_FORCE, - TXNCKP_KB, - TXNCKP_MIN - }; - u_int32_t flags; - int i, kb, min, optindex, result, ret; - - result = TCL_OK; - flags = 0; - kb = min = 0; - - /* - * Get the flag index from the object based on the options - * defined above. 
- */ - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - txnckpopts, "option", TCL_EXACT, &optindex) != TCL_OK) { - return (IS_HELP(objv[i])); - } - i++; - switch ((enum txnckpopts)optindex) { - case TXNCKP_FORCE: - flags = DB_FORCE; - break; - case TXNCKP_KB: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-kbyte kb?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &kb); - break; - case TXNCKP_MIN: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, "?-min min?"); - result = TCL_ERROR; - break; - } - result = Tcl_GetIntFromObj(interp, objv[i++], &min); - break; - } - } - _debug_check(); - ret = envp->txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, flags); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn checkpoint"); - return (result); -} - -/* - * tcl_Txn -- - * - * PUBLIC: int tcl_Txn __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *)); - */ -int -tcl_Txn(interp, objc, objv, envp, envip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ - DBTCL_INFO *envip; /* Info pointer */ -{ - static const char *txnopts[] = { -#ifdef CONFIG_TEST - "-degree_2", - "-dirty", - "-lock_timeout", - "-txn_timeout", -#endif - "-nosync", - "-nowait", - "-parent", - "-sync", - NULL - }; - enum txnopts { -#ifdef CONFIG_TEST - TXNDEGREE2, - TXNDIRTY, - TXN_LOCK_TIMEOUT, - TXN_TIMEOUT, -#endif - TXNNOSYNC, - TXNNOWAIT, - TXNPARENT, - TXNSYNC - }; - DBTCL_INFO *ip; - DB_TXN *parent; - DB_TXN *txn; - Tcl_Obj *res; - u_int32_t flag; - int i, optindex, result, ret; - char *arg, msg[MSG_SIZE], newname[MSG_SIZE]; -#ifdef CONFIG_TEST - db_timeout_t lk_time, tx_time; - u_int32_t lk_timeflag, tx_timeflag; -#endif - - result = TCL_OK; - memset(newname, 0, MSG_SIZE); - - parent = NULL; - flag = 0; -#ifdef CONFIG_TEST - COMPQUIET(tx_time, 0); - COMPQUIET(lk_time, 0); - lk_timeflag = tx_timeflag = 0; -#endif - i = 2; - while (i < objc) { - if (Tcl_GetIndexFromObj(interp, objv[i], - txnopts, "option", TCL_EXACT, &optindex) != TCL_OK) { - return (IS_HELP(objv[i])); - } - i++; - switch ((enum txnopts)optindex) { -#ifdef CONFIG_TEST - case TXNDEGREE2: - flag |= DB_DEGREE_2; - break; - case TXNDIRTY: - flag |= DB_DIRTY_READ; - break; - case TXN_LOCK_TIMEOUT: - lk_timeflag = DB_SET_LOCK_TIMEOUT; - goto getit; - case TXN_TIMEOUT: - tx_timeflag = DB_SET_TXN_TIMEOUT; -getit: if (i >= objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-txn_timestamp time?"); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[i++], (long *) - ((enum txnopts)optindex == TXN_LOCK_TIMEOUT ? 
- &lk_time : &tx_time)); - if (result != TCL_OK) - return (TCL_ERROR); - break; -#endif - case TXNNOSYNC: - FLAG_CHECK2(flag, DB_DIRTY_READ); - flag |= DB_TXN_NOSYNC; - break; - case TXNNOWAIT: - FLAG_CHECK2(flag, DB_DIRTY_READ); - flag |= DB_TXN_NOWAIT; - break; - case TXNPARENT: - if (i == objc) { - Tcl_WrongNumArgs(interp, 2, objv, - "?-parent txn?"); - result = TCL_ERROR; - break; - } - arg = Tcl_GetStringFromObj(objv[i++], NULL); - parent = NAME_TO_TXN(arg); - if (parent == NULL) { - snprintf(msg, MSG_SIZE, - "Invalid parent txn: %s\n", - arg); - Tcl_SetResult(interp, msg, TCL_VOLATILE); - return (TCL_ERROR); - } - break; - case TXNSYNC: - FLAG_CHECK2(flag, DB_DIRTY_READ); - flag |= DB_TXN_SYNC; - break; - } - } - snprintf(newname, sizeof(newname), "%s.txn%d", - envip->i_name, envip->i_envtxnid); - ip = _NewInfo(interp, NULL, newname, I_TXN); - if (ip == NULL) { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->txn_begin(envp, parent, &txn, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn"); - if (result == TCL_ERROR) - _DeleteInfo(ip); - else { - /* - * Success. Set up return. Set up new info - * and command widget for this txn. 
- */ - envip->i_envtxnid++; - if (parent) - ip->i_parent = _PtrToInfo(parent); - else - ip->i_parent = envip; - _SetInfoData(ip, txn); - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL); - res = NewStringObj(newname, strlen(newname)); - Tcl_SetObjResult(interp, res); -#ifdef CONFIG_TEST - if (tx_timeflag != 0) { - ret = txn->set_timeout(txn, tx_time, tx_timeflag); - if (ret != 0) { - result = - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_timeout"); - _DeleteInfo(ip); - } - } - if (lk_timeflag != 0) { - ret = txn->set_timeout(txn, lk_time, lk_timeflag); - if (ret != 0) { - result = - _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "set_timeout"); - _DeleteInfo(ip); - } - } -#endif - } - return (result); -} - -/* - * tcl_TxnStat -- - * - * PUBLIC: int tcl_TxnStat __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_TxnStat(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - DBTCL_INFO *ip; - DB_TXN_ACTIVE *p; - DB_TXN_STAT *sp; - Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist; - u_int32_t i; - int myobjc, result, ret; - - result = TCL_OK; - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->txn_stat(envp, &sp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn stat"); - if (result == TCL_ERROR) - return (result); - - /* - * Have our stats, now construct the name value - * list pairs and free up the memory. - */ - res = Tcl_NewObj(); - /* - * MAKE_STAT_LIST assumes 'res' and 'error' label. 
- */ - MAKE_STAT_LIST("Region size", sp->st_regsize); - MAKE_STAT_LSN("LSN of last checkpoint", &sp->st_last_ckp); - MAKE_STAT_LIST("Time of last checkpoint", sp->st_time_ckp); - MAKE_STAT_LIST("Last txn ID allocated", sp->st_last_txnid); - MAKE_STAT_LIST("Max Txns", sp->st_maxtxns); - MAKE_STAT_LIST("Number aborted txns", sp->st_naborts); - MAKE_STAT_LIST("Number active txns", sp->st_nactive); - MAKE_STAT_LIST("Maximum active txns", sp->st_maxnactive); - MAKE_STAT_LIST("Number txns begun", sp->st_nbegins); - MAKE_STAT_LIST("Number committed txns", sp->st_ncommits); - MAKE_STAT_LIST("Number restored txns", sp->st_nrestores); - MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait); - MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait); - for (i = 0, p = sp->st_txnarray; i < sp->st_nactive; i++, p++) - for (ip = LIST_FIRST(&__db_infohead); ip != NULL; - ip = LIST_NEXT(ip, entries)) { - if (ip->i_type != I_TXN) - continue; - if (ip->i_type == I_TXN && - (ip->i_txnp->id(ip->i_txnp) == p->txnid)) { - MAKE_STAT_LSN(ip->i_name, &p->lsn); - if (p->parentid != 0) - MAKE_STAT_STRLIST("Parent", - ip->i_parent->i_name); - else - MAKE_STAT_LIST("Parent", 0); - break; - } - } - Tcl_SetObjResult(interp, res); -error: - __os_ufree(envp, sp); - return (result); -} - -/* - * tcl_TxnTimeout -- - * - * PUBLIC: int tcl_TxnTimeout __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *)); - */ -int -tcl_TxnTimeout(interp, objc, objv, envp) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ -{ - long timeout; - int result, ret; - - /* - * One arg, the timeout. 
- */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "?timeout?"); - return (TCL_ERROR); - } - result = Tcl_GetLongFromObj(interp, objv[2], &timeout); - if (result != TCL_OK) - return (result); - _debug_check(); - ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_TXN_TIMEOUT); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "lock timeout"); - return (result); -} - -/* - * txn_Cmd -- - * Implements the "txn" widget. - */ -static int -txn_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Txn handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *txncmds[] = { -#ifdef CONFIG_TEST - "discard", - "id", - "prepare", -#endif - "abort", - "commit", - NULL - }; - enum txncmds { -#ifdef CONFIG_TEST - TXNDISCARD, - TXNID, - TXNPREPARE, -#endif - TXNABORT, - TXNCOMMIT - }; - DBTCL_INFO *txnip; - DB_TXN *txnp; - Tcl_Obj *res; - int cmdindex, result, ret; -#ifdef CONFIG_TEST - u_int8_t *gid; -#endif - - Tcl_ResetResult(interp); - txnp = (DB_TXN *)clientData; - txnip = _PtrToInfo((void *)txnp); - result = TCL_OK; - if (txnp == NULL) { - Tcl_SetResult(interp, "NULL txn pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (txnip == NULL) { - Tcl_SetResult(interp, "NULL txn info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], txncmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum txncmds)cmdindex) { -#ifdef CONFIG_TEST - case TXNDISCARD: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = txnp->discard(txnp, 0); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn discard"); - _TxnInfoDelete(interp, txnip); - (void)Tcl_DeleteCommand(interp, txnip->i_name); - _DeleteInfo(txnip); - break; - case TXNID: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - res = Tcl_NewIntObj((int)txnp->id(txnp)); - break; - case TXNPREPARE: - if (objc != 3) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - gid = (u_int8_t *)Tcl_GetByteArrayFromObj(objv[2], NULL); - ret = txnp->prepare(txnp, gid); - /* - * !!! - * DB_TXN->prepare commits all outstanding children. But it - * does NOT destroy the current txn handle. So, we must call - * _TxnInfoDelete to recursively remove all nested txn handles, - * we do not call _DeleteInfo on ourselves. - */ - _TxnInfoDelete(interp, txnip); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn prepare"); - break; -#endif - case TXNABORT: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = txnp->abort(txnp); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn abort"); - _TxnInfoDelete(interp, txnip); - (void)Tcl_DeleteCommand(interp, txnip->i_name); - _DeleteInfo(txnip); - break; - case TXNCOMMIT: - result = tcl_TxnCommit(interp, objc, objv, txnp, txnip); - _TxnInfoDelete(interp, txnip); - (void)Tcl_DeleteCommand(interp, txnip->i_name); - _DeleteInfo(txnip); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. 
- */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -static int -tcl_TxnCommit(interp, objc, objv, txnp, txnip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_TXN *txnp; /* Transaction pointer */ - DBTCL_INFO *txnip; /* Info pointer */ -{ - static const char *commitopt[] = { - "-nosync", - "-sync", - NULL - }; - enum commitopt { - COMSYNC, - COMNOSYNC - }; - u_int32_t flag; - int optindex, result, ret; - - COMPQUIET(txnip, NULL); - - result = TCL_OK; - flag = 0; - if (objc != 2 && objc != 3) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - if (objc == 3) { - if (Tcl_GetIndexFromObj(interp, objv[2], commitopt, - "option", TCL_EXACT, &optindex) != TCL_OK) - return (IS_HELP(objv[2])); - switch ((enum commitopt)optindex) { - case COMSYNC: - FLAG_CHECK(flag); - flag = DB_TXN_SYNC; - break; - case COMNOSYNC: - FLAG_CHECK(flag); - flag = DB_TXN_NOSYNC; - break; - } - } - - _debug_check(); - ret = txnp->commit(txnp, flag); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn commit"); - return (result); -} - -#ifdef CONFIG_TEST -/* - * tcl_TxnRecover -- - * - * PUBLIC: int tcl_TxnRecover __P((Tcl_Interp *, int, - * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *)); - */ -int -tcl_TxnRecover(interp, objc, objv, envp, envip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ - DBTCL_INFO *envip; /* Info pointer */ -{ -#define DO_PREPLIST(count) \ -for (i = 0; i < count; i++) { \ - snprintf(newname, sizeof(newname), "%s.txn%d", \ - envip->i_name, envip->i_envtxnid); \ - ip = _NewInfo(interp, NULL, newname, I_TXN); \ - if (ip == NULL) { \ - Tcl_SetResult(interp, "Could not set up info", \ - TCL_STATIC); \ - return (TCL_ERROR); \ - } \ - envip->i_envtxnid++; \ - ip->i_parent = envip; \ - p = &prep[i]; \ - _SetInfoData(ip, p->txn); \ - (void)Tcl_CreateObjCommand(interp, newname, \ - (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)p->txn, NULL); \ - result = _SetListElem(interp, res, newname, strlen(newname), \ - p->gid, DB_XIDDATASIZE); \ - if (result != TCL_OK) \ - goto error; \ -} - - DBTCL_INFO *ip; - DB_PREPLIST prep[DBTCL_PREP], *p; - Tcl_Obj *res; - long count, i; - int result, ret; - char newname[MSG_SIZE]; - - result = TCL_OK; - /* - * No args for this. Error if there are some. - */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - ret = envp->txn_recover(envp, prep, DBTCL_PREP, &count, DB_FIRST); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn recover"); - if (result == TCL_ERROR) - return (result); - res = Tcl_NewObj(); - DO_PREPLIST(count); - - /* - * If count returned is the maximum size we have, then there - * might be more. Keep going until we get them all. 
- */ - while (count == DBTCL_PREP) { - ret = envp->txn_recover( - envp, prep, DBTCL_PREP, &count, DB_NEXT); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), - "txn recover"); - if (result == TCL_ERROR) - return (result); - DO_PREPLIST(count); - } - Tcl_SetObjResult(interp, res); -error: - return (result); -} -#endif diff --git a/storage/bdb/tcl/tcl_util.c b/storage/bdb/tcl/tcl_util.c deleted file mode 100644 index 13a6d6a9dd7..00000000000 --- a/storage/bdb/tcl/tcl_util.c +++ /dev/null @@ -1,368 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: tcl_util.c,v 11.43 2004/06/10 17:20:57 bostic Exp $ - */ - -#include "db_config.h" - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#endif - -#include "db_int.h" -#include "dbinc/tcl_db.h" - -/* - * Prototypes for procedures defined later in this file: - */ -static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); - -/* - * bdb_RandCommand -- - * Implements rand* functions. - * - * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); - */ -int -bdb_RandCommand(interp, objc, objv) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *rcmds[] = { - "rand", "random_int", "srand", - NULL - }; - enum rcmds { - RRAND, RRAND_INT, RSRAND - }; - Tcl_Obj *res; - int cmdindex, hi, lo, result, ret; - - result = TCL_OK; - /* - * Get the command name index from the object based on the cmds - * defined above. This SHOULD NOT fail because we already checked - * in the 'berkdb' command. - */ - if (Tcl_GetIndexFromObj(interp, - objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum rcmds)cmdindex) { - case RRAND: - /* - * Must be 0 args. Error if different. 
- */ - if (objc != 2) { - Tcl_WrongNumArgs(interp, 2, objv, NULL); - return (TCL_ERROR); - } - ret = rand(); - res = Tcl_NewIntObj(ret); - break; - case RRAND_INT: - /* - * Must be 4 args. Error if different. - */ - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "lo hi"); - return (TCL_ERROR); - } - if ((result = - Tcl_GetIntFromObj(interp, objv[2], &lo)) != TCL_OK) - return (result); - if ((result = - Tcl_GetIntFromObj(interp, objv[3], &hi)) != TCL_OK) - return (result); - if (lo < 0 || hi < 0) { - Tcl_SetResult(interp, - "Range value less than 0", TCL_STATIC); - return (TCL_ERROR); - } - - _debug_check(); - ret = lo + rand() % ((hi - lo) + 1); - res = Tcl_NewIntObj(ret); - break; - case RSRAND: - /* - * Must be 1 arg. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "seed"); - return (TCL_ERROR); - } - if ((result = - Tcl_GetIntFromObj(interp, objv[2], &lo)) == TCL_OK) { - srand((u_int)lo); - res = Tcl_NewIntObj(0); - } - break; - } - - /* - * Only set result if we have a res. Otherwise, lower functions have - * already done so. - */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} - -/* - * - * tcl_Mutex -- - * Opens an env mutex. - * - * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, - * PUBLIC: DBTCL_INFO *)); - */ -int -tcl_Mutex(interp, objc, objv, envp, envip) - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? 
*/ - Tcl_Obj *CONST objv[]; /* The argument objects */ - DB_ENV *envp; /* Environment pointer */ - DBTCL_INFO *envip; /* Info pointer */ -{ - DBTCL_INFO *ip; - Tcl_Obj *res; - _MUTEX_DATA *md; - int i, nitems, mode, result, ret; - char newname[MSG_SIZE]; - - md = NULL; - result = TCL_OK; - ret = 0; - - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "mode nitems"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &mode); - if (result != TCL_OK) - return (TCL_ERROR); - result = Tcl_GetIntFromObj(interp, objv[3], &nitems); - if (result != TCL_OK) - return (TCL_ERROR); - - memset(newname, 0, MSG_SIZE); - snprintf(newname, sizeof(newname), - "%s.mutex%d", envip->i_name, envip->i_envmutexid); - ip = _NewInfo(interp, NULL, newname, I_MUTEX); - if (ip == NULL) { - Tcl_SetResult(interp, "Could not set up info", - TCL_STATIC); - return (TCL_ERROR); - } - /* - * Set up mutex. - */ - /* - * Map in the region. - * - * XXX - * We don't bother doing this "right", i.e., using the shalloc - * functions, just grab some memory knowing that it's correctly - * aligned. - */ - _debug_check(); - if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0) - goto posixout; - md->env = envp; - md->size = sizeof(_MUTEX_ENTRY) * (u_int)nitems; - - md->reginfo.dbenv = envp; - md->reginfo.type = REGION_TYPE_MUTEX; - md->reginfo.id = INVALID_REGION_ID; - md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK; - if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0) - goto posixout; - md->marray = md->reginfo.addr; - - /* Initialize a created region. */ - if (F_ISSET(&md->reginfo, REGION_CREATE)) - for (i = 0; i < nitems; i++) { - md->marray[i].val = 0; - if ((ret = __db_mutex_init_int(envp, - &md->marray[i].m, i, 0)) != 0) - goto posixout; - } - R_UNLOCK(envp, &md->reginfo); - - /* - * Success. Set up return. Set up new info - * and command widget for this mutex. 
- */ - envip->i_envmutexid++; - ip->i_parent = envip; - _SetInfoData(ip, md); - (void)Tcl_CreateObjCommand(interp, newname, - (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL); - res = NewStringObj(newname, strlen(newname)); - Tcl_SetObjResult(interp, res); - - return (TCL_OK); - -posixout: - if (ret > 0) - (void)Tcl_PosixError(interp); - result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mutex"); - _DeleteInfo(ip); - - if (md != NULL) { - if (md->reginfo.addr != NULL) - (void)__db_r_detach(md->env, &md->reginfo, 0); - __os_free(md->env, md); - } - return (result); -} - -/* - * mutex_Cmd -- - * Implements the "mutex" widget. - */ -static int -mutex_Cmd(clientData, interp, objc, objv) - ClientData clientData; /* Mutex handle */ - Tcl_Interp *interp; /* Interpreter */ - int objc; /* How many arguments? */ - Tcl_Obj *CONST objv[]; /* The argument objects */ -{ - static const char *mxcmds[] = { - "close", - "get", - "getval", - "release", - "setval", - NULL - }; - enum mxcmds { - MXCLOSE, - MXGET, - MXGETVAL, - MXRELE, - MXSETVAL - }; - DB_ENV *dbenv; - DBTCL_INFO *envip, *mpip; - _MUTEX_DATA *mp; - Tcl_Obj *res; - int cmdindex, id, result, newval; - - Tcl_ResetResult(interp); - mp = (_MUTEX_DATA *)clientData; - mpip = _PtrToInfo((void *)mp); - envip = mpip->i_parent; - dbenv = envip->i_envp; - result = TCL_OK; - - if (mp == NULL) { - Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC); - return (TCL_ERROR); - } - if (mpip == NULL) { - Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC); - return (TCL_ERROR); - } - - /* - * Get the command name index from the object based on the dbcmds - * defined above. 
- */ - if (Tcl_GetIndexFromObj(interp, - objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK) - return (IS_HELP(objv[1])); - - res = NULL; - switch ((enum mxcmds)cmdindex) { - case MXCLOSE: - if (objc != 2) { - Tcl_WrongNumArgs(interp, 1, objv, NULL); - return (TCL_ERROR); - } - _debug_check(); - (void)__db_r_detach(mp->env, &mp->reginfo, 0); - res = Tcl_NewIntObj(0); - (void)Tcl_DeleteCommand(interp, mpip->i_name); - _DeleteInfo(mpip); - __os_free(mp->env, mp); - break; - case MXRELE: - /* - * Check for 1 arg. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "id"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &id); - if (result != TCL_OK) - break; - MUTEX_UNLOCK(dbenv, &mp->marray[id].m); - res = Tcl_NewIntObj(0); - break; - case MXGET: - /* - * Check for 1 arg. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "id"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &id); - if (result != TCL_OK) - break; - MUTEX_LOCK(dbenv, &mp->marray[id].m); - res = Tcl_NewIntObj(0); - break; - case MXGETVAL: - /* - * Check for 1 arg. Error if different. - */ - if (objc != 3) { - Tcl_WrongNumArgs(interp, 2, objv, "id"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &id); - if (result != TCL_OK) - break; - res = Tcl_NewLongObj((long)mp->marray[id].val); - break; - case MXSETVAL: - /* - * Check for 2 args. Error if different. - */ - if (objc != 4) { - Tcl_WrongNumArgs(interp, 2, objv, "id val"); - return (TCL_ERROR); - } - result = Tcl_GetIntFromObj(interp, objv[2], &id); - if (result != TCL_OK) - break; - result = Tcl_GetIntFromObj(interp, objv[3], &newval); - if (result != TCL_OK) - break; - mp->marray[id].val = newval; - res = Tcl_NewIntObj(0); - break; - } - /* - * Only set result if we have a res. Otherwise, lower - * functions have already done so. 
- */ - if (result == TCL_OK && res) - Tcl_SetObjResult(interp, res); - return (result); -} diff --git a/storage/bdb/test/archive.tcl b/storage/bdb/test/archive.tcl deleted file mode 100644 index fa68f633b0f..00000000000 --- a/storage/bdb/test/archive.tcl +++ /dev/null @@ -1,256 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: archive.tcl,v 11.26 2004/09/22 18:01:04 bostic Exp $ -# -# Options are: -# -checkrec -# -maxfilesize -proc archive { { inmem 0 } args } { - global alphabet - source ./include.tcl - - # Set defaults - if { $inmem == 1 } { - set maxbsize [expr 8 * [expr 1024 * 1024]] - set desc "in-memory" - } else { - set maxbsize [expr 8 * 1024] - set desc "on-disk" - } - set maxfile [expr 32 * 1024] - set checkrec 500 - for { set i 0 } { $i < [llength $args] } {incr i} { - switch -regexp -- [lindex $args $i] { - -c.* { incr i; set checkrec [lindex $args $i] } - -d.* { incr i; set testdir [lindex $args $i] } - -m.* { incr i; set maxfile [lindex $args $i] } - default { - puts "FAIL:[timestamp] archive usage" - puts "usage: archive -checkrec \ - -dir -maxfilesize " - return - } - } - } - - # Clean out old log if it existed - puts "Archive: Log archive test (using $desc logging)." - puts "Unlinking log: error message OK" - env_cleanup $testdir - - # Now run the various functionality tests - if { $inmem == 0 } { - set eflags "-create -txn -home $testdir \ - -log_buffer $maxbsize -log_max $maxfile" - } else { - set eflags "-create -txn -home $testdir -log_inmemory \ - -log_buffer $maxbsize -log_max $maxfile" - } - set dbenv [eval {berkdb_env} $eflags] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set logc [$dbenv log_cursor] - error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE - - # The basic test structure here is that we write a lot of log - # records (enough to fill up 100 log files; each log file it - # small). 
We start with three txns and open a database in - # each transaction. Then, in a loop, we take periodic - # checkpoints. Between each pair of checkpoints, we end one - # transaction; when no transactions are left, we start up three - # new ones, letting them overlap checkpoints as well. - # - # The pattern that we create is: - # 1. Create TXN1, TXN2, TXN3 and open dbs within the txns. - # 2. Write a bunch of additional log records. - # 3. Checkpoint. - # 4. Archive, checking that we list the right files. - # 5. Commit one transaction. - # 6. If no txns left, start 3 new ones. - # 7. Until we've gone through enough records, return to step 2. - - set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet" - puts "\tArchive.a: Writing log records; checkpoint every $checkrec records" - set nrecs $maxfile - set rec 0:$baserec - - # Begin 1st transaction and record current log file. Open - # a database in the transaction; the log file won't be - # removable until the transaction is aborted or committed. - set t1 [$dbenv txn] - error_check_good t1:txn_begin [is_valid_txn $t1 $dbenv] TRUE - - set l1 [lindex [lindex [$logc get -last] 0] 0] - set lsnlist [list $l1] - - set tdb1 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t1 -btree tdb1.db] - error_check_good dbopen [is_valid_db $tdb1] TRUE - - # Do the same for a 2nd and 3rd transaction. 
- set t2 [$dbenv txn] - error_check_good t2:txn_begin [is_valid_txn $t2 $dbenv] TRUE - set l2 [lindex [lindex [$logc get -last] 0] 0] - lappend lsnlist $l2 - set tdb2 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t2 -btree tdb2.db] - error_check_good dbopen [is_valid_db $tdb2] TRUE - - set t3 [$dbenv txn] - error_check_good t3:txn_begin [is_valid_txn $t3 $dbenv] TRUE - set l3 [lindex [lindex [$logc get -last] 0] 0] - lappend lsnlist $l3 - set tdb3 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t3 -btree tdb3.db] - error_check_good dbopen [is_valid_db $tdb3] TRUE - - # Keep a list of active transactions and databases opened - # within those transactions. - set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"] - - # Loop through a large number of log records, checkpointing - # and checking db_archive periodically. - for { set i 1 } { $i <= $nrecs } { incr i } { - set rec $i:$baserec - set lsn [$dbenv log_put $rec] - error_check_bad log_put [llength $lsn] 0 - if { [expr $i % $checkrec] == 0 } { - - # Take a checkpoint - $dbenv txn_checkpoint - set ckp_file [lindex [lindex [$logc get -last] 0] 0] - catch { archive_command -h $testdir -a } res_log_full - if { [string first db_archive $res_log_full] == 0 } { - set res_log_full "" - } - catch { archive_command -h $testdir } res_log - if { [string first db_archive $res_log] == 0 } { - set res_log "" - } - catch { archive_command -h $testdir -l } res_alllog - catch { archive_command -h $testdir -a -s } \ - res_data_full - catch { archive_command -h $testdir -s } res_data - - if { $inmem == 0 } { - error_check_good nlogfiles [llength $res_alllog] \ - [lindex [lindex [$logc get -last] 0] 0] - } else { - error_check_good nlogfiles [llength $res_alllog] 0 - } - - error_check_good logs_match [llength $res_log_full] \ - [llength $res_log] - error_check_good data_match [llength $res_data_full] \ - [llength $res_data] - - # Check right number of log files - if { $inmem == 0 } { - set expected [min 
$ckp_file [expr [lindex $lsnlist 0] - 1]] - error_check_good nlogs [llength $res_log] $expected - } - - # Check that the relative names are a subset of the - # full names - set n 0 - foreach x $res_log { - error_check_bad log_name_match:$res_log \ - [string first $x \ - [lindex $res_log_full $n]] -1 - incr n - } - - set n 0 - foreach x $res_data { - error_check_bad log_name_match:$res_data \ - [string first $x \ - [lindex $res_data_full $n]] -1 - incr n - } - - # Commit a transaction and close the associated db. - set t [lindex [lindex $txnlist 0] 0] - set tdb [lindex [lindex $txnlist 0] 1] - if { [string length $t] != 0 } { - error_check_good txn_commit:$t [$t commit] 0 - error_check_good tdb_close:$tdb [$tdb close] 0 - set txnlist [lrange $txnlist 1 end] - set lsnlist [lrange $lsnlist 1 end] - } - - # If we're down to no transactions, start some new ones. - if { [llength $txnlist] == 0 } { - set t1 [$dbenv txn] - error_check_bad tx_begin $t1 NULL - error_check_good \ - tx_begin [is_substr $t1 $dbenv] 1 - set tdb1 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t1 -btree tdb1.db] - error_check_good dbopen [is_valid_db $tdb1] TRUE - set l1 [lindex [lindex [$logc get -last] 0] 0] - lappend lsnlist $l1 - - set t2 [$dbenv txn] - error_check_bad tx_begin $t2 NULL - error_check_good \ - tx_begin [is_substr $t2 $dbenv] 1 - set tdb2 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t2 -btree tdb2.db] - error_check_good dbopen [is_valid_db $tdb2] TRUE - set l2 [lindex [lindex [$logc get -last] 0] 0] - lappend lsnlist $l2 - - set t3 [$dbenv txn] - error_check_bad tx_begin $t3 NULL - error_check_good \ - tx_begin [is_substr $t3 $dbenv] 1 - set tdb3 [eval {berkdb_open -create -mode 0644} \ - -env $dbenv -txn $t3 -btree tdb3.db] - error_check_good dbopen [is_valid_db $tdb3] TRUE - set l3 [lindex [lindex [$logc get -last] 0] 0] - lappend lsnlist $l3 - - set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"] - } - } - } - # Commit any transactions still 
running. - puts "\tArchive.b: Commit any transactions still running." - foreach pair $txnlist { - set t [lindex $pair 0] - set tdb [lindex $pair 1] - error_check_good txn_commit:$t [$t commit] 0 - error_check_good tdb_close:$tdb [$tdb close] 0 - } - - # Close and unlink the file - error_check_good log_cursor_close [$logc close] 0 - reset_env $dbenv -} - -proc archive_command { args } { - source ./include.tcl - - # Catch a list of files output by db_archive. - catch { eval exec $util_path/db_archive $args } output - - if { $is_windows_test == 1 || 1 } { - # On Windows, convert all filenames to use forward slashes. - regsub -all {[\\]} $output / output - } - - # Output the [possibly-transformed] list. - return $output -} - -proc min { a b } { - if {$a < $b} { - return $a - } else { - return $b - } -} diff --git a/storage/bdb/test/bigfile001.tcl b/storage/bdb/test/bigfile001.tcl deleted file mode 100644 index 39981413829..00000000000 --- a/storage/bdb/test/bigfile001.tcl +++ /dev/null @@ -1,85 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: bigfile001.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $ -# -# TEST bigfile001 -# TEST Create a database greater than 4 GB in size. Close, verify. -# TEST Grow the database somewhat. Close, reverify. Lather, rinse, -# TEST repeat. Since it will not work on all systems, this test is -# TEST not run by default. -proc bigfile001 { method \ - { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data" - - env_cleanup $testdir - - # Create the database. Use 64K pages; we want a good fill - # factor, and page size doesn't matter much. Use a 50MB - # cache; that should be manageable, and will help - # performance. 
- set dbname $testdir/big.db - - set db [eval {berkdb_open -create} {-pagesize 65536 \ - -cachesize {0 50000000 0}} $omethod $args $dbname] - error_check_good db_open [is_valid_db $db] TRUE - - puts -nonewline "\tBigfile.a: Creating database...0%..." - flush stdout - - set data [string repeat z $itemsize] - - set more_than_ten_already 0 - for { set i 0 } { $i < $nitems } { incr i } { - set key key[format %08u $i] - - error_check_good db_put($i) [$db put $key $data] 0 - - if { $i % 5000 == 0 } { - set pct [expr 100 * $i / $nitems] - puts -nonewline "\b\b\b\b\b" - if { $pct >= 10 } { - if { $more_than_ten_already } { - puts -nonewline "\b" - } else { - set more_than_ten_already 1 - } - } - - puts -nonewline "$pct%..." - flush stdout - } - } - puts "\b\b\b\b\b\b100%..." - error_check_good db_close [$db close] 0 - - puts "\tBigfile.b: Verifying database..." - error_check_good verify \ - [verify_dir $testdir "\t\t" 0 0 1 50000000] 0 - - puts "\tBigfile.c: Grow database $growtms times by $growby items" - - for { set j 0 } { $j < $growtms } { incr j } { - set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname] - error_check_good db_open [is_valid_db $db] TRUE - puts -nonewline "\t\tBigfile.c.1: Adding $growby items..." - flush stdout - for { set i 0 } { $i < $growby } { incr i } { - set key key[format %08u $i].$j - error_check_good db_put($j.$i) [$db put $key $data] 0 - } - error_check_good db_close [$db close] 0 - puts "done." - - puts "\t\tBigfile.c.2: Verifying database..." - error_check_good verify($j) \ - [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0 - } -} diff --git a/storage/bdb/test/bigfile002.tcl b/storage/bdb/test/bigfile002.tcl deleted file mode 100644 index 6686f9ac627..00000000000 --- a/storage/bdb/test/bigfile002.tcl +++ /dev/null @@ -1,45 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: bigfile002.tcl,v 11.9 2004/01/28 03:36:26 bostic Exp $ -# -# TEST bigfile002 -# TEST This one should be faster and not require so much disk space, -# TEST although it doesn't test as extensively. Create an mpool file -# TEST with 1K pages. Dirty page 6000000. Sync. -proc bigfile002 { args } { - source ./include.tcl - - puts -nonewline \ - "Bigfile002: Creating large, sparse file through mpool..." - flush stdout - - env_cleanup $testdir - - # Create env. - set env [berkdb_env -create -home $testdir] - error_check_good valid_env [is_valid_env $env] TRUE - - # Create the file. - set name big002.file - set file [$env mpool -create -pagesize 1024 $name] - - # Dirty page 6000000 - set pg [$file get -create 6000000] - error_check_good pg_init [$pg init A] 0 - error_check_good pg_set [$pg is_setto A] 1 - - # Put page back. - error_check_good pg_put [$pg put -dirty] 0 - - # Fsync. - error_check_good fsync [$file fsync] 0 - - puts "succeeded." - - # Close. - error_check_good fclose [$file close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/byteorder.tcl b/storage/bdb/test/byteorder.tcl deleted file mode 100644 index d94f5a0146a..00000000000 --- a/storage/bdb/test/byteorder.tcl +++ /dev/null @@ -1,34 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: byteorder.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $ -# -# Byte Order Test -# Use existing tests and run with both byte orders. 
-proc byteorder { method {nentries 1000} } { - source ./include.tcl - puts "Byteorder: $method $nentries" - - eval {test001 $method $nentries 0 0 "001" -lorder 1234} - eval {verify_dir $testdir} - eval {test001 $method $nentries 0 0 "001" -lorder 4321} - eval {verify_dir $testdir} - eval {test003 $method -lorder 1234} - eval {verify_dir $testdir} - eval {test003 $method -lorder 4321} - eval {verify_dir $testdir} - eval {test010 $method $nentries 5 "010" -lorder 1234} - eval {verify_dir $testdir} - eval {test010 $method $nentries 5 "010" -lorder 4321} - eval {verify_dir $testdir} - eval {test011 $method $nentries 5 "011" -lorder 1234} - eval {verify_dir $testdir} - eval {test011 $method $nentries 5 "011" -lorder 4321} - eval {verify_dir $testdir} - eval {test018 $method $nentries -lorder 1234} - eval {verify_dir $testdir} - eval {test018 $method $nentries -lorder 4321} - eval {verify_dir $testdir} -} diff --git a/storage/bdb/test/conscript.tcl b/storage/bdb/test/conscript.tcl deleted file mode 100644 index 8740ae21425..00000000000 --- a/storage/bdb/test/conscript.tcl +++ /dev/null @@ -1,124 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: conscript.tcl,v 11.25 2004/01/28 03:36:26 bostic Exp $ -# -# Script for DB_CONSUME test (test070.tcl). -# Usage: conscript dir file runtype nitems outputfile tnum args -# dir: DBHOME directory -# file: db file on which to operate -# runtype: PRODUCE, CONSUME or WAIT -- which am I? -# nitems: number of items to put or get -# outputfile: where to log consumer results -# tnum: test number - -proc consumescript_produce { db_cmd nitems tnum args } { - source ./include.tcl - global mydata - - set pid [pid] - puts "\tTest$tnum: Producer $pid starting, producing $nitems items." 
- - set db [eval $db_cmd] - error_check_good db_open:$pid [is_valid_db $db] TRUE - - set oret -1 - set ret 0 - for { set ndx 0 } { $ndx < $nitems } { incr ndx } { - set oret $ret - if { 0xffffffff > 0 && $oret > 0x7fffffff } { - incr oret [expr 0 - 0x100000000] - } - set ret [$db put -append [chop_data q $mydata]] - error_check_good db_put \ - [expr $oret > $ret ? \ - ($oret > 0x7fffffff && $ret < 0x7fffffff) : 1] 1 - - } - - set ret [catch {$db close} res] - error_check_good db_close:$pid $ret 0 - puts "\t\tTest$tnum: Producer $pid finished." -} - -proc consumescript_consume { db_cmd nitems tnum outputfile mode args } { - source ./include.tcl - global mydata - set pid [pid] - puts "\tTest$tnum: Consumer $pid starting, seeking $nitems items." - - set db [eval $db_cmd] - error_check_good db_open:$pid [is_valid_db $db] TRUE - - set oid [open $outputfile a] - - for { set ndx 0 } { $ndx < $nitems } { } { - set ret [$db get $mode] - if { [llength $ret] > 0 } { - error_check_good correct_data:$pid \ - [lindex [lindex $ret 0] 1] [pad_data q $mydata] - set rno [lindex [lindex $ret 0] 0] - puts $oid $rno - incr ndx - } else { - # No data to consume; wait. - } - } - - error_check_good output_close:$pid [close $oid] "" - - set ret [catch {$db close} res] - error_check_good db_close:$pid $ret 0 - puts "\t\tTest$tnum: Consumer $pid finished." -} - -source ./include.tcl -source $test_path/test.tcl - -# Verify usage -if { $argc < 6 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -set usage "conscript.tcl dir file runtype nitems outputfile tnum" - -# Initialize arguments -set dir [lindex $argv 0] -set file [lindex $argv 1] -set runtype [lindex $argv 2] -set nitems [lindex $argv 3] -set outputfile [lindex $argv 4] -set tnum [lindex $argv 5] -# args is the string "{ -len 20 -pad 0}", so we need to extract the -# " -len 20 -pad 0" part. 
-set args [lindex [lrange $argv 6 end] 0] - -set mydata "consumer data" - -# Open env -set dbenv [berkdb_env -home $dir ] -error_check_good db_env_create [is_valid_env $dbenv] TRUE - -# Figure out db opening command. -set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\ - $dbenv $args $file] - -# Invoke consumescript_produce or consumescript_consume based on $runtype -if { $runtype == "PRODUCE" } { - # Producers have nothing to log; make sure outputfile is null. - error_check_good no_producer_outputfile $outputfile "" - consumescript_produce $db_cmd $nitems $tnum $args -} elseif { $runtype == "CONSUME" } { - consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args -} elseif { $runtype == "WAIT" } { - consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \ - $args -} else { - error_check_good bad_args $runtype \ - "either PRODUCE, CONSUME, or WAIT" -} -error_check_good env_close [$dbenv close] 0 -exit diff --git a/storage/bdb/test/dbm.tcl b/storage/bdb/test/dbm.tcl deleted file mode 100644 index 49a0f3e13d1..00000000000 --- a/storage/bdb/test/dbm.tcl +++ /dev/null @@ -1,128 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dbm.tcl,v 11.17 2004/01/28 03:36:26 bostic Exp $ -# -# TEST dbm -# TEST Historic DBM interface test. Use the first 1000 entries from the -# TEST dictionary. Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Then reopen the file, re-retrieve everything. Finally, delete -# TEST everything. 
-proc dbm { { nentries 1000 } } { - source ./include.tcl - - puts "DBM interfaces test: $nentries" - - # Create the database and open the dictionary - set testfile $testdir/dbmtest - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir NULL - - error_check_good dbminit [berkdb dbminit $testfile] 0 - set did [open $dict] - - set flags "" - set txn "" - set count 0 - set skippednullkey 0 - - puts "\tDBM.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - # DBM can't handle zero-length keys - if { [string length $str] == 0 } { - set skippednullkey 1 - continue - } - - set ret [berkdb store $str $str] - error_check_good dbm_store $ret 0 - - set d [berkdb fetch $str] - error_check_good dbm_fetch $d $str - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tDBM.b: dump file" - set oid [open $t1 w] - for { set key [berkdb firstkey] } { $key != -1 } {\ - set key [berkdb nextkey $key] } { - puts $oid $key - set d [berkdb fetch $key] - error_check_good dbm_refetch $d $key - } - - # If we had to skip a zero-length key, juggle things to cover up - # this fact in the dump. - if { $skippednullkey == 1 } { - puts $oid "" - incr nentries 1 - } - - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - - error_check_good DBM:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tDBM.c: close, open, and dump file" - - # Now, reopen the file and run the last test again. 
- error_check_good dbminit2 [berkdb dbminit $testfile] 0 - set oid [open $t1 w] - - for { set key [berkdb firstkey] } { $key != -1 } {\ - set key [berkdb nextkey $key] } { - puts $oid $key - set d [berkdb fetch $key] - error_check_good dbm_refetch $d $key - } - if { $skippednullkey == 1 } { - puts $oid "" - } - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - filesort $t1 $t3 - - error_check_good DBM:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and delete each entry - puts "\tDBM.d: sequential scan and delete" - - error_check_good dbminit3 [berkdb dbminit $testfile] 0 - set oid [open $t1 w] - - for { set key [berkdb firstkey] } { $key != -1 } {\ - set key [berkdb nextkey $key] } { - puts $oid $key - set ret [berkdb delete $key] - error_check_good dbm_delete $ret 0 - } - if { $skippednullkey == 1 } { - puts $oid "" - } - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - filesort $t1 $t3 - - error_check_good DBM:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - error_check_good "dbm_close" [berkdb dbmclose] 0 -} diff --git a/storage/bdb/test/dbscript.tcl b/storage/bdb/test/dbscript.tcl deleted file mode 100644 index 8ddcec09d35..00000000000 --- a/storage/bdb/test/dbscript.tcl +++ /dev/null @@ -1,357 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dbscript.tcl,v 11.16 2004/01/28 03:36:26 bostic Exp $ -# -# Random db tester. -# Usage: dbscript file numops min_del max_add key_avg data_avgdups -# method: method (we pass this in so that fixed-length records work) -# file: db file on which to operate -# numops: number of operations to do -# ncurs: number of cursors -# min_del: minimum number of keys before you disable deletes. -# max_add: maximum number of keys before you disable adds. 
-# key_avg: average key size -# data_avg: average data size -# dups: 1 indicates dups allowed, 0 indicates no dups -# errpct: What percent of operations should generate errors -# seed: Random number generator seed (-1 means use pid) - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt" - -# Verify usage -if { $argc != 10 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set method [lindex $argv 0] -set file [lindex $argv 1] -set numops [ lindex $argv 2 ] -set ncurs [ lindex $argv 3 ] -set min_del [ lindex $argv 4 ] -set max_add [ lindex $argv 5 ] -set key_avg [ lindex $argv 6 ] -set data_avg [ lindex $argv 7 ] -set dups [ lindex $argv 8 ] -set errpct [ lindex $argv 9 ] - -berkdb srand $rand_init - -puts "Beginning execution for [pid]" -puts "$file database" -puts "$numops Operations" -puts "$ncurs cursors" -puts "$min_del keys before deletes allowed" -puts "$max_add or fewer keys to add" -puts "$key_avg average key length" -puts "$data_avg average data length" -if { $dups != 1 } { - puts "No dups" -} else { - puts "Dups allowed" -} -puts "$errpct % Errors" - -flush stdout - -set db [berkdb_open $file] -set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret] -if {$cerr != 0} { - puts $cret - return -} -# set method [$db get_type] -set record_based [is_record_based $method] - -# Initialize globals including data -global nkeys -global l_keys -global a_keys - -set nkeys [db_init $db 1] -puts "Initial number of keys: $nkeys" - -set pflags "" -set gflags "" -set txn "" - -# Open the cursors -set curslist {} -for { set i 0 } { $i < $ncurs } { incr i } { - set dbc [$db cursor] - set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret] - if {$cerr != 0} { - puts $cret - return - } - set cerr [catch {error_check_bad cursor_create $dbc NULL} cret] - if {$cerr != 0} { - puts $cret - return - } 
- lappend curslist $dbc - -} - -# On each iteration we're going to generate random keys and -# data. We'll select either a get/put/delete operation unless -# we have fewer than min_del keys in which case, delete is not -# an option or more than max_add in which case, add is not -# an option. The tcl global arrays a_keys and l_keys keep track -# of key-data pairs indexed by key and a list of keys, accessed -# by integer. -set adds 0 -set puts 0 -set gets 0 -set dels 0 -set bad_adds 0 -set bad_puts 0 -set bad_gets 0 -set bad_dels 0 - -for { set iter 0 } { $iter < $numops } { incr iter } { - set op [pick_op $min_del $max_add $nkeys] - set err [is_err $errpct] - - # The op0's indicate that there aren't any duplicates, so we - # exercise regular operations. If dups is 1, then we'll use - # cursor ops. - switch $op$dups$err { - add00 { - incr adds - - set k [random_data $key_avg 1 a_keys $record_based] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set ret [eval {$db put} $txn $pflags \ - {-nooverwrite $k $data}] - set cerr [catch {error_check_good put $ret 0} cret] - if {$cerr != 0} { - puts $cret - return - } - newpair $k [pad_data $method $data] - } - add01 { - incr bad_adds - set k [random_key] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set ret [eval {$db put} $txn $pflags \ - {-nooverwrite $k $data}] - set cerr [catch {error_check_good put $ret 0} cret] - if {$cerr != 0} { - puts $cret - return - } - # Error case so no change to data state - } - add10 { - incr adds - set dbcinfo [random_cursor $curslist] - set dbc [lindex $dbcinfo 0] - if { [berkdb random_int 1 2] == 1 } { - # Add a new key - set k [random_data $key_avg 1 a_keys \ - $record_based] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set ret [eval {$dbc put} $txn \ - {-keyfirst $k $data}] - newpair $k [pad_data $method $data] - } else { - # Add a new duplicate - set dbc [lindex $dbcinfo 0] - set k [lindex 
$dbcinfo 1] - set data [random_data $data_avg 0 0] - - set op [pick_cursput] - set data [chop_data $method $data] - set ret [eval {$dbc put} $txn {$op $k $data}] - adddup $k [lindex $dbcinfo 2] $data - } - } - add11 { - # TODO - incr bad_adds - set ret 1 - } - put00 { - incr puts - set k [random_key] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set ret [eval {$db put} $txn {$k $data}] - changepair $k [pad_data $method $data] - } - put01 { - incr bad_puts - set k [random_key] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set ret [eval {$db put} $txn $pflags \ - {-nooverwrite $k $data}] - set cerr [catch {error_check_good put $ret 0} cret] - if {$cerr != 0} { - puts $cret - return - } - # Error case so no change to data state - } - put10 { - incr puts - set dbcinfo [random_cursor $curslist] - set dbc [lindex $dbcinfo 0] - set k [lindex $dbcinfo 1] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - - set ret [eval {$dbc put} $txn {-current $data}] - changedup $k [lindex $dbcinfo 2] $data - } - put11 { - incr bad_puts - set k [random_key] - set data [random_data $data_avg 0 0] - set data [chop_data $method $data] - set dbc [$db cursor] - set ret [eval {$dbc put} $txn {-current $data}] - set cerr [catch {error_check_good curs_close \ - [$dbc close] 0} cret] - if {$cerr != 0} { - puts $cret - return - } - # Error case so no change to data state - } - get00 { - incr gets - set k [random_key] - set val [eval {$db get} $txn {$k}] - set data [pad_data $method [lindex [lindex $val 0] 1]] - if { $data == $a_keys($k) } { - set ret 0 - } else { - set ret "FAIL: Error got |$data| expected |$a_keys($k)|" - } - # Get command requires no state change - } - get01 { - incr bad_gets - set k [random_data $key_avg 1 a_keys $record_based] - set ret [eval {$db get} $txn {$k}] - # Error case so no change to data state - } - get10 { - incr gets - set dbcinfo [random_cursor $curslist] - if { [llength 
$dbcinfo] == 3 } { - set ret 0 - else - set ret 0 - } - # Get command requires no state change - } - get11 { - incr bad_gets - set k [random_key] - set dbc [$db cursor] - if { [berkdb random_int 1 2] == 1 } { - set dir -next - } else { - set dir -prev - } - set ret [eval {$dbc get} $txn {-next $k}] - set cerr [catch {error_check_good curs_close \ - [$dbc close] 0} cret] - if {$cerr != 0} { - puts $cret - return - } - # Error and get case so no change to data state - } - del00 { - incr dels - set k [random_key] - set ret [eval {$db del} $txn {$k}] - rempair $k - } - del01 { - incr bad_dels - set k [random_data $key_avg 1 a_keys $record_based] - set ret [eval {$db del} $txn {$k}] - # Error case so no change to data state - } - del10 { - incr dels - set dbcinfo [random_cursor $curslist] - set dbc [lindex $dbcinfo 0] - set ret [eval {$dbc del} $txn] - remdup [lindex dbcinfo 1] [lindex dbcinfo 2] - } - del11 { - incr bad_dels - set c [$db cursor] - set ret [eval {$c del} $txn] - set cerr [catch {error_check_good curs_close \ - [$c close] 0} cret] - if {$cerr != 0} { - puts $cret - return - } - # Error case so no change to data state - } - } - if { $err == 1 } { - # Verify failure. 
- set cerr [catch {error_check_good $op$dups$err:$k \ - [is_substr Error $ret] 1} cret] - if {$cerr != 0} { - puts $cret - return - } - } else { - # Verify success - set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret] - if {$cerr != 0} { - puts $cret - return - } - } - - flush stdout -} - -# Close cursors and file -foreach i $curslist { - set r [$i close] - set cerr [catch {error_check_good cursor_close:$i $r 0} cret] - if {$cerr != 0} { - puts $cret - return - } -} - -set r [$db close] -set cerr [catch {error_check_good db_close:$db $r 0} cret] -if {$cerr != 0} { - puts $cret - return -} - -puts "[timestamp] [pid] Complete" -puts "Successful ops: $adds adds $gets gets $puts puts $dels dels" -puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels" -flush stdout - -filecheck $file $txn - -exit diff --git a/storage/bdb/test/ddoyscript.tcl b/storage/bdb/test/ddoyscript.tcl deleted file mode 100644 index 30e6c34e0bc..00000000000 --- a/storage/bdb/test/ddoyscript.tcl +++ /dev/null @@ -1,172 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: ddoyscript.tcl,v 11.8 2004/01/28 03:36:26 bostic Exp $ -# -# Deadlock detector script tester. -# Usage: ddoyscript dir lockerid numprocs -# dir: DBHOME directory -# lockerid: Lock id for this locker -# numprocs: Total number of processes running -# myid: id of this process -- -# the order that the processes are created is the same -# in which their lockerid's were allocated so we know -# that there is a locker age relationship that is isomorphic -# with the order releationship of myid's. 
- -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "ddoyscript dir lockerid numprocs oldoryoung" - -# Verify usage -if { $argc != 5 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set dir [lindex $argv 0] -set lockerid [ lindex $argv 1 ] -set numprocs [ lindex $argv 2 ] -set old_or_young [lindex $argv 3] -set myid [lindex $argv 4] - -set myenv [berkdb_env -lock -home $dir -create -mode 0644] -error_check_bad lock_open $myenv NULL -error_check_good lock_open [is_substr $myenv "env"] 1 - -# There are two cases here -- oldest/youngest or a ring locker. - -if { $myid == 0 || $myid == [expr $numprocs - 1] } { - set waitobj NULL - set ret 0 - - if { $myid == 0 } { - set objid 2 - if { $old_or_young == "o" } { - set waitobj [expr $numprocs - 1] - } - } else { - if { $old_or_young == "y" } { - set waitobj 0 - } - set objid 4 - } - - # Acquire own read lock - if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} { - puts $errorInfo - } else { - error_check_good selfget:$objid [is_substr $selflock $myenv] 1 - } - - # Acquire read lock - if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} { - puts $errorInfo - } else { - error_check_good lockget:$objid [is_substr $lock1 $myenv] 1 - } - - tclsleep 10 - - if { $waitobj == "NULL" } { - # Sleep for a good long while - tclsleep 90 - } else { - # Acquire write lock - if {[catch {$myenv lock_get write $lockerid $waitobj} lock2] - != 0} { - puts $errorInfo - set ret ERROR - } else { - error_check_good lockget:$waitobj \ - [is_substr $lock2 $myenv] 1 - - # Now release it - if {[catch {$lock2 put} err] != 0} { - puts $errorInfo - set ret ERROR - } else { - error_check_good lockput:oy:$objid $err 0 - } - } - - } - - # Release self lock - if {[catch {$selflock put} err] != 0} { - puts $errorInfo - if { $ret == 0 } { - set ret ERROR - } - } else { - error_check_good selfput:oy:$myid $err 0 - if { $ret == 0 } { - set ret 1 - } - } 
- - # Release first lock - if {[catch {$lock1 put} err] != 0} { - puts $errorInfo - if { $ret == 0 } { - set ret ERROR - } - } else { - error_check_good lockput:oy:$objid $err 0 - if { $ret == 0 } { - set ret 1 - } - } - -} else { - # Make sure that we succeed if we're locking the same object as - # oldest or youngest. - if { [expr $myid % 2] == 0 } { - set mode read - } else { - set mode write - } - # Obtain first lock (should always succeed). - if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} { - puts $errorInfo - } else { - error_check_good lockget:$myid [is_substr $lock1 $myenv] 1 - } - - tclsleep 30 - - set nextobj [expr $myid + 1] - if { $nextobj == [expr $numprocs - 1] } { - set nextobj 1 - } - - set ret 1 - if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} { - if {[string match "*DEADLOCK*" $lock2] == 1} { - set ret DEADLOCK - } else { - set ret ERROR - } - } else { - error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1 - } - - # Now release the first lock - error_check_good lockput:$lock1 [$lock1 put] 0 - - if {$ret == 1} { - error_check_bad lockget:$nextobj $lock2 NULL - error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1 - error_check_good lockput:$lock2 [$lock2 put] 0 - } -} - -puts $ret -error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0 -error_check_good envclose [$myenv close] 0 -exit diff --git a/storage/bdb/test/ddscript.tcl b/storage/bdb/test/ddscript.tcl deleted file mode 100644 index 173cb2a69b4..00000000000 --- a/storage/bdb/test/ddscript.tcl +++ /dev/null @@ -1,44 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: ddscript.tcl,v 11.15 2004/03/18 20:58:14 carol Exp $ -# -# Deadlock detector script tester. -# Usage: ddscript dir test lockerid objid numprocs -# dir: DBHOME directory -# test: Which test to run -# lockerid: Lock id for this locker -# objid: Object id to lock. 
-# numprocs: Total number of processes running - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "ddscript dir test lockerid objid numprocs" - -# Verify usage -if { $argc != 5 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set dir [lindex $argv 0] -set test [ lindex $argv 1 ] -set lockerid [ lindex $argv 2 ] -set objid [ lindex $argv 3 ] -set numprocs [ lindex $argv 4 ] - -set myenv [berkdb_env -lock -home $dir -create -mode 0644 ] -error_check_bad lock_open $myenv NULL -error_check_good lock_open [is_substr $myenv "env"] 1 - -puts [eval $test $myenv $lockerid $objid $numprocs] - -error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0 -error_check_good envclose [$myenv close] 0 - -exit diff --git a/storage/bdb/test/dead001.tcl b/storage/bdb/test/dead001.tcl deleted file mode 100644 index fca094bf17c..00000000000 --- a/storage/bdb/test/dead001.tcl +++ /dev/null @@ -1,88 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead001.tcl,v 11.37 2004/01/28 03:36:26 bostic Exp $ -# -# TEST dead001 -# TEST Use two different configurations to test deadlock detection among a -# TEST variable number of processes. One configuration has the processes -# TEST deadlocked in a ring. The other has the processes all deadlocked on -# TEST a single resource. -proc dead001 { { procs "2 4 10" } {tests "ring clump" } \ - {timeout 0} {tnum "001"} } { - source ./include.tcl - global lock_curid - global lock_maxid - - puts "Dead$tnum: Deadlock detector tests" - - env_cleanup $testdir - - # Create the environment. 
- puts "\tDead$tnum.a: creating environment" - set env [berkdb_env -create \ - -mode 0644 -lock -lock_timeout $timeout -home $testdir] - error_check_good lock_env:open [is_valid_env $env] TRUE - - foreach t $tests { - foreach n $procs { - if {$timeout == 0 } { - set dpid [exec $util_path/db_deadlock -vw \ - -h $testdir >& $testdir/dd.out &] - } else { - set dpid [exec $util_path/db_deadlock -vw \ - -ae -h $testdir >& $testdir/dd.out &] - } - - sentinel_init - set pidlist "" - set ret [$env lock_id_set $lock_curid $lock_maxid] - error_check_good lock_id_set $ret 0 - - # Fire off the tests - puts "\tDead$tnum: $n procs of test $t" - for { set i 0 } { $i < $n } { incr i } { - set locker [$env lock_id] - puts "$tclsh_path $test_path/wrap.tcl \ - $testdir/dead$tnum.log.$i \ - ddscript.tcl $testdir $t $locker $i $n" - set p [exec $tclsh_path \ - $test_path/wrap.tcl \ - ddscript.tcl $testdir/dead$tnum.log.$i \ - $testdir $t $locker $i $n &] - lappend pidlist $p - } - watch_procs $pidlist 5 - - # Now check output - set dead 0 - set clean 0 - set other 0 - for { set i 0 } { $i < $n } { incr i } { - set did [open $testdir/dead$tnum.log.$i] - while { [gets $did val] != -1 } { - switch $val { - DEADLOCK { incr dead } - 1 { incr clean } - default { incr other } - } - } - close $did - } - tclkill $dpid - puts "\tDead$tnum: dead check..." - dead_check $t $n $timeout $dead $clean $other - } - } - - # Windows needs files closed before deleting files, so pause a little - tclsleep 3 - fileremove -f $testdir/dd.out - # Remove log files - for { set i 0 } { $i < $n } { incr i } { - fileremove -f $testdir/dead$tnum.log.$i - } - error_check_good lock_env:close [$env close] 0 -} diff --git a/storage/bdb/test/dead002.tcl b/storage/bdb/test/dead002.tcl deleted file mode 100644 index 7493216d22d..00000000000 --- a/storage/bdb/test/dead002.tcl +++ /dev/null @@ -1,82 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. 
All rights reserved. -# -# $Id: dead002.tcl,v 11.30 2004/07/07 17:05:55 carol Exp $ -# -# TEST dead002 -# TEST Same test as dead001, but use "detect on every collision" instead -# TEST of separate deadlock detector. -proc dead002 { { procs "2 4 10" } {tests "ring clump" } \ - {timeout 0} {tnum 002} } { - source ./include.tcl - - puts "Dead$tnum: Deadlock detector tests (detect on every collision)" - - env_cleanup $testdir - - # Create the environment. - puts "\tDead$tnum.a: creating environment" - set lmode "default" - if { $timeout != 0 } { - set lmode "expire" - } - set env [berkdb_env \ - -create -mode 0644 -home $testdir \ - -lock -lock_timeout $timeout -lock_detect $lmode] - error_check_good lock_env:open [is_valid_env $env] TRUE - - foreach t $tests { - foreach n $procs { - set pidlist "" - sentinel_init - - # Fire off the tests - puts "\tDead$tnum: $n procs of test $t" - for { set i 0 } { $i < $n } { incr i } { - set locker [$env lock_id] - puts "$tclsh_path $test_path/wrap.tcl \ - $testdir/dead$tnum.log.$i \ - ddscript.tcl $testdir $t $locker $i $n" - set p [exec $tclsh_path \ - $test_path/wrap.tcl \ - ddscript.tcl $testdir/dead$tnum.log.$i \ - $testdir $t $locker $i $n &] - lappend pidlist $p - # If we're running with timeouts, pause so that - # locks will have a chance to time out. - if { $timeout != 0 } { - tclsleep 2 - } - } - watch_procs $pidlist 5 - - # Now check output - set dead 0 - set clean 0 - set other 0 - for { set i 0 } { $i < $n } { incr i } { - set did [open $testdir/dead$tnum.log.$i] - while { [gets $did val] != -1 } { - switch $val { - DEADLOCK { incr dead } - 1 { incr clean } - default { incr other } - } - } - close $did - } - - puts "\tDead$tnum: dead check ..." 
- dead_check $t $n $timeout $dead $clean $other - } - } - - fileremove -f $testdir/dd.out - # Remove log files - for { set i 0 } { $i < $n } { incr i } { - fileremove -f $testdir/dead$tnum.log.$i - } - error_check_good lock_env:close [$env close] 0 -} diff --git a/storage/bdb/test/dead003.tcl b/storage/bdb/test/dead003.tcl deleted file mode 100644 index 2a74ce4237b..00000000000 --- a/storage/bdb/test/dead003.tcl +++ /dev/null @@ -1,99 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead003.tcl,v 1.20 2004/01/28 03:36:26 bostic Exp $ -# -# TEST dead003 -# TEST -# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and -# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted. -proc dead003 { {procs "2 4 10"} {tests "ring clump"} {tnum "003"} } { - source ./include.tcl - global lock_curid - global lock_maxid - - set detects { oldest youngest } - puts "Dead$tnum: Deadlock detector tests: $detects" - - # Create the environment. 
- foreach d $detects { - env_cleanup $testdir - puts "\tDead$tnum.a: creating environment for $d" - set env [berkdb_env \ - -create -mode 0644 -home $testdir -lock -lock_detect $d] - error_check_good lock_env:open [is_valid_env $env] TRUE - - foreach t $tests { - foreach n $procs { - set pidlist "" - sentinel_init - set ret [$env lock_id_set \ - $lock_curid $lock_maxid] - error_check_good lock_id_set $ret 0 - - # Fire off the tests - puts "\tDead$tnum: $n procs of test $t" - for { set i 0 } { $i < $n } { incr i } { - set locker [$env lock_id] - puts "$tclsh_path\ - test_path/ddscript.tcl $testdir \ - $t $locker $i $n >& \ - $testdir/dead$tnum.log.$i" - set p [exec $tclsh_path \ - $test_path/wrap.tcl \ - ddscript.tcl \ - $testdir/dead$tnum.log.$i $testdir \ - $t $locker $i $n &] - lappend pidlist $p - } - watch_procs $pidlist 5 - - # Now check output - set dead 0 - set clean 0 - set other 0 - for { set i 0 } { $i < $n } { incr i } { - set did [open $testdir/dead$tnum.log.$i] - while { [gets $did val] != -1 } { - switch $val { - DEADLOCK { incr dead } - 1 { incr clean } - default { incr other } - } - } - close $did - } - puts "\tDead$tnum: dead check..." - dead_check $t $n 0 $dead $clean $other - # - # If we get here we know we have the - # correct number of dead/clean procs, as - # checked by dead_check above. Now verify - # that the right process was the one. 
- puts "\tDead$tnum: Verify $d locks were aborted" - set l "" - if { $d == "oldest" } { - set l [expr $n - 1] - } - if { $d == "youngest" } { - set l 0 - } - set did [open $testdir/dead$tnum.log.$l] - while { [gets $did val] != -1 } { - error_check_good check_abort \ - $val 1 - } - close $did - } - } - - fileremove -f $testdir/dd.out - # Remove log files - for { set i 0 } { $i < $n } { incr i } { - fileremove -f $testdir/dead$tnum.log.$i - } - error_check_good lock_env:close [$env close] 0 - } -} diff --git a/storage/bdb/test/dead004.tcl b/storage/bdb/test/dead004.tcl deleted file mode 100644 index 4f33dcd75a9..00000000000 --- a/storage/bdb/test/dead004.tcl +++ /dev/null @@ -1,108 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead004.tcl,v 11.15 2004/01/28 03:36:27 bostic Exp $ -# -# Deadlock Test 4. -# This test is designed to make sure that we handle youngest and oldest -# deadlock detection even when the youngest and oldest transactions in the -# system are not involved in the deadlock (that is, we want to abort the -# youngest/oldest which is actually involved in the deadlock, not simply -# the youngest/oldest in the system). -# Since this is used for transaction systems, the locker ID is what we -# use to identify age (smaller number is older). -# -# The set up is that we have a total of 6 processes. The oldest (locker 0) -# and the youngest (locker 5) simply acquire a lock, hold it for a long time -# and then release it. The rest form a ring, obtaining lock N and requesting -# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4 -# to abort and not 0 or 5. - -proc dead004 { {tnum "004"} } { - source ./include.tcl - global lock_curid - global lock_maxid - - foreach a { o y } { - puts "Dead$tnum: Deadlock detector test -a $a" - env_cleanup $testdir - - # Create the environment. 
- puts "\tDead$tnum.a: creating environment" - set env [berkdb_env -create -mode 0644 -lock -home $testdir] - error_check_good lock_env:open [is_valid_env $env] TRUE - - set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \ - -h $testdir >& $testdir/dd.out &] - - set procs 6 - - foreach n $procs { - - sentinel_init - set pidlist "" - set ret [$env lock_id_set $lock_curid $lock_maxid] - error_check_good lock_id_set $ret 0 - - # Fire off the tests - puts "\tDead$tnum: $n procs" - for { set i 0 } { $i < $n } { incr i } { - set locker [$env lock_id] - puts "$tclsh_path $test_path/wrap.tcl \ - $testdir/dead$tnum.log.$i \ - ddoyscript.tcl $testdir $locker $n $a $i" - set p [exec $tclsh_path \ - $test_path/wrap.tcl \ - ddoyscript.tcl $testdir/dead$tnum.log.$i \ - $testdir $locker $n $a $i &] - lappend pidlist $p - } - watch_procs $pidlist 5 - - } - # Now check output - set dead 0 - set clean 0 - set other 0 - for { set i 0 } { $i < $n } { incr i } { - set did [open $testdir/dead$tnum.log.$i] - while { [gets $did val] != -1 } { - switch $val { - DEADLOCK { incr dead } - 1 { incr clean } - default { incr other } - } - } - close $did - } - tclkill $dpid - - puts "\tDead$tnum: dead check..." - dead_check oldyoung $n 0 $dead $clean $other - - # Now verify that neither the oldest nor the - # youngest were the deadlock. 
- set did [open $testdir/dead$tnum.log.0] - error_check_bad file:young [gets $did val] -1 - error_check_good read:young $val 1 - close $did - - set did [open $testdir/dead$tnum.log.[expr $procs - 1]] - error_check_bad file:old [gets $did val] -1 - error_check_good read:old $val 1 - close $did - - # Windows needs files closed before deleting files, - # so pause a little - tclsleep 2 - fileremove -f $testdir/dd.out - - # Remove log files - for { set i 0 } { $i < $n } { incr i } { - fileremove -f $testdir/dead$tnum.log.$i - } - error_check_good lock_env:close [$env close] 0 - } -} diff --git a/storage/bdb/test/dead005.tcl b/storage/bdb/test/dead005.tcl deleted file mode 100644 index 78e9ce838d7..00000000000 --- a/storage/bdb/test/dead005.tcl +++ /dev/null @@ -1,89 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead005.tcl,v 11.15 2004/03/17 15:17:17 bostic Exp $ -# -# Deadlock Test 5. -# Test out the minlocks, maxlocks, and minwrites options -# to the deadlock detector. -proc dead005 { { procs "4 6 10" } \ - {tests "maxlocks maxwrites minlocks minwrites" } { tnum "005" } } { - source ./include.tcl - - foreach t $tests { - puts "Dead$tnum.$t: deadlock detection tests" - env_cleanup $testdir - - # Create the environment. 
- set env [berkdb_env -create -mode 0644 -lock -home $testdir] - error_check_good lock_env:open [is_valid_env $env] TRUE - case $t { - maxlocks { set to m } - maxwrites { set to W } - minlocks { set to n } - minwrites { set to w } - } - foreach n $procs { - set dpid [exec $util_path/db_deadlock -vw -h $testdir \ - -a $to >& $testdir/dd.out &] - sentinel_init - set pidlist "" - - # Fire off the tests - puts "\tDead$tnum: $t test with $n procs" - for { set i 0 } { $i < $n } { incr i } { - set locker [$env lock_id] - puts "$tclsh_path $test_path/wrap.tcl \ - $testdir/dead$tnum.log.$i \ - ddscript.tcl $testdir $t $locker $i $n" - set p [exec $tclsh_path \ - $test_path/wrap.tcl \ - ddscript.tcl $testdir/dead$tnum.log.$i \ - $testdir $t $locker $i $n &] - lappend pidlist $p - } - watch_procs $pidlist 5 - - # Now check output - set dead 0 - set clean 0 - set other 0 - for { set i 0 } { $i < $n } { incr i } { - set did [open $testdir/dead$tnum.log.$i] - while { [gets $did val] != -1 } { - switch $val { - DEADLOCK { incr dead } - 1 { incr clean } - default { incr other } - } - } - close $did - } - tclkill $dpid - puts "\tDead$tnum: dead check..." - dead_check $t $n 0 $dead $clean $other - # Now verify that the correct participant - # got deadlocked. 
- switch $t { - maxlocks {set f [expr $n - 1]} - maxwrites {set f 2} - minlocks {set f 0} - minwrites {set f 1} - } - set did [open $testdir/dead$tnum.log.$f] - error_check_bad file:$t [gets $did val] -1 - error_check_good read($f):$t $val DEADLOCK - close $did - } - error_check_good lock_env:close [$env close] 0 - # Windows needs files closed before deleting them, so pause - tclsleep 2 - fileremove -f $testdir/dd.out - # Remove log files - for { set i 0 } { $i < $n } { incr i } { - fileremove -f $testdir/dead001.log.$i - } - } -} diff --git a/storage/bdb/test/dead006.tcl b/storage/bdb/test/dead006.tcl deleted file mode 100644 index 4d80af41894..00000000000 --- a/storage/bdb/test/dead006.tcl +++ /dev/null @@ -1,16 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead006.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $ -# -# TEST dead006 -# TEST use timeouts rather than the normal dd algorithm. -proc dead006 { { procs "2 4 10" } {tests "ring clump" } \ - {timeout 1000} {tnum 006} } { - source ./include.tcl - - dead001 $procs $tests $timeout $tnum - dead002 $procs $tests $timeout $tnum -} diff --git a/storage/bdb/test/dead007.tcl b/storage/bdb/test/dead007.tcl deleted file mode 100644 index e9aefa9c0ba..00000000000 --- a/storage/bdb/test/dead007.tcl +++ /dev/null @@ -1,36 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: dead007.tcl,v 1.6 2004/01/28 03:36:27 bostic Exp $ -# -# TEST dead007 -# TEST Tests for locker and txn id wraparound. 
-proc dead007 { {tnum "007"} } { - source ./include.tcl - global lock_curid - global lock_maxid - - set save_curid $lock_curid - set save_maxid $lock_maxid - puts "Dead$tnum.a -- wrap around" - set lock_curid [expr $lock_maxid - 2] - dead001 "2 10" "ring clump" "0" $tnum - ## Oldest/youngest breaks when the id wraps - # dead003 "4 10" - dead004 $tnum - - puts "Dead$tnum.b -- extend space" - set lock_maxid [expr $lock_maxid - 3] - set lock_curid [expr $lock_maxid - 1] - dead001 "4 10" "ring clump" "0" $tnum - ## Oldest/youngest breaks when the id wraps - # dead003 "10" - dead004 $tnum - - set lock_curid $save_curid - set lock_maxid $save_maxid - # Return the empty string so we don't return lock_maxid. - return "" -} diff --git a/storage/bdb/test/env001.tcl b/storage/bdb/test/env001.tcl deleted file mode 100644 index 4e2c070e579..00000000000 --- a/storage/bdb/test/env001.tcl +++ /dev/null @@ -1,154 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env001.tcl,v 11.28 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env001 -# TEST Test of env remove interface (formerly env_remove). -proc env001 { } { - global errorInfo - global errorCode - - source ./include.tcl - - set testfile $testdir/env.db - set t1 $testdir/t1 - - puts "Env001: Test of environment remove interface." - env_cleanup $testdir - - # Try opening without Create flag should error - puts "\tEnv001.a: Open without create (should fail)." - catch {set env [berkdb_env_noerr -home $testdir]} ret - error_check_good env:fail [is_substr $ret "no such file"] 1 - - # Now try opening with create - puts "\tEnv001.b: Open with create." - set env [berkdb_env -create -mode 0644 -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - - # Make sure that close works. - puts "\tEnv001.c: Verify close." 
- error_check_good env:close:$env [$env close] 0 - - # Make sure we can reopen -- this doesn't work on Windows - # because if there is only one opener, the region disappears - # when it is closed. We can't do a second opener, because - # that will fail on HP-UX. - puts "\tEnv001.d: Remove on closed environments." - if { $is_windows_test != 1 } { - puts "\t\tEnv001.d.1: Verify re-open." - set env [berkdb_env -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - - # remove environment - puts "\t\tEnv001.d.2: Close environment." - error_check_good env:close [$env close] 0 - puts "\t\tEnv001.d.3: Try remove with force (should succeed)." - error_check_good \ - envremove [berkdb envremove -force -home $testdir] 0 - } - - if { $is_windows_test != 1 && $is_hp_test != 1 } { - puts "\tEnv001.e: Remove on open environments." - puts "\t\tEnv001.e.1: Env is open by single proc,\ - remove no force." - set env [berkdb_env -create -mode 0644 -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - set stat [catch {berkdb envremove -home $testdir} ret] - error_check_good env:remove $stat 1 - error_check_good env:close [$env close] 0 - } - - puts \ - "\t\tEnv001.e.2: Env is open by single proc, remove with force." - # Now that envremove doesn't do a close, this won't work on Windows. - if { $is_windows_test != 1 && $is_hp_test != 1} { - set env [berkdb_env_noerr -create -mode 0644 -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - set stat [catch {berkdb envremove -force -home $testdir} ret] - error_check_good env:remove(force) $ret 0 - # - # Even though the underlying env is gone, we need to close - # the handle. 
- # - set stat [catch {$env close} ret] - error_check_bad env:close_after_remove $stat 0 - error_check_good env:close_after_remove \ - [is_substr $ret "recovery"] 1 - } - - puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force." - # should fail - set env [berkdb_env -create -mode 0644 -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - - set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - # First close our env, but leave remote open - error_check_good env:close [$env close] 0 - catch {berkdb envremove -home $testdir} ret - error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1 - # - # even though it failed, $env is no longer valid, so remove it in - # the remote process - set remote_close [send_cmd $f1 "$remote_env close"] - error_check_good remote_close $remote_close 0 - - # exit remote process - set err [catch { close $f1 } result] - error_check_good close_remote_process $err 0 - - puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force." - # You cannot do this on windows because you can't remove files that - # are open, so we skip this test for Windows. On UNIX, it should - # succeed - if { $is_windows_test != 1 && $is_hp_test != 1 } { - set env [berkdb_env_noerr -create -mode 0644 -home $testdir] - error_check_bad env:$testdir $env NULL - error_check_good env:$testdir [is_substr $env "env"] 1 - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - - set remote_env [send_cmd $f1 "berkdb_env -home $testdir"] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - - catch {berkdb envremove -force -home $testdir} ret - error_check_good envremove:2procs:force $ret 0 - # - # We still need to close our handle. 
- # - set stat [catch {$env close} ret] - error_check_bad env:close_after_error $stat 0 - error_check_good env:close_after_error \ - [is_substr $ret recovery] 1 - - # Close down remote process - set err [catch { close $f1 } result] - error_check_good close_remote_process $err 0 - } - - # Try opening in a different dir - puts "\tEnv001.f: Try opening env in another directory." - if { [file exists $testdir/NEWDIR] != 1 } { - file mkdir $testdir/NEWDIR - } - set eflags "-create -home $testdir/NEWDIR -mode 0644" - set env [eval {berkdb_env} $eflags] - error_check_bad env:open $env NULL - error_check_good env:close [$env close] 0 - error_check_good berkdb:envremove \ - [berkdb envremove -home $testdir/NEWDIR] 0 - - puts "\tEnv001 complete." -} diff --git a/storage/bdb/test/env002.tcl b/storage/bdb/test/env002.tcl deleted file mode 100644 index 70f573c2980..00000000000 --- a/storage/bdb/test/env002.tcl +++ /dev/null @@ -1,156 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env002.tcl,v 11.17 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env002 -# TEST Test of DB_LOG_DIR and env name resolution. -# TEST With an environment path specified using -home, and then again -# TEST with it specified by the environment variable DB_HOME: -# TEST 1) Make sure that the set_lg_dir option is respected -# TEST a) as a relative pathname. -# TEST b) as an absolute pathname. -# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected, -# TEST again as relative and absolute pathnames. -# TEST 3) Make sure that if -both- db_config and a file are present, -# TEST only the file is respected (see doc/env/naming.html). -proc env002 { } { - # env002 is essentially just a small driver that runs - # env002_body--formerly the entire test--twice; once, it - # supplies a "home" argument to use with environment opens, - # and the second time it sets DB_HOME instead. 
- # Note that env002_body itself calls env002_run_test to run - # the body of the actual test and check for the presence - # of logs. The nesting, I hope, makes this test's structure simpler. - - global env - source ./include.tcl - - puts "Env002: set_lg_dir test." - - puts "\tEnv002: Running with -home argument to berkdb_env." - env002_body "-home $testdir" - - puts "\tEnv002: Running with environment variable DB_HOME set." - set env(DB_HOME) $testdir - env002_body "-use_environ" - - unset env(DB_HOME) - - puts "\tEnv002: Running with both DB_HOME and -home set." - # Should respect -only- -home, so we give it a bogus - # environment variable setting. - set env(DB_HOME) $testdir/bogus_home - env002_body "-use_environ -home $testdir" - unset env(DB_HOME) - -} - -proc env002_body { home_arg } { - source ./include.tcl - - env_cleanup $testdir - set logdir "logs_in_here" - - file mkdir $testdir/$logdir - - # Set up full path to $logdir for when we test absolute paths. - set curdir [pwd] - cd $testdir/$logdir - set fulllogdir [pwd] - cd $curdir - - env002_make_config $logdir - - # Run the meat of the test. - env002_run_test a 1 "relative path, config file" $home_arg \ - $testdir/$logdir - - env_cleanup $testdir - - file mkdir $fulllogdir - env002_make_config $fulllogdir - - # Run the test again - env002_run_test a 2 "absolute path, config file" $home_arg \ - $fulllogdir - - env_cleanup $testdir - - # Now we try without a config file, but instead with db_config - # relative paths - file mkdir $testdir/$logdir - env002_run_test b 1 "relative path, db_config" "$home_arg \ - -log_dir $logdir -data_dir ." \ - $testdir/$logdir - - env_cleanup $testdir - - # absolute - file mkdir $fulllogdir - env002_run_test b 2 "absolute path, db_config" "$home_arg \ - -log_dir $fulllogdir -data_dir ." \ - $fulllogdir - - env_cleanup $testdir - - # Now, set db_config -and- have a # DB_CONFIG file, and make - # sure only the latter is honored. 
- - file mkdir $testdir/$logdir - env002_make_config $logdir - - # note that we supply a -nonexistent- log dir to db_config - env002_run_test c 1 "relative path, both db_config and file" \ - "$home_arg -log_dir $testdir/bogus \ - -data_dir ." $testdir/$logdir - env_cleanup $testdir - - file mkdir $fulllogdir - env002_make_config $fulllogdir - - # note that we supply a -nonexistent- log dir to db_config - env002_run_test c 2 "relative path, both db_config and file" \ - "$home_arg -log_dir $fulllogdir/bogus \ - -data_dir ." $fulllogdir -} - -proc env002_run_test { major minor msg env_args log_path} { - global testdir - set testfile "env002.db" - - puts "\t\tEnv002.$major.$minor: $msg" - - # Create an environment, with logging, and scribble some - # stuff in a [btree] database in it. - # puts [concat {berkdb_env -create -log -private} $env_args] - set dbenv [eval {berkdb_env -create -log -private} $env_args] - error_check_good env_open [is_valid_env $dbenv] TRUE - set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - set key "some_key" - set data "some_data" - - error_check_good db_put \ - [$db put $key [chop_data btree $data]] 0 - - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - - # Now make sure the log file is where we want it to be. - error_check_good db_exists [file exists $testdir/$testfile] 1 - error_check_good log_exists \ - [file exists $log_path/log.0000000001] 1 -} - -proc env002_make_config { logdir } { - global testdir - - set cid [open $testdir/DB_CONFIG w] - puts $cid "set_data_dir ." - puts $cid "set_lg_dir $logdir" - close $cid -} diff --git a/storage/bdb/test/env003.tcl b/storage/bdb/test/env003.tcl deleted file mode 100644 index 247fcd3c9a8..00000000000 --- a/storage/bdb/test/env003.tcl +++ /dev/null @@ -1,149 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. 
All rights reserved. -# -# $Id: env003.tcl,v 11.23 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env003 -# TEST Test DB_TMP_DIR and env name resolution -# TEST With an environment path specified using -home, and then again -# TEST with it specified by the environment variable DB_HOME: -# TEST 1) Make sure that the DB_TMP_DIR config file option is respected -# TEST a) as a relative pathname. -# TEST b) as an absolute pathname. -# TEST 2) Make sure that the -tmp_dir config option is respected, -# TEST again as relative and absolute pathnames. -# TEST 3) Make sure that if -both- -tmp_dir and a file are present, -# TEST only the file is respected (see doc/env/naming.html). -proc env003 { } { - # env003 is essentially just a small driver that runs - # env003_body twice. First, it supplies a "home" argument - # to use with environment opens, and the second time it sets - # DB_HOME instead. - # Note that env003_body itself calls env003_run_test to run - # the body of the actual test. - - global env - source ./include.tcl - - puts "Env003: DB_TMP_DIR test." - - puts "\tEnv003: Running with -home argument to berkdb_env." - env003_body "-home $testdir" - - puts "\tEnv003: Running with environment variable DB_HOME set." - set env(DB_HOME) $testdir - env003_body "-use_environ" - - unset env(DB_HOME) - - puts "\tEnv003: Running with both DB_HOME and -home set." - # Should respect -only- -home, so we give it a bogus - # environment variable setting. - set env(DB_HOME) $testdir/bogus_home - env003_body "-use_environ -home $testdir" - unset env(DB_HOME) -} - -proc env003_body { home_arg } { - source ./include.tcl - - env_cleanup $testdir - set tmpdir "tmpfiles_in_here" - file mkdir $testdir/$tmpdir - - # Set up full path to $tmpdir for when we test absolute paths. - set curdir [pwd] - cd $testdir/$tmpdir - set fulltmpdir [pwd] - cd $curdir - - # Create DB_CONFIG - env003_make_config $tmpdir - - # Run the meat of the test. 
- env003_run_test a 1 "relative path, config file" $home_arg \ - $testdir/$tmpdir - - env003_make_config $fulltmpdir - - # Run the test again - env003_run_test a 2 "absolute path, config file" $home_arg \ - $fulltmpdir - - # Now we try without a config file, but instead with db_config - # relative paths - env003_run_test b 1 "relative path, db_config" "$home_arg \ - -tmp_dir $tmpdir -data_dir ." \ - $testdir/$tmpdir - - # absolute paths - env003_run_test b 2 "absolute path, db_config" "$home_arg \ - -tmp_dir $fulltmpdir -data_dir ." \ - $fulltmpdir - - # Now, set db_config -and- have a # DB_CONFIG file, and make - # sure only the latter is honored. - - file mkdir $testdir/bogus - env003_make_config $tmpdir - - env003_run_test c 1 "relative path, both db_config and file" \ - "$home_arg -tmp_dir $testdir/bogus -data_dir ." \ - $testdir/$tmpdir - - file mkdir $fulltmpdir/bogus - env003_make_config $fulltmpdir - - env003_run_test c 2 "absolute path, both db_config and file" \ - "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \ - $fulltmpdir -} - -proc env003_run_test { major minor msg env_args tmp_path} { - global testdir - global alphabet - global errorCode - - puts "\t\tEnv003.$major.$minor: $msg" - - # Create an environment and small-cached in-memory database to - # use. - set dbenv [eval {berkdb_env -create -home $testdir} $env_args \ - {-cachesize {0 50000 1}}] - error_check_good env_open [is_valid_env $dbenv] TRUE - - set db [berkdb_open -env $dbenv -create -btree] - error_check_good db_open [is_valid_db $db] TRUE - - # Fill the database with more than its cache can fit. - # - # When CONFIG_TEST is defined, the tempfile is left linked so - # we can check for its existence. Size the data to overfill - # the cache--the temp file is created lazily, so it is created - # when the cache overflows. - # - set key "key" - set data [repeat $alphabet 2000] - error_check_good db_put [$db put $key $data] 0 - - # Check for exactly one temp file. 
- set ret [glob -nocomplain $tmp_path/BDB*] - error_check_good temp_file_exists [llength $ret] 1 - - # Can't remove temp file until db is closed on Windows. - error_check_good db_close [$db close] 0 - fileremove -f $ret - error_check_good env_close [$dbenv close] 0 - -} - -proc env003_make_config { tmpdir } { - global testdir - - set cid [open $testdir/DB_CONFIG w] - puts $cid "set_data_dir ." - puts $cid "set_tmp_dir $tmpdir" - close $cid -} diff --git a/storage/bdb/test/env004.tcl b/storage/bdb/test/env004.tcl deleted file mode 100644 index fe975d700fd..00000000000 --- a/storage/bdb/test/env004.tcl +++ /dev/null @@ -1,95 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env004.tcl,v 11.22 2004/04/23 15:40:12 sue Exp $ -# -# TEST env004 -# TEST Test multiple data directories. Do a bunch of different opens -# TEST to make sure that the files are detected in different directories. -proc env004 { } { - source ./include.tcl - - set method "hash" - set omethod [convert_method $method] - set args [convert_args $method ""] - - puts "Env004: Multiple data directory test." - - env_cleanup $testdir - file mkdir $testdir/data1 - file mkdir $testdir/data2 - file mkdir $testdir/data3 - - puts "\tEnv004.a: Multiple data directories in DB_CONFIG file" - - # Create a config file - set cid [open $testdir/DB_CONFIG w] - puts $cid "set_data_dir ." - puts $cid "set_data_dir data1" - puts $cid "set_data_dir data2" - puts $cid "set_data_dir data3" - close $cid - - set e [berkdb_env -create -private -home $testdir] - error_check_good dbenv [is_valid_env $e] TRUE - ddir_test $method $e $args - error_check_good env_close [$e close] 0 - - puts "\tEnv004.b: Multiple data directories in berkdb_env call." 
- env_cleanup $testdir - file mkdir $testdir/data1 - file mkdir $testdir/data2 - file mkdir $testdir/data3 - - # Now call dbenv with config specified - set e [berkdb_env -create -private \ - -data_dir . -data_dir data1 -data_dir data2 \ - -data_dir data3 -home $testdir] - error_check_good dbenv [is_valid_env $e] TRUE - ddir_test $method $e $args - error_check_good env_close [$e close] 0 -} - -proc ddir_test { method e args } { - source ./include.tcl - - set args [convert_args $args] - set omethod [convert_method $method] - - # Now create one file in each directory - set db1 [eval {berkdb_open -create \ - -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}] - error_check_good dbopen1 [is_valid_db $db1] TRUE - - set db2 [eval {berkdb_open -create \ - -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}] - error_check_good dbopen2 [is_valid_db $db2] TRUE - - set db3 [eval {berkdb_open -create \ - -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}] - error_check_good dbopen3 [is_valid_db $db3] TRUE - - # Close the files - error_check_good db_close1 [$db1 close] 0 - error_check_good db_close2 [$db2 close] 0 - error_check_good db_close3 [$db3 close] 0 - - # Now, reopen the files without complete pathnames and make - # sure that we find them. 
- - set db1 [berkdb_open -env $e datafile1.db] - error_check_good dbopen1 [is_valid_db $db1] TRUE - - set db2 [berkdb_open -env $e datafile2.db] - error_check_good dbopen2 [is_valid_db $db2] TRUE - - set db3 [berkdb_open -env $e datafile3.db] - error_check_good dbopen3 [is_valid_db $db3] TRUE - - # Finally close all the files - error_check_good db_close1 [$db1 close] 0 - error_check_good db_close2 [$db2 close] 0 - error_check_good db_close3 [$db3 close] 0 -} diff --git a/storage/bdb/test/env005.tcl b/storage/bdb/test/env005.tcl deleted file mode 100644 index fc08bc97fa1..00000000000 --- a/storage/bdb/test/env005.tcl +++ /dev/null @@ -1,52 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env005.tcl,v 11.20 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env005 -# TEST Test that using subsystems without initializing them correctly -# TEST returns an error. Cannot test mpool, because it is assumed in -# TEST the Tcl code. -proc env005 { } { - source ./include.tcl - - puts "Env005: Uninitialized env subsystems test." - - env_cleanup $testdir - puts "\tEnv005.a: Creating env with no subsystems." 
- set e [berkdb_env_noerr -create -home $testdir] - error_check_good dbenv [is_valid_env $e] TRUE - set db [berkdb_open -create -btree $testdir/env005.db] - error_check_good dbopen [is_valid_db $db] TRUE - - set rlist { - { "lock_detect" "Env005.b0"} - { "lock_get read 1 1" "Env005.b1"} - { "lock_id" "Env005.b2"} - { "lock_stat" "Env005.b3"} - { "lock_timeout 100" "Env005.b4"} - { "log_archive" "Env005.c0"} - { "log_cursor" "Env005.c1"} - { "log_file {1 1}" "Env005.c2"} - { "log_flush" "Env005.c3"} - { "log_put record" "Env005.c4"} - { "log_stat" "Env005.c5"} - { "txn" "Env005.d0"} - { "txn_checkpoint" "Env005.d1"} - { "txn_stat" "Env005.d2"} - { "txn_timeout 100" "Env005.d3"} - } - - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - puts "\t$msg: $cmd" - set stat [catch {eval $e $cmd} ret] - error_check_good $cmd $stat 1 - error_check_good $cmd.err [is_substr $ret invalid] 1 - } - error_check_good dbclose [$db close] 0 - error_check_good envclose [$e close] 0 -} diff --git a/storage/bdb/test/env006.tcl b/storage/bdb/test/env006.tcl deleted file mode 100644 index 9f220335749..00000000000 --- a/storage/bdb/test/env006.tcl +++ /dev/null @@ -1,91 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env006.tcl,v 11.11 2004/04/27 19:56:44 carol Exp $ -# -# TEST env006 -# TEST Make sure that all the utilities exist and run. -# TEST Test that db_load -r options don't blow up. -proc env006 { } { - source ./include.tcl - - puts "Env006: Run underlying utilities." 
- - set rlist { - { "db_archive" "Env006.a"} - { "db_checkpoint" "Env006.b"} - { "db_deadlock" "Env006.c"} - { "db_dump" "Env006.d"} - { "db_load" "Env006.e"} - { "db_printlog" "Env006.f"} - { "db_recover" "Env006.g"} - { "db_stat" "Env006.h"} - { "db_upgrade" "Env006.h"} - { "db_verify" "Env006.h"} - } - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - - puts "\t$msg: $cmd" - - set stat [catch {exec $util_path/$cmd -?} ret] - error_check_good $cmd $stat 1 - - # - # Check for "usage", but only check "sage" so that - # we can handle either Usage or usage. - # - error_check_good $cmd.err [is_substr $ret sage] 1 - } - - env_cleanup $testdir - set env [eval berkdb_env -create -home $testdir -txn] - error_check_good env_open [is_valid_env $env] TRUE - - set sub SUBDB - foreach case { noenv env } { - if { $case == "env" } { - set envargs " -env $env " - set homeargs " -h $testdir " - set testfile env006.db - } else { - set envargs "" - set homeargs "" - set testfile $testdir/env006.db - } - - puts "\tEnv006.i: Testing db_load -r with $case." - set db [eval berkdb_open -create $envargs -btree $testfile] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - set ret [eval \ - exec $util_path/db_load -r lsn $homeargs $testfile] - error_check_good db_load_r_lsn $ret "" - set ret [eval \ - exec $util_path/db_load -r fileid $homeargs $testfile] - error_check_good db_load_r_fileid $ret "" - - error_check_good db_remove \ - [eval {berkdb dbremove} $envargs $testfile] 0 - - puts "\tEnv006.j: Testing db_load -r with $case and subdbs." 
- set db [eval berkdb_open -create $envargs -btree $testfile $sub] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - set ret [eval \ - exec {$util_path/db_load} -r lsn $homeargs $testfile] - error_check_good db_load_r_lsn $ret "" - set ret [eval \ - exec {$util_path/db_load} -r fileid $homeargs $testfile] - error_check_good db_load_r_fileid $ret "" - - error_check_good \ - db_remove [eval {berkdb dbremove} $envargs $testfile] 0 - } - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/env007.tcl b/storage/bdb/test/env007.tcl deleted file mode 100644 index efbb17d55c7..00000000000 --- a/storage/bdb/test/env007.tcl +++ /dev/null @@ -1,605 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env007.tcl,v 11.41 2004/09/22 18:01:04 bostic Exp $ -# -# TEST env007 -# TEST Test DB_CONFIG config file options for berkdb env. -# TEST 1) Make sure command line option is respected -# TEST 2) Make sure that config file option is respected -# TEST 3) Make sure that if -both- DB_CONFIG and the set_ -# TEST method is used, only the file is respected. -# TEST Then test all known config options. -# TEST Also test config options on berkdb open. This isn't -# TEST really env testing, but there's no better place to put it. -proc env007 { } { - global errorInfo - global passwd - global has_crypto - source ./include.tcl - - puts "Env007: DB_CONFIG and getters test." - puts "Env007.a: Test berkdb env options using getters and stat." - - # Set up options we can check via stat or getters. Structure - # of the list is: - # 0. Arg used in berkdb env command - # 1. Arg used in DB_CONFIG file - # 2. Value assigned in berkdb env command - # 3. Value assigned in DB_CONFIG file - # 4. Message output during test - # 5. Stat command to run (empty if we can't get the info - # from stat). - # 6. String to search for in stat output - # 7. 
Which arg to check in stat (needed for cases where - # we set more than one args at a time, but stat can - # only check one args, like cachesize) - # 8. Arg used in getter - # - set rlist { - { " -txn_max " "set_tx_max" "19" "31" - "Env007.a1: Txn Max" "txn_stat" - "Max Txns" "0" "get_tx_max" } - { " -lock_max_locks " "set_lk_max_locks" "17" "29" - "Env007.a2: Lock Max" "lock_stat" - "Maximum locks" "0" "get_lk_max_locks" } - { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000" - "Env007.a3: Max Lockers" "lock_stat" - "Maximum lockers" "0" "get_lk_max_lockers" } - { " -lock_max_objects " "set_lk_max_objects" "1500" "2000" - "Env007.a4: Max Objects" "lock_stat" - "Maximum objects" "0" "get_lk_max_objects" } - { " -log_buffer " "set_lg_bsize" "65536" "131072" - "Env007.a5: Log Bsize" "log_stat" - "Log record cache size" "0" "get_lg_bsize" } - { " -log_max " "set_lg_max" "8388608" "9437184" - "Env007.a6: Log Max" "log_stat" - "Current log file size" "0" "get_lg_max" } - { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" - "Env007.a7.0: Cachesize" "mpool_stat" - "Cache size (gbytes)" "0" "get_cachesize" } - { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" - "Env007.a7.1: Cachesize" "mpool_stat" - "Cache size (bytes)" "1" "get_cachesize" } - { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1" - "Env007.a7.2: Cachesize" "mpool_stat" - "Number of caches" "2" "get_cachesize" } - { " -lock_timeout " "set_lock_timeout" "100" "120" - "Env007.a8: Lock Timeout" "lock_stat" - "Lock timeout value" "0" "get_timeout lock" } - { " -log_regionmax " "set_lg_regionmax" "8388608" "4194304" - "Env007.a9: Log Regionmax" "log_stat" - "Region size" "0" "get_lg_regionmax" } - { " -mpool_max_openfd " "set_mp_max_openfd" "17" "27" - "Env007.a10: Mmap max openfd" "mpool_stat" - "Maximum open file descriptors" "0" "get_mp_max_openfd" } - { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67" - "Env007.a11.0: Mmap max write" "mpool_stat" - "Maximum sequential buffer 
writes" "0" "get_mp_max_write" } - { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67" - "Env007.a11.1: Mmap max write" "mpool_stat" - "Sleep after writing maximum buffers" "1" "get_mp_max_write" } - { " -mpool_mmap_size " "set_mp_mmapsize" "12582912" "8388608" - "Env007.a12: Mmapsize" "mpool_stat" - "Maximum memory-mapped file size" "0" "get_mp_mmapsize" } - { " -shm_key " "set_shm_key" "15" "35" - "Env007.a13: Shm Key" "" - "" "" "get_shm_key" } - { " -tmp_dir " "set_tmp_dir" "." "./TEMPDIR" - "Env007.a14: Temp dir" "" - "" "" "get_tmp_dir" } - { " -txn_timeout " "set_txn_timeout" "100" "120" - "Env007.a15: Txn timeout" "lock_stat" - "Transaction timeout value" "0" "get_timeout txn" } - } - - set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn " - set qnxexclude {set_cachesize} - foreach item $rlist { - set envarg [lindex $item 0] - set configarg [lindex $item 1] - set envval [lindex $item 2] - set configval [lindex $item 3] - set msg [lindex $item 4] - set statcmd [lindex $item 5] - set statstr [lindex $item 6] - set index [lindex $item 7] - set getter [lindex $item 8] - - if { $is_qnx_test && - [lsearch $qnxexclude $configarg] != -1 } { - puts "\tEnv007.a: Skipping $configarg for QNX" - continue - } - - env_cleanup $testdir - - # First verify using just env args - puts "\t$msg Environment argument only" - set env [eval $e $envarg {$envval}] - error_check_good envopen:0 [is_valid_env $env] TRUE - error_check_good get_envval [eval $env $getter] $envval - if { $statcmd != "" } { - set statenvval [lindex $envval $index] - # log_stat reports the sum of the specified - # region size and the log buffer size. 
- if { $statstr == "Region size" } { - set lbufsize 32768 - set statenvval [expr $statenvval + $lbufsize] - } - env007_check $env $statcmd $statstr $statenvval - } - error_check_good envclose:0 [$env close] 0 - - env_cleanup $testdir - env007_make_config $configarg $configval - - # Verify using just config file - puts "\t$msg Config file only" - set env [eval $e] - error_check_good envopen:1 [is_valid_env $env] TRUE - error_check_good get_configval1 [eval $env $getter] $configval - if { $statcmd != "" } { - set statconfigval [lindex $configval $index] - if { $statstr == "Region size" } { - set statconfigval [expr $statconfigval + $lbufsize] - } - env007_check $env $statcmd $statstr $statconfigval - } - error_check_good envclose:1 [$env close] 0 - - # Now verify using env args and config args - puts "\t$msg Environment arg and config file" - set env [eval $e $envarg {$envval}] - error_check_good envopen:2 [is_valid_env $env] TRUE - # Getter should retrieve config val, not envval. - error_check_good get_configval2 [eval $env $getter] $configval - if { $statcmd != "" } { - env007_check $env $statcmd $statstr $statconfigval - } - error_check_good envclose:2 [$env close] 0 - } - - # - # Test all options that can be set in DB_CONFIG. Write it out - # to the file and make sure we can open the env. This execs - # the config file code. Also check with a getter that the - # expected value is returned. - # - puts "\tEnv007.b: Test berkdb env config options using getters\ - and env open." - - # The cfglist variable contains options that can be set in DB_CONFIG. - set cfglist { - { "set_data_dir" "." "get_data_dirs" "." 
} - { "set_flags" "db_auto_commit" "get_flags" "-auto_commit" } - { "set_flags" "db_cdb_alldb" "get_flags" "-cdb_alldb" } - { "set_flags" "db_direct_db" "get_flags" "-direct_db" } - { "set_flags" "db_direct_log" "get_flags" "-direct_log" } - { "set_flags" "db_dsync_log" "get_flags" "-dsync_log" } - { "set_flags" "db_log_autoremove" "get_flags" "-log_remove" } - { "set_flags" "db_nolocking" "get_flags" "-nolock" } - { "set_flags" "db_nommap" "get_flags" "-nommap" } - { "set_flags" "db_nopanic" "get_flags" "-nopanic" } - { "set_flags" "db_overwrite" "get_flags" "-overwrite" } - { "set_flags" "db_region_init" "get_flags" "-region_init" } - { "set_flags" "db_txn_nosync" "get_flags" "-nosync" } - { "set_flags" "db_txn_write_nosync" "get_flags" "-wrnosync" } - { "set_flags" "db_yieldcpu" "get_flags" "-yield" } - { "set_lg_bsize" "65536" "get_lg_bsize" "65536" } - { "set_lg_dir" "." "get_lg_dir" "." } - { "set_lg_max" "8388608" "get_lg_max" "8388608" } - { "set_lg_regionmax" "65536" "get_lg_regionmax" "65536" } - { "set_lk_detect" "db_lock_default" "get_lk_detect" "default" } - { "set_lk_detect" "db_lock_expire" "get_lk_detect" "expire" } - { "set_lk_detect" "db_lock_maxlocks" "get_lk_detect" "maxlocks" } - { "set_lk_detect" "db_lock_minlocks" "get_lk_detect" "minlocks" } - { "set_lk_detect" "db_lock_minwrite" "get_lk_detect" "minwrite" } - { "set_lk_detect" "db_lock_oldest" "get_lk_detect" "oldest" } - { "set_lk_detect" "db_lock_random" "get_lk_detect" "random" } - { "set_lk_detect" "db_lock_youngest" "get_lk_detect" "youngest" } - { "set_lk_max_lockers" "1500" "get_lk_max_lockers" "1500" } - { "set_lk_max_locks" "29" "get_lk_max_locks" "29" } - { "set_lk_max_objects" "1500" "get_lk_max_objects" "1500" } - { "set_lock_timeout" "100" "get_timeout lock" "100" } - { "set_mp_mmapsize" "12582912" "get_mp_mmapsize" "12582912" } - { "set_mp_max_write" "10 20" "get_mp_max_write" "10 20" } - { "set_mp_max_openfd" "10" "get_mp_max_openfd" "10" } - { "set_region_init" "1" 
"get_flags" "-region_init" } - { "set_shm_key" "15" "get_shm_key" "15" } - { "set_tas_spins" "15" "get_tas_spins" "15" } - { "set_tmp_dir" "." "get_tmp_dir" "." } - { "set_tx_max" "31" "get_tx_max" "31" } - { "set_txn_timeout" "50" "get_timeout txn" "50" } - { "set_verbose" "db_verb_deadlock" "get_verbose deadlock" "on" } - { "set_verbose" "db_verb_recovery" "get_verbose recovery" "on" } - { "set_verbose" "db_verb_replication" "get_verbose rep" "on" } - { "set_verbose" "db_verb_waitsfor" "get_verbose wait" "on" } - } - - env_cleanup $testdir - set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn" - set directlist {db_direct_db db_direct_log} - foreach item $cfglist { - env_cleanup $testdir - set configarg [lindex $item 0] - set configval [lindex $item 1] - set getter [lindex $item 2] - set getval [lindex $item 3] - - env007_make_config $configarg $configval - - # Verify using config file - puts "\t\tEnv007.b: $configarg $configval" - - # Unconfigured/unsupported direct I/O is not reported - # as a failure. - set directmsg \ - "direct I/O either not configured or not supported" - if {[catch { eval $e } env ]} { - if { [lsearch $directlist $configval] != -1 && \ - [is_substr $env $directmsg] == 1 } { - continue - } else { - puts "FAIL: $env" - continue - } - } - error_check_good envvalid:1 [is_valid_env $env] TRUE - error_check_good getter:1 [eval $env $getter] $getval - error_check_good envclose:1 [$env close] 0 - } - - puts "\tEnv007.c: Test berkdb env options using getters and env open." - # The envopenlist variable contains options that can be set using - # berkdb env. We always set -mpool. 
- set envopenlist { - { "-cdb" "" "-cdb" "get_open_flags" } - { "-errpfx" "FOO" "FOO" "get_errpfx" } - { "-lock" "" "-lock" "get_open_flags" } - { "-log" "" "-log" "get_open_flags" } - { "" "" "-mpool" "get_open_flags" } - { "-system_mem" "-shm_key 1" "-system_mem" "get_open_flags" } - { "-txn" "" "-txn" "get_open_flags" } - { "-recover" "-txn" "-recover" "get_open_flags" } - { "-recover_fatal" "-txn" "-recover_fatal" "get_open_flags" } - { "-use_environ" "" "-use_environ" "get_open_flags" } - { "-use_environ_root" "" "-use_environ_root" "get_open_flags" } - { "" "" "-create" "get_open_flags" } - { "-private" "" "-private" "get_open_flags" } - { "-thread" "" "-thread" "get_open_flags" } - { "-txn_timestamp" "100000000" "100000000" "get_tx_timestamp" } - } - - if { $has_crypto == 1 } { - lappend envopenlist \ - { "-encryptaes" "$passwd" "-encryptaes" "get_encrypt_flags" } - } - - set e "berkdb_env -create -mode 0644 -home $testdir" - set qnxexclude {-system_mem} - foreach item $envopenlist { - set envarg [lindex $item 0] - set envval [lindex $item 1] - set retval [lindex $item 2] - set getter [lindex $item 3] - - if { $is_qnx_test && - [lsearch $qnxexclude $envarg] != -1} { - puts "\t\tEnv007: Skipping $envarg for QNX" - continue - } - env_cleanup $testdir - # Set up env - set env [eval $e $envarg $envval] - error_check_good envopen [is_valid_env $env] TRUE - - # Check that getter retrieves expected retval. - set get_retval [eval $env $getter] - if { [is_substr $get_retval $retval] != 1 } { - puts "FAIL: $retval\ - should be a substring of $get_retval" - continue - } - error_check_good envclose [$env close] 0 - - # The -encryptany flag can only be tested on an existing - # environment that supports encryption, so do it here. 
- if { $has_crypto == 1 } { - if { $envarg == "-encryptaes" } { - set env [eval berkdb_env -home $testdir\ - -encryptany $passwd] - error_check_good get_encryptany \ - [eval $env get_encrypt_flags] "-encryptaes" - error_check_good env_close [$env close] 0 - } - } - } - - puts "\tEnv007.d: Test berkdb env options using set_flags and getters." - - # The flaglist variable contains options that can be set using - # $env set_flags. - set flaglist { - { "-direct_db" } - { "-direct_log" } - { "-dsync_log" } - { "-log_remove" } - { "-nolock" } - { "-nommap" } - { "-nopanic" } - { "-nosync" } - { "-overwrite" } - { "-panic" } - { "-wrnosync" } - } - set e "berkdb_env_noerr -create -mode 0644 -home $testdir" - set directlist {-direct_db -direct_log} - foreach item $flaglist { - set flag [lindex $item 0] - env_cleanup $testdir - - # Set up env - set env [eval $e] - error_check_good envopen [is_valid_env $env] TRUE - - # Use set_flags to turn on new env characteristics. - # - # Unconfigured/unsupported direct I/O is not reported - # as a failure. - if {[catch { $env set_flags $flag on } res ]} { - if { [lsearch $directlist $flag] != -1 && \ - [is_substr $res $directmsg] == 1 } { - error_check_good env_close [$env close] 0 - continue - } else { - puts "FAIL: $res" - error_check_good env_close [$env close] 0 - continue - } - } else { - error_check_good "flag $flag on" $res 0 - } - - # Check that getter retrieves expected retval. - set get_retval [eval $env get_flags] - if { [is_substr $get_retval $flag] != 1 } { - puts "FAIL: $flag should be a substring of $get_retval" - error_check_good env_close [$env close] 0 - continue - } - # Use set_flags to turn off env characteristics, make sure - # they are gone. 
- error_check_good "flag $flag off" [$env set_flags $flag off] 0 - set get_retval [eval $env get_flags] - if { [is_substr $get_retval $flag] == 1 } { - puts "FAIL: $flag should not be in $get_retval" - error_check_good env_close [$env close] 0 - continue - } - - error_check_good envclose [$env close] 0 - } - - puts "\tEnv007.e: Test env get_home." - env_cleanup $testdir - # Set up env - set env [eval $e] - error_check_good env_open [is_valid_env $env] TRUE - # Test for correct value. - set get_retval [eval $env get_home] - error_check_good get_home $get_retval $testdir - error_check_good envclose [$env close] 0 - - puts "\tEnv007.f: Test that bad config values are rejected." - set cfglist { - { "set_cachesize" "1048576" } - { "set_flags" "db_xxx" } - { "set_flags" "1" } - { "set_flags" "db_txn_nosync x" } - { "set_lg_bsize" "db_xxx" } - { "set_lg_max" "db_xxx" } - { "set_lg_regionmax" "db_xxx" } - { "set_lk_detect" "db_xxx" } - { "set_lk_detect" "1" } - { "set_lk_detect" "db_lock_youngest x" } - { "set_lk_max" "db_xxx" } - { "set_lk_max_locks" "db_xxx" } - { "set_lk_max_lockers" "db_xxx" } - { "set_lk_max_objects" "db_xxx" } - { "set_mp_max_openfd" "1 2" } - { "set_mp_max_write" "1 2 3" } - { "set_mp_mmapsize" "db_xxx" } - { "set_region_init" "db_xxx" } - { "set_shm_key" "db_xxx" } - { "set_tas_spins" "db_xxx" } - { "set_tx_max" "db_xxx" } - { "set_verbose" "db_xxx" } - { "set_verbose" "1" } - { "set_verbose" "db_verb_recovery x" } - } - - set e "berkdb_env_noerr -create -mode 0644 \ - -home $testdir -log -lock -txn " - foreach item $cfglist { - set configarg [lindex $item 0] - set configval [lindex $item 1] - - env007_make_config $configarg $configval - - # verify using just config file - set stat [catch {eval $e} ret] - error_check_good envopen $stat 1 - error_check_good error [is_substr $errorInfo \ - "incorrect arguments for name-value pair"] 1 - } - - puts "\tEnv007.g: Config name error set_xxx" - set e "berkdb_env_noerr -create -mode 0644 \ - -home $testdir 
-log -lock -txn " - env007_make_config "set_xxx" 1 - set stat [catch {eval $e} ret] - error_check_good envopen $stat 1 - error_check_good error [is_substr $errorInfo \ - "unrecognized name-value pair"] 1 - - puts "\tEnv007.h: Test berkdb open flags and getters." - # Check options that we configure with berkdb open and - # query via getters. Structure of the list is: - # 0. Flag used in berkdb open command - # 1. Value specified to flag - # 2. Specific method, if needed - # 3. Arg used in getter - - set olist { - { "-minkey" "4" " -btree " "get_bt_minkey" } - { "-cachesize" "0 1048576 1" "" "get_cachesize" } - { "" "FILENAME DBNAME" "" "get_dbname" } - { "" "" "" "get_env" } - { "-errpfx" "ERROR:" "" "get_errpfx" } - { "" "-chksum" "" "get_flags" } - { "-delim" "58" "-recno" "get_re_delim" } - { "" "-dup" "" "get_flags" } - { "" "-dup -dupsort" "" "get_flags" } - { "" "-recnum" "" "get_flags" } - { "" "-revsplitoff" "" "get_flags" } - { "" "-renumber" "-recno" "get_flags" } - { "" "-snapshot" "-recno" "get_flags" } - { "" "-create" "" "get_open_flags" } - { "" "-create -dirty" "" "get_open_flags" } - { "" "-create -excl" "" "get_open_flags" } - { "" "-create -nommap" "" "get_open_flags" } - { "" "-create -thread" "" "get_open_flags" } - { "" "-create -truncate" "" "get_open_flags" } - { "-ffactor" "40" " -hash " "get_h_ffactor" } - { "-lorder" "4321" "" "get_lorder" } - { "-nelem" "10000" " -hash " "get_h_nelem" } - { "-pagesize" "4096" "" "get_pagesize" } - { "-extent" "4" "-queue" "get_q_extentsize" } - { "-len" "20" "-recno" "get_re_len" } - { "-pad" "0" "-recno" "get_re_pad" } - { "-source" "include.tcl" "-recno" "get_re_source" } - } - - set o "berkdb_open -create -mode 0644" - foreach item $olist { - cleanup $testdir NULL - set flag [lindex $item 0] - set flagval [lindex $item 1] - set method [lindex $item 2] - if { $method == "" } { - set method " -btree " - } - set getter [lindex $item 3] - - # Check that open is successful with the flag. 
- # The option -cachesize requires grouping for $flagval. - if { $flag == "-cachesize" } { - set db [eval $o $method $flag {$flagval}\ - $testdir/a.db] - } else { - set db [eval $o $method $flag $flagval\ - $testdir/a.db] - } - error_check_good dbopen:0 [is_valid_db $db] TRUE - - # Check that getter retrieves the correct value. - # Cachesizes under 500MB are adjusted upward to - # about 25% so just make sure we're in the right - # ballpark, between 1.2 and 1.3 of the original value. - if { $flag == "-cachesize" } { - set retval [eval $db $getter] - set retbytes [lindex $retval 1] - set setbytes [lindex $flagval 1] - error_check_good cachesize_low\ - [expr $retbytes > [expr $setbytes * 6 / 5]] 1 - error_check_good cachesize_high\ - [expr $retbytes < [expr $setbytes * 13 / 10]] 1 - } else { - error_check_good get_flagval \ - [eval $db $getter] $flagval - } - error_check_good dbclose:0 [$db close] 0 - } - - puts "\tEnv007.i: Test berkdb_open -rdonly." - # This test is done separately because -rdonly can only be specified - # on an already existing database. - set flag "-rdonly" - set db [eval berkdb_open $flag $testdir/a.db] - error_check_good open_rdonly [is_valid_db $db] TRUE - - error_check_good get_rdonly [eval $db get_open_flags] $flag - error_check_good dbclose:0 [$db close] 0 - - puts "\tEnv007.j: Test berkdb open flags and getters\ - requiring environments." - # Check options that we configure with berkdb open and - # query via getters. Structure of the list is: - # 0. Flag used in berkdb open command - # 1. Value specified to flag - # 2. Specific method, if needed - # 3. Arg used in getter - # 4. 
Additional flags needed in setting up env - - set elist { - { "" "-auto_commit" "" "get_open_flags" "" } - } - - if { $has_crypto == 1 } { - lappend elist \ - { "" "-encrypt" "" "get_flags" "-encryptaes $passwd" } - } - - set e "berkdb_env -create -home $testdir -txn " - set o "berkdb_open -create -btree -mode 0644 " - foreach item $elist { - env_cleanup $testdir - set flag [lindex $item 0] - set flagval [lindex $item 1] - set method [lindex $item 2] - if { $method == "" } { - set method " -btree " - } - set getter [lindex $item 3] - set envflag [lindex $item 4] - - # Check that open is successful with the flag. - set env [eval $e $envflag] - set db [eval $o -env $env $flag $flagval a.db] - error_check_good dbopen:0 [is_valid_db $db] TRUE - - # Check that getter retrieves the correct value - set get_flagval [eval $db $getter] - error_check_good get_flagval [is_substr $get_flagval $flagval] 1 - error_check_good dbclose [$db close] 0 - error_check_good envclose [$env close] 0 - } -} - -proc env007_check { env statcmd statstr testval } { - set stat [$env $statcmd] - set checked 0 - foreach statpair $stat { - if {$checked == 1} { - break - } - set statmsg [lindex $statpair 0] - set statval [lindex $statpair 1] - if {[is_substr $statmsg $statstr] != 0} { - set checked 1 - error_check_good $statstr:ck $statval $testval - } - } - error_check_good $statstr:test $checked 1 -} - -proc env007_make_config { carg cval } { - global testdir - - set cid [open $testdir/DB_CONFIG w] - puts $cid "$carg $cval" - close $cid -} diff --git a/storage/bdb/test/env008.tcl b/storage/bdb/test/env008.tcl deleted file mode 100644 index c203d55d4dd..00000000000 --- a/storage/bdb/test/env008.tcl +++ /dev/null @@ -1,73 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env008.tcl,v 11.8 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env008 -# TEST Test environments and subdirectories. 
-proc env008 { } { - global errorInfo - global errorCode - - source ./include.tcl - - env_cleanup $testdir - - set subdir 1/1 - set subdir1 1/2 - file mkdir $testdir/$subdir $testdir/$subdir1 - set testfile $subdir/env.db - - puts "Env008: Test of environments and subdirectories." - - puts "\tEnv008.a: Create env and db." - set env [berkdb_env -create -mode 0644 -home $testdir -txn] - error_check_good env [is_valid_env $env] TRUE - - puts "\tEnv008.b: Remove db in subdir." - env008_db $env $testfile - error_check_good dbremove:$testfile \ - [berkdb dbremove -env $env $testfile] 0 - - # - # Rather than remaking the db every time for the renames - # just move around the new file name to another new file - # name. - # - puts "\tEnv008.c: Rename db in subdir." - env008_db $env $testfile - set newfile $subdir/new.db - error_check_good dbrename:$testfile/.. \ - [berkdb dbrename -env $env $testfile $newfile] 0 - set testfile $newfile - - puts "\tEnv008.d: Rename db to parent dir." - set newfile $subdir/../new.db - error_check_good dbrename:$testfile/.. \ - [berkdb dbrename -env $env $testfile $newfile] 0 - set testfile $newfile - - puts "\tEnv008.e: Rename db to child dir." - set newfile $subdir/env.db - error_check_good dbrename:$testfile/.. \ - [berkdb dbrename -env $env $testfile $newfile] 0 - set testfile $newfile - - puts "\tEnv008.f: Rename db to another dir." - set newfile $subdir1/env.db - error_check_good dbrename:$testfile/.. \ - [berkdb dbrename -env $env $testfile $newfile] 0 - - error_check_good envclose [$env close] 0 - puts "\tEnv008 complete." 
-} - -proc env008_db { env testfile } { - set db [berkdb_open -env $env -create -btree $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set ret [$db put key data] - error_check_good dbput $ret 0 - error_check_good dbclose [$db close] 0 -} diff --git a/storage/bdb/test/env009.tcl b/storage/bdb/test/env009.tcl deleted file mode 100644 index e6fd3a56373..00000000000 --- a/storage/bdb/test/env009.tcl +++ /dev/null @@ -1,82 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env009.tcl,v 11.9 2004/09/23 21:45:21 mjc Exp $ -# -# TEST env009 -# TEST Test calls to all the various stat functions. We have several -# TEST sprinkled throughout the test suite, but this will ensure that -# TEST we run all of them at least once. -proc env009 { } { - source ./include.tcl - - puts "Env009: Various stat functions test." - - env_cleanup $testdir - puts "\tEnv009.a: Setting up env and a database." - - set e [berkdb_env -create -home $testdir -txn] - error_check_good dbenv [is_valid_env $e] TRUE - set dbbt [berkdb_open -create -btree $testdir/env009bt.db] - error_check_good dbopen [is_valid_db $dbbt] TRUE - set dbh [berkdb_open -create -hash $testdir/env009h.db] - error_check_good dbopen [is_valid_db $dbh] TRUE - set dbq [berkdb_open -create -queue $testdir/env009q.db] - error_check_good dbopen [is_valid_db $dbq] TRUE - - puts "\tEnv009.b: Setting up replication master and client envs." 
- replsetup $testdir/MSGQUEUEDIR - set masterdir $testdir/MASTERDIR - set clientdir $testdir/CLIENTDIR - file mkdir $masterdir - file mkdir $clientdir - - repladd 1 - set repenv(M) [berkdb_env -create -home $masterdir \ - -txn -rep_master -rep_transport [list 1 replsend]] - repladd 2 - set repenv(C) [berkdb_env -create -home $clientdir \ - -txn -rep_client -rep_transport [list 2 replsend]] - - set rlist { - { "lock_stat" "Maximum locks" "Env009.c" $e } - { "log_stat" "Magic" "Env009.d" "$e" } - { "mpool_stat" "Number of caches" "Env009.e" "$e"} - { "txn_stat" "Max Txns" "Env009.f" "$e" } - { "rep_stat" "{Environment ID} 1" "Env009.g (Master)" "$repenv(M)"} - { "rep_stat" "{Environment ID} 2" "Env009.h (Client)" "$repenv(C)"} - } - - foreach set $rlist { - set cmd [lindex $set 0] - set str [lindex $set 1] - set msg [lindex $set 2] - set env [lindex $set 3] - puts "\t$msg: $cmd" - set ret [eval $env $cmd] - error_check_good $cmd [is_substr $ret $str] 1 - } - - puts "\tEnv009.i: btree stats" - set ret [$dbbt stat] - error_check_good $cmd [is_substr $ret "Leaf pages"] 1 - - puts "\tEnv009.j: hash stats" - set ret [$dbh stat] - error_check_good $cmd [is_substr $ret "Buckets"] 1 - - puts "\tEnv009.k: queue stats" - set ret [$dbq stat] - error_check_good $cmd [is_substr $ret "Extent size"] 1 - - # Clean up. - error_check_good dbclose [$dbbt close] 0 - error_check_good dbclose [$dbh close] 0 - error_check_good dbclose [$dbq close] 0 - error_check_good masterenvclose [$repenv(M) close] 0 - error_check_good clientenvclose [$repenv(C) close] 0 - replclose $testdir/MSGQUEUEDIR - error_check_good envclose [$e close] 0 -} diff --git a/storage/bdb/test/env010.tcl b/storage/bdb/test/env010.tcl deleted file mode 100644 index 403f0cd9dd6..00000000000 --- a/storage/bdb/test/env010.tcl +++ /dev/null @@ -1,50 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: env010.tcl,v 1.7 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env010 -# TEST Run recovery in an empty directory, and then make sure we can still -# TEST create a database in that directory. -proc env010 { } { - source ./include.tcl - - puts "Env010: Test of recovery in an empty directory." - - # Create a new directory used only for this test - - if { [file exists $testdir/EMPTYDIR] != 1 } { - file mkdir $testdir/EMPTYDIR - } else { - puts "\nDirectory already exists." - } - - # Do the test twice, for regular recovery and catastrophic - # Open environment and recover, but don't create a database - - foreach rmethod {recover recover_fatal} { - - puts "\tEnv010: Creating env for $rmethod test." - env_cleanup $testdir/EMPTYDIR - set e [berkdb_env \ - -create -home $testdir/EMPTYDIR -txn -$rmethod] - error_check_good dbenv [is_valid_env $e] TRUE - - # Open and close a database - # The method doesn't matter, so picked btree arbitrarily - - set db [eval {berkdb_open -env $e \ - -btree -create -mode 0644} ] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - # Close environment - - error_check_good envclose [$e close] 0 - error_check_good berkdb:envremove \ - [berkdb envremove -home $testdir/EMPTYDIR] 0 - } - puts "\tEnv010 complete." -} diff --git a/storage/bdb/test/env011.tcl b/storage/bdb/test/env011.tcl deleted file mode 100644 index de6c82b3670..00000000000 --- a/storage/bdb/test/env011.tcl +++ /dev/null @@ -1,39 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: env011.tcl,v 1.4 2004/01/28 03:36:27 bostic Exp $ -# -# TEST env011 -# TEST Run with region overwrite flag. -proc env011 { } { - source ./include.tcl - - puts "Env011: Test of region overwriting." - env_cleanup $testdir - - puts "\tEnv011: Creating/closing env for open test." 
- set e [berkdb_env -create -overwrite -home $testdir -txn] - error_check_good dbenv [is_valid_env $e] TRUE - set db [eval \ - {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ] - error_check_good dbopen [is_valid_db $db] TRUE - set ret [eval {$db put} -auto_commit "aaa" "data"] - error_check_good put $ret 0 - set ret [eval {$db put} -auto_commit "bbb" "data"] - error_check_good put $ret 0 - error_check_good db_close [$db close] 0 - error_check_good envclose [$e close] 0 - - puts "\tEnv011: Opening the environment with overwrite set." - set e [berkdb_env -create -overwrite -home $testdir -txn -recover] - error_check_good dbenv [is_valid_env $e] TRUE - error_check_good envclose [$e close] 0 - - puts "\tEnv011: Removing the environment with overwrite set." - error_check_good berkdb:envremove \ - [berkdb envremove -home $testdir -overwrite] 0 - - puts "\tEnv011 complete." -} diff --git a/storage/bdb/test/hsearch.tcl b/storage/bdb/test/hsearch.tcl deleted file mode 100644 index f2d223264d4..00000000000 --- a/storage/bdb/test/hsearch.tcl +++ /dev/null @@ -1,51 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: hsearch.tcl,v 11.11 2004/01/28 03:36:28 bostic Exp $ -# -# Historic Hsearch interface test. -# Use the first 1000 entries from the dictionary. -# Insert each with self as key and data; retrieve each. -# After all are entered, retrieve all; compare output to original. -# Then reopen the file, re-retrieve everything. -# Finally, delete everything. 
-proc hsearch { { nentries 1000 } } { - source ./include.tcl - - puts "HSEARCH interfaces test: $nentries" - - # Create the database and open the dictionary - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir NULL - - error_check_good hcreate [berkdb hcreate $nentries] 0 - set did [open $dict] - set count 0 - - puts "\tHSEARCH.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - set ret [berkdb hsearch $str $str enter] - error_check_good hsearch:enter $ret 0 - - set d [berkdb hsearch $str 0 find] - error_check_good hsearch:find $d $str - incr count - } - close $did - - puts "\tHSEARCH.b: re-get loop" - set did [open $dict] - # Here is the loop where we retrieve each key - while { [gets $did str] != -1 && $count < $nentries } { - set d [berkdb hsearch $str 0 find] - error_check_good hsearch:find $d $str - incr count - } - close $did - error_check_good hdestroy [berkdb hdestroy] 0 -} diff --git a/storage/bdb/test/join.tcl b/storage/bdb/test/join.tcl deleted file mode 100644 index eba811dfdc7..00000000000 --- a/storage/bdb/test/join.tcl +++ /dev/null @@ -1,455 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: join.tcl,v 11.23 2004/01/28 03:36:28 bostic Exp $ -# -# TEST jointest -# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins -# TEST with differing index orders and selectivity. -# TEST -# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those -# TEST work, everything else does as well. We'll create test databases -# TEST called join1.db, join2.db, join3.db, and join4.db. The number on -# TEST the database describes the duplication -- duplicates are of the -# TEST form 0, N, 2N, 3N, ... where N is the number of the database. 
-# TEST Primary.db is the primary database, and null.db is the database -# TEST that has no matching duplicates. -# TEST -# TEST We should test this on all btrees, all hash, and a combination thereof -proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } { - global testdir - global rand_init - source ./include.tcl - - env_cleanup $testdir - berkdb srand $rand_init - - # Use one environment for all database opens so we don't - # need oodles of regions. - set env [berkdb_env -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - # With the new offpage duplicate code, we don't support - # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups - # is greater than one, run only with "-dup". - if { $with_dup_dups > 1 } { - set doptarray {"-dup"} - } else { - set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX } - } - - # NB: these flags are internal only, ok - foreach m "DB_BTREE DB_HASH DB_BOTH" { - # run with two different random mixes. - foreach dopt $doptarray { - set opt [list "-env" $env $dopt] - - puts "Join test: ($m $dopt) psize $psize,\ - $with_dup_dups dup\ - dups, flags $flags." - - build_all $m $psize $opt oa $with_dup_dups - - # null.db is db_built fifth but is referenced by - # zero; set up the option array appropriately. 
- set oa(0) $oa(5) - - # Build the primary - puts "\tBuilding the primary database $m" - set oflags "-create -truncate -mode 0644 -env $env\ - [conv $m [berkdb random_int 1 2]]" - set db [eval {berkdb_open} $oflags primary.db] - error_check_good dbopen [is_valid_db $db] TRUE - for { set i 0 } { $i < 1000 } { incr i } { - set key [format "%04d" $i] - set ret [$db put $key stub] - error_check_good "primary put" $ret 0 - } - error_check_good "primary close" [$db close] 0 - set did [open $dict] - gets $did str - do_join primary.db "1 0" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "2 0" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "3 0" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "4 0" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "1" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "2" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "3" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "4" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "1 2" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "1 2 3" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "1 2 3 4" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "3 2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "4 3 2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "1 3" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "3 1" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "1 4" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "4 1" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "2 3" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "3 2" $str oa $flags $with_dup_dups - gets 
$did str - do_join primary.db "2 4" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "4 2" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "3 4" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "4 3" $str oa $flags $with_dup_dups - gets $did str - do_join primary.db "2 3 4" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "3 4 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "4 2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "0 2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "3 2 0" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "4 3 2 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "4 3 0 1" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "3 3 3" $str oa $flags\ - $with_dup_dups - gets $did str - do_join primary.db "2 2 3 3" $str oa $flags\ - $with_dup_dups - gets $did str2 - gets $did str - do_join primary.db "1 2" $str oa $flags\ - $with_dup_dups "3" $str2 - - # You really don't want to run this section - # with $with_dup_dups > 2. 
- if { $with_dup_dups <= 2 } { - gets $did str2 - gets $did str - do_join primary.db "1 2 3" $str\ - oa $flags $with_dup_dups "3 3 1" $str2 - gets $did str2 - gets $did str - do_join primary.db "4 0 2" $str\ - oa $flags $with_dup_dups "4 3 3" $str2 - gets $did str2 - gets $did str - do_join primary.db "3 2 1" $str\ - oa $flags $with_dup_dups "0 2" $str2 - gets $did str2 - gets $did str - do_join primary.db "2 2 3 3" $str\ - oa $flags $with_dup_dups "1 4 4" $str2 - gets $did str2 - gets $did str - do_join primary.db "2 2 3 3" $str\ - oa $flags $with_dup_dups "0 0 4 4" $str2 - gets $did str2 - gets $did str - do_join primary.db "2 2 3 3" $str2\ - oa $flags $with_dup_dups "2 4 4" $str - gets $did str2 - gets $did str - do_join primary.db "2 2 3 3" $str2\ - oa $flags $with_dup_dups "0 0 4 4" $str - } - close $did - } - } - - error_check_good env_close [$env close] 0 -} - -proc build_all { method psize opt oaname with_dup_dups {nentries 100} } { - global testdir - db_build join1.db $nentries 50 1 [conv $method 1]\ - $psize $opt $oaname $with_dup_dups - db_build join2.db $nentries 25 2 [conv $method 2]\ - $psize $opt $oaname $with_dup_dups - db_build join3.db $nentries 16 3 [conv $method 3]\ - $psize $opt $oaname $with_dup_dups - db_build join4.db $nentries 12 4 [conv $method 4]\ - $psize $opt $oaname $with_dup_dups - db_build null.db $nentries 0 5 [conv $method 5]\ - $psize $opt $oaname $with_dup_dups -} - -proc conv { m i } { - switch -- $m { - DB_HASH { return "-hash"} - "-hash" { return "-hash"} - DB_BTREE { return "-btree"} - "-btree" { return "-btree"} - DB_BOTH { - if { [expr $i % 2] == 0 } { - return "-hash"; - } else { - return "-btree"; - } - } - } -} - -proc random_opts { } { - set j [berkdb random_int 0 1] - if { $j == 0 } { - return " -dup" - } else { - return " -dup -dupsort" - } -} - -proc db_build { name nkeys ndups dup_interval method psize lopt oaname \ - with_dup_dups } { - source ./include.tcl - - # Get array of arg names (from two levels up the call 
stack) - upvar 2 $oaname oa - - # Search for "RANDOMMIX" in $opt, and if present, replace - # with " -dup" or " -dup -dupsort" at random. - set i [lsearch $lopt RANDOMMIX] - if { $i != -1 } { - set lopt [lreplace $lopt $i $i [random_opts]] - } - - # Save off db_open arguments for this database. - set opt [eval concat $lopt] - set oa($dup_interval) $opt - - # Create the database and open the dictionary - set oflags "-create -truncate -mode 0644 $method\ - -pagesize $psize" - set db [eval {berkdb_open} $oflags $opt $name] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - set count 0 - puts -nonewline "\tBuilding $name: $nkeys keys " - puts -nonewline "with $ndups duplicates at interval of $dup_interval" - if { $with_dup_dups > 0 } { - puts "" - puts "\t\tand $with_dup_dups duplicate duplicates." - } else { - puts "." - } - for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } { - incr count} { - set str $str$name - # We need to make sure that the dups are inserted in a - # random, or near random, order. Do this by generating - # them and putting each in a list, then sorting the list - # at random. 
- set duplist {} - for { set i 0 } { $i < $ndups } { incr i } { - set data [format "%04d" [expr $i * $dup_interval]] - lappend duplist $data - } - # randomize the list - for { set i 0 } { $i < $ndups } {incr i } { - # set j [berkdb random_int $i [expr $ndups - 1]] - set j [expr ($i % 2) + $i] - if { $j >= $ndups } { set j $i } - set dupi [lindex $duplist $i] - set dupj [lindex $duplist $j] - set duplist [lreplace $duplist $i $i $dupj] - set duplist [lreplace $duplist $j $j $dupi] - } - foreach data $duplist { - if { $with_dup_dups != 0 } { - for { set j 0 }\ - { $j < $with_dup_dups }\ - {incr j} { - set ret [$db put $str $data] - error_check_good put$j $ret 0 - } - } else { - set ret [$db put $str $data] - error_check_good put $ret 0 - } - } - - if { $ndups == 0 } { - set ret [$db put $str NODUP] - error_check_good put $ret 0 - } - } - close $did - error_check_good close:$name [$db close] 0 -} - -proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } { - global testdir - source ./include.tcl - - upvar $oanm oa - - puts -nonewline "\tJoining: $dbs on $key" - if { $dbs2 == "" } { - puts "" - } else { - puts " with $dbs2 on $key2" - } - - # Open all the databases - set p [berkdb_open -unknown $testdir/$primary] - error_check_good "primary open" [is_valid_db $p] TRUE - - set dblist "" - set curslist "" - - set ndx [llength $dbs] - - foreach i [concat $dbs $dbs2] { - set opt $oa($i) - set db [eval {berkdb_open -unknown} $opt [n_to_name $i]] - error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE - set curs [$db cursor] - error_check_good "$db cursor" \ - [is_substr $curs "$db.c"] 1 - lappend dblist $db - lappend curslist $curs - - if { $ndx > 0 } { - set realkey [concat $key[n_to_name $i]] - } else { - set realkey [concat $key2[n_to_name $i]] - } - - set pair [$curs get -set $realkey] - error_check_good cursor_set:$realkey:$pair \ - [llength [lindex $pair 0]] 2 - - incr ndx -1 - } - - set join_curs [eval {$p join} $curslist] - 
error_check_good join_cursor \ - [is_substr $join_curs "$p.c"] 1 - - # Calculate how many dups we expect. - # We go through the list of indices. If we find a 0, then we - # expect 0 dups. For everything else, we look at pairs of numbers, - # if the are relatively prime, multiply them and figure out how - # many times that goes into 50. If they aren't relatively prime, - # take the number of times the larger goes into 50. - set expected 50 - set last 1 - foreach n [concat $dbs $dbs2] { - if { $n == 0 } { - set expected 0 - break - } - if { $last == $n } { - continue - } - - if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } { - if { $n > $last } { - set last $n - set expected [expr 50 / $last] - } - } else { - set last [expr $n * $last / [gcd $n $last]] - set expected [expr 50 / $last] - } - } - - # If $with_dup_dups is greater than zero, each datum has - # been inserted $with_dup_dups times. So we expect the number - # of dups to go up by a factor of ($with_dup_dups)^(number of databases) - - if { $with_dup_dups > 0 } { - foreach n [concat $dbs $dbs2] { - set expected [expr $expected * $with_dup_dups] - } - } - - set ndups 0 - if { $flags == " -join_item"} { - set l 1 - } else { - set flags "" - set l 2 - } - for { set pair [eval {$join_curs get} $flags] } { \ - [llength [lindex $pair 0]] == $l } { - set pair [eval {$join_curs get} $flags] } { - set k [lindex [lindex $pair 0] 0] - foreach i $dbs { - error_check_bad valid_dup:$i:$dbs $i 0 - set kval [string trimleft $k 0] - if { [string length $kval] == 0 } { - set kval 0 - } - error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0 - } - incr ndups - } - error_check_good number_of_dups:$dbs $ndups $expected - - error_check_good close_primary [$p close] 0 - foreach i $curslist { - error_check_good close_cursor:$i [$i close] 0 - } - foreach i $dblist { - error_check_good close_index:$i [$i close] 0 - } -} - -proc n_to_name { n } { -global testdir - if { $n == 0 } { - return null.db; - } else { - return join$n.db; 
- } -} - -proc gcd { a b } { - set g 1 - - for { set i 2 } { $i <= $a } { incr i } { - if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } { - set g $i - } - } - return $g -} diff --git a/storage/bdb/test/lock001.tcl b/storage/bdb/test/lock001.tcl deleted file mode 100644 index 48eb95515cb..00000000000 --- a/storage/bdb/test/lock001.tcl +++ /dev/null @@ -1,122 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: lock001.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST lock001 -# TEST Make sure that the basic lock tests work. Do some simple gets -# TEST and puts for a single locker. -proc lock001 { {iterations 1000} {maxlocks 1000} } { - source ./include.tcl - global lock_curid - global lock_maxid - - set save_curid $lock_curid - set save_maxid $lock_maxid - - # Set defaults - # Adjusted to make exact match of isqrt - #set conflicts { 3 0 0 0 0 0 1 0 1 1} - #set conflicts { 3 0 0 0 0 1 0 1 1} - - set conflicts { 0 0 0 0 0 1 0 1 1} - set nmodes [isqrt [llength $conflicts]] - - # Cleanup - env_cleanup $testdir - - # Open the region we'll use for testing. 
- set eflags "-create -lock -home $testdir -mode 0644 \ - -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}" - set env [eval {berkdb_env} $eflags] - error_check_good env [is_valid_env $env] TRUE - error_check_good lock_id_set \ - [$env lock_id_set $lock_curid $lock_maxid] 0 - - puts "Lock001: test basic lock operations" - set locker [$env lock_id] - # Get and release each type of lock - puts "\tLock001.a: get and release each type of lock" - foreach m {ng write read} { - set obj obj$m - set lockp [$env lock_get $m $locker $obj] - error_check_good lock_get:a [is_blocked $lockp] 0 - error_check_good lock_get:a [is_substr $lockp $env] 1 - set ret [ $lockp put ] - error_check_good lock_put $ret 0 - } - - # Get a bunch of locks for the same locker; these should work - set obj OBJECT - puts "\tLock001.b: Get a bunch of locks for the same locker" - foreach m {ng write read} { - set lockp [$env lock_get $m $locker $obj ] - lappend locklist $lockp - error_check_good lock_get:b [is_blocked $lockp] 0 - error_check_good lock_get:b [is_substr $lockp $env] 1 - } - release_list $locklist - - set locklist {} - # Check that reference counted locks work - puts "\tLock001.c: reference counted locks." - for {set i 0} { $i < 10 } {incr i} { - set lockp [$env lock_get -nowait write $locker $obj] - error_check_good lock_get:c [is_blocked $lockp] 0 - error_check_good lock_get:c [is_substr $lockp $env] 1 - lappend locklist $lockp - } - release_list $locklist - - # Finally try some failing locks - set locklist {} - foreach i {ng write read} { - set lockp [$env lock_get $i $locker $obj] - lappend locklist $lockp - error_check_good lock_get:d [is_blocked $lockp] 0 - error_check_good lock_get:d [is_substr $lockp $env] 1 - } - - # Change the locker - set locker [$env lock_id] - set blocklist {} - # Skip NO_LOCK lock. - puts "\tLock001.d: Change the locker, acquire read and write." 
- foreach i {write read} { - catch {$env lock_get -nowait $i $locker $obj} ret - error_check_good lock_get:e [is_substr $ret "not granted"] 1 - #error_check_good lock_get:e [is_substr $lockp $env] 1 - #error_check_good lock_get:e [is_blocked $lockp] 0 - } - # Now release original locks - release_list $locklist - - # Now re-acquire blocking locks - set locklist {} - puts "\tLock001.e: Re-acquire blocking locks." - foreach i {write read} { - set lockp [$env lock_get -nowait $i $locker $obj ] - error_check_good lock_get:f [is_substr $lockp $env] 1 - error_check_good lock_get:f [is_blocked $lockp] 0 - lappend locklist $lockp - } - - # Now release new locks - release_list $locklist - error_check_good free_id [$env lock_id_free $locker] 0 - - error_check_good envclose [$env close] 0 - -} - -# Blocked locks appear as lockmgrN.lockM\nBLOCKED -proc is_blocked { l } { - if { [string compare $l BLOCKED ] == 0 } { - return 1 - } else { - return 0 - } -} diff --git a/storage/bdb/test/lock002.tcl b/storage/bdb/test/lock002.tcl deleted file mode 100644 index 1fdea56cdb4..00000000000 --- a/storage/bdb/test/lock002.tcl +++ /dev/null @@ -1,157 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: lock002.tcl,v 11.21 2004/01/28 03:36:28 bostic Exp $ -# -# TEST lock002 -# TEST Exercise basic multi-process aspects of lock. -proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } { - source ./include.tcl - - puts "Lock002: Basic multi-process lock tests." - - env_cleanup $testdir - - set nmodes [isqrt [llength $conflicts]] - - # Open the lock - mlock_open $maxlocks $nmodes $conflicts - mlock_wait -} - -# Make sure that we can create a region; destroy it, attach to it, -# detach from it, etc. -proc mlock_open { maxl nmodes conflicts } { - source ./include.tcl - global lock_curid - global lock_maxid - - puts "\tLock002.a multi-process open/close test" - - # Open/Create region here. 
Then close it and try to open from - # other test process. - set env_cmd [concat "berkdb_env -create -mode 0644 \ - -lock -lock_max $maxl -lock_conflict" \ - [list [list $nmodes $conflicts]] "-home $testdir"] - set local_env [eval $env_cmd] - $local_env lock_id_set $lock_curid $lock_maxid - error_check_good env_open [is_valid_env $local_env] TRUE - - set ret [$local_env close] - error_check_good env_close $ret 0 - - # Open from other test process - set env_cmd "berkdb_env -mode 0644 -home $testdir" - - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - - set remote_env [send_cmd $f1 $env_cmd] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - - # Now make sure that we can reopen the region. - set local_env [eval $env_cmd] - error_check_good env_open [is_valid_env $local_env] TRUE - set ret [$local_env close] - error_check_good env_close $ret 0 - - # Try closing the remote region - set ret [send_cmd $f1 "$remote_env close"] - error_check_good remote:lock_close $ret 0 - - # Try opening for create. Will succeed because region exists. 
- set env_cmd [concat "berkdb_env -create -mode 0644 \ - -lock -lock_max $maxl -lock_conflict" \ - [list [list $nmodes $conflicts]] "-home $testdir"] - set local_env [eval $env_cmd] - error_check_good remote:env_open [is_valid_env $local_env] TRUE - - # close locally - reset_env $local_env - - # Close and exit remote - set ret [send_cmd $f1 "reset_env $remote_env"] - - catch { close $f1 } result -} - -proc mlock_wait { } { - source ./include.tcl - - puts "\tLock002.b multi-process get/put wait test" - - # Open region locally - set env_cmd "berkdb_env -lock -home $testdir" - set local_env [eval $env_cmd] - error_check_good env_open [is_valid_env $local_env] TRUE - - # Open region remotely - set f1 [open |$tclsh_path r+] - - puts $f1 "source $test_path/test.tcl" - - set remote_env [send_cmd $f1 $env_cmd] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - - # Get a write lock locally; try for the read lock - # remotely. We hold the locks for several seconds - # so that we can use timestamps to figure out if the - # other process waited. - set locker1 [$local_env lock_id] - set local_lock [$local_env lock_get write $locker1 object1] - error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE - - # Now request a lock that we expect to hang; generate - # timestamps so we can tell if it actually hangs. 
- set locker2 [send_cmd $f1 "$remote_env lock_id"] - set remote_lock [send_timed_cmd $f1 1 \ - "set lock \[$remote_env lock_get write $locker2 object1\]"] - - # Now sleep before releasing lock - tclsleep 5 - set result [$local_lock put] - error_check_good lock_put $result 0 - - # Now get the result from the other script - set result [rcv_result $f1] - error_check_good lock_get:remote_time [expr $result > 4] 1 - - # Now get the remote lock - set remote_lock [send_cmd $f1 "puts \$lock"] - error_check_good remote:lock_get \ - [is_valid_lock $remote_lock $remote_env] TRUE - - # Now make the other guy wait 5 second and then release his - # lock while we try to get a write lock on it - set start [timestamp -r] - - set ret [send_cmd $f1 "tclsleep 5"] - - set ret [send_cmd $f1 "$remote_lock put"] - - set local_lock [$local_env lock_get write $locker1 object1] - error_check_good lock_get:time \ - [expr [expr [timestamp -r] - $start] > 2] 1 - error_check_good lock_get:local \ - [is_valid_lock $local_lock $local_env] TRUE - - # Now check remote's result - set result [rcv_result $f1] - error_check_good lock_put:remote $result 0 - - # Clean up remote - set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ] - error_check_good remote_free_id $result 0 - set ret [send_cmd $f1 "reset_env $remote_env"] - - close $f1 - - # Now close up locally - set ret [$local_lock put] - error_check_good lock_put $ret 0 - error_check_good lock_id_free [$local_env lock_id_free $locker1] 0 - - reset_env $local_env -} diff --git a/storage/bdb/test/lock003.tcl b/storage/bdb/test/lock003.tcl deleted file mode 100644 index a535142838c..00000000000 --- a/storage/bdb/test/lock003.tcl +++ /dev/null @@ -1,101 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: lock003.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $ -# -# TEST lock003 -# TEST Exercise multi-process aspects of lock. 
Generate a bunch of parallel -# TEST testers that try to randomly obtain locks; make sure that the locks -# TEST correctly protect corresponding objects. -proc lock003 { {iter 500} {max 1000} {procs 5} } { - source ./include.tcl - global lock_curid - global lock_maxid - - set ldegree 5 - set objs 75 - set reads 65 - set wait 1 - set conflicts { 0 0 0 0 0 1 0 1 1} - set seeds {} - - puts "Lock003: Multi-process random lock test" - - # Clean up after previous runs - env_cleanup $testdir - - # Open/create the lock region - puts "\tLock003.a: Create environment" - set e [berkdb_env -create -lock -home $testdir] - error_check_good env_open [is_substr $e env] 1 - $e lock_id_set $lock_curid $lock_maxid - - error_check_good env_close [$e close] 0 - - # Now spawn off processes - set pidlist {} - - for { set i 0 } {$i < $procs} {incr i} { - if { [llength $seeds] == $procs } { - set s [lindex $seeds $i] - } -# puts "$tclsh_path\ -# $test_path/wrap.tcl \ -# lockscript.tcl $testdir/$i.lockout\ -# $testdir $iter $objs $wait $ldegree $reads &" - set p [exec $tclsh_path $test_path/wrap.tcl \ - lockscript.tcl $testdir/lock003.$i.out \ - $testdir $iter $objs $wait $ldegree $reads &] - lappend pidlist $p - } - - puts "\tLock003.b: $procs independent processes now running" - watch_procs $pidlist 30 10800 - - # Check for test failure - set errstrings [eval findfail [glob $testdir/lock003.*.out]] - foreach str $errstrings { - puts "FAIL: error message in .out file: $str" - } - - # Remove log files - for { set i 0 } {$i < $procs} {incr i} { - fileremove -f $testdir/lock003.$i.out - } -} - -# Create and destroy flag files to show we have an object locked, and -# verify that the correct files exist or don't exist given that we've -# just read or write locked a file. 
-proc lock003_create { rw obj } { - source ./include.tcl - - set pref $testdir/L3FLAG - set f [open $pref.$rw.[pid].$obj w] - close $f -} - -proc lock003_destroy { obj } { - source ./include.tcl - - set pref $testdir/L3FLAG - set f [glob -nocomplain $pref.*.[pid].$obj] - error_check_good l3_destroy [llength $f] 1 - fileremove $f -} - -proc lock003_vrfy { rw obj } { - source ./include.tcl - - set pref $testdir/L3FLAG - if { [string compare $rw "write"] == 0 } { - set fs [glob -nocomplain $pref.*.*.$obj] - error_check_good "number of other locks on $obj" [llength $fs] 0 - } else { - set fs [glob -nocomplain $pref.write.*.$obj] - error_check_good "number of write locks on $obj" [llength $fs] 0 - } -} - diff --git a/storage/bdb/test/lock004.tcl b/storage/bdb/test/lock004.tcl deleted file mode 100644 index e71a51f9ba7..00000000000 --- a/storage/bdb/test/lock004.tcl +++ /dev/null @@ -1,29 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: lock004.tcl,v 11.7 2004/01/28 03:36:28 bostic Exp $ -# -# TEST lock004 -# TEST Test locker ids wraping around. - -proc lock004 {} { - source ./include.tcl - global lock_curid - global lock_maxid - - set save_curid $lock_curid - set save_maxid $lock_maxid - - set lock_curid [expr $lock_maxid - 1] - puts "Lock004: Locker id wraparound test" - puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids" - - lock001 - lock002 - lock003 - - set lock_curid $save_curid - set lock_maxid $save_maxid -} diff --git a/storage/bdb/test/lock005.tcl b/storage/bdb/test/lock005.tcl deleted file mode 100644 index 8b3b977ad31..00000000000 --- a/storage/bdb/test/lock005.tcl +++ /dev/null @@ -1,177 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: lock005.tcl,v 1.10 2004/01/28 03:36:28 bostic Exp $ -# -# TEST lock005 -# TEST Check that page locks are being released properly. - -proc lock005 { } { - source ./include.tcl - - puts "Lock005: Page lock release test" - - # Clean up after previous runs - env_cleanup $testdir - - # Open/create the lock region - set e [berkdb_env -create -lock -home $testdir -txn -log] - error_check_good env_open [is_valid_env $e] TRUE - - # Open/create the database - set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db] - error_check_good dbopen [is_valid_db $db] TRUE - - # Check that records are locking by trying to - # fetch a record on the wrong transaction. - puts "\tLock005.a: Verify that we are locking" - - # Start the first transaction - set txn1 [$e txn -nowait] - error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE - set ret [catch {$db put -txn $txn1 -append record1} recno1] - error_check_good dbput_txn1 $ret 0 - - # Start second txn while the first is still running ... - set txn2 [$e txn -nowait] - error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE - - # ... and try to get a record from the first txn (should fail) - set ret [catch {$db get -txn $txn2 $recno1} res] - error_check_good dbget_wrong_record \ - [is_substr $res "deadlock"] 1 - - # End transactions - error_check_good txn1commit [$txn1 commit] 0 - how_many_locks 1 $e - error_check_good txn2commit [$txn2 commit] 0 - # The number of locks stays the same here because the first - # lock is released and the second lock was never granted. 
- how_many_locks 1 $e - - # Test lock behavior for both abort and commit - puts "\tLock005.b: Verify locks after abort or commit" - foreach endorder {forward reverse} { - end_order_test $db $e commit abort $endorder - end_order_test $db $e abort commit $endorder - end_order_test $db $e commit commit $endorder - end_order_test $db $e abort abort $endorder - } - - # Clean up - error_check_good db_close [$db close] 0 - error_check_good env_close [$e close] 0 -} - -proc end_order_test { db e txn1end txn2end endorder } { - # Start one transaction - set txn1 [$e txn -nowait] - error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE - set ret [catch {$db put -txn $txn1 -append record1} recno1] - error_check_good dbput_txn1 $ret 0 - - # Check number of locks - how_many_locks 2 $e - - # Start a second transaction while first is still running - set txn2 [$e txn -nowait] - error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE - set ret [catch {$db put -txn $txn2 -append record2} recno2] - error_check_good dbput_txn2 $ret 0 - how_many_locks 3 $e - - # Now commit or abort one txn and make sure the other is okay - if {$endorder == "forward"} { - # End transaction 1 first - puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2" - error_check_good txn_$txn1end [$txn1 $txn1end] 0 - how_many_locks 2 $e - - # txn1 is now ended, but txn2 is still running - set ret1 [catch {$db get -txn $txn2 $recno1} res1] - set ret2 [catch {$db get -txn $txn2 $recno2} res2] - if { $txn1end == "commit" } { - error_check_good txn2_sees_txn1 $ret1 0 - error_check_good txn2_sees_txn2 $ret2 0 - } else { - # transaction 1 was aborted - error_check_good txn2_cantsee_txn1 [llength $res1] 0 - } - - # End transaction 2 second - error_check_good txn_$txn2end [$txn2 $txn2end] 0 - how_many_locks 1 $e - - # txn1 and txn2 should both now be invalid - # The get no longer needs to be transactional - set ret3 [catch {$db get $recno1} res3] - set ret4 [catch {$db get $recno2} res4] - - if { $txn2end == "commit" } { - 
error_check_good txn2_sees_txn1 $ret3 0 - error_check_good txn2_sees_txn2 $ret4 0 - error_check_good txn2_has_record2 \ - [is_substr $res4 "record2"] 1 - } else { - # transaction 2 was aborted - error_check_good txn2_cantsee_txn1 $ret3 0 - error_check_good txn2_aborted [llength $res4] 0 - } - - } elseif { $endorder == "reverse" } { - # End transaction 2 first - puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1" - error_check_good txn_$txn2end [$txn2 $txn2end] 0 - how_many_locks 2 $e - - # txn2 is ended, but txn1 is still running - set ret1 [catch {$db get -txn $txn1 $recno1} res1] - set ret2 [catch {$db get -txn $txn1 $recno2} res2] - if { $txn2end == "commit" } { - error_check_good txn1_sees_txn1 $ret1 0 - error_check_good txn1_sees_txn2 $ret2 0 - } else { - # transaction 2 was aborted - error_check_good txn1_cantsee_txn2 [llength $res2] 0 - } - - # End transaction 1 second - error_check_good txn_$txn1end [$txn1 $txn1end] 0 - how_many_locks 1 $e - - # txn1 and txn2 should both now be invalid - # The get no longer needs to be transactional - set ret3 [catch {$db get $recno1} res3] - set ret4 [catch {$db get $recno2} res4] - - if { $txn1end == "commit" } { - error_check_good txn1_sees_txn1 $ret3 0 - error_check_good txn1_sees_txn2 $ret4 0 - error_check_good txn1_has_record1 \ - [is_substr $res3 "record1"] 1 - } else { - # transaction 1 was aborted - error_check_good txn1_cantsee_txn2 $ret4 0 - error_check_good txn1_aborted [llength $res3] 0 - } - } -} - -proc how_many_locks { expected env } { - set stat [$env lock_stat] - set str "Current number of locks" - set checked 0 - foreach statpair $stat { - if { $checked == 1 } { - break - } - if { [is_substr [lindex $statpair 0] $str] != 0} { - set checked 1 - set nlocks [lindex $statpair 1] - error_check_good expected_nlocks $nlocks $expected - } - } - error_check_good checked $checked 1 -} diff --git a/storage/bdb/test/lockscript.tcl b/storage/bdb/test/lockscript.tcl deleted file mode 100644 index 
f542c100b1f..00000000000 --- a/storage/bdb/test/lockscript.tcl +++ /dev/null @@ -1,117 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: lockscript.tcl,v 11.19 2004/01/28 03:36:28 bostic Exp $ -# -# Random lock tester. -# Usage: lockscript dir numiters numobjs sleepint degree readratio -# dir: lock directory. -# numiters: Total number of iterations. -# numobjs: Number of objects on which to lock. -# sleepint: Maximum sleep interval. -# degree: Maximum number of locks to acquire at once -# readratio: Percent of locks that should be reads. - -source ./include.tcl -source $test_path/test.tcl - -set usage "lockscript dir numiters numobjs sleepint degree readratio" - -# Verify usage -if { $argc != 6 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set dir [lindex $argv 0] -set numiters [ lindex $argv 1 ] -set numobjs [ lindex $argv 2 ] -set sleepint [ lindex $argv 3 ] -set degree [ lindex $argv 4 ] -set readratio [ lindex $argv 5 ] - -# Initialize random number generator -global rand_init -berkdb srand $rand_init - - -catch { berkdb_env -create -lock -home $dir } e -error_check_good env_open [is_substr $e env] 1 -catch { $e lock_id } locker -error_check_good locker [is_valid_locker $locker] TRUE - -puts -nonewline "Beginning execution for $locker: $numiters $numobjs " -puts "$sleepint $degree $readratio" -flush stdout - -for { set iter 0 } { $iter < $numiters } { incr iter } { - set nlocks [berkdb random_int 1 $degree] - # We will always lock objects in ascending order to avoid - # deadlocks. 
- set lastobj 1 - set locklist {} - set objlist {} - for { set lnum 0 } { $lnum < $nlocks } { incr lnum } { - # Pick lock parameters - set obj [berkdb random_int $lastobj $numobjs] - set lastobj [expr $obj + 1] - set x [berkdb random_int 1 100 ] - if { $x <= $readratio } { - set rw read - } else { - set rw write - } - puts "[timestamp -c] $locker $lnum: $rw $obj" - - # Do get; add to list - catch {$e lock_get $rw $locker $obj} lockp - error_check_good lock_get [is_valid_lock $lockp $e] TRUE - - # Create a file to flag that we've a lock of the given - # type, after making sure only other read locks exist - # (if we're read locking) or no other locks exist (if - # we're writing). - lock003_vrfy $rw $obj - lock003_create $rw $obj - lappend objlist [list $obj $rw] - - lappend locklist $lockp - if {$lastobj > $numobjs} { - break - } - } - # Pick sleep interval - puts "[timestamp -c] $locker sleeping" - # We used to sleep 1 to $sleepint seconds. This makes the test - # run for hours. Instead, make it sleep for 10 to $sleepint * 100 - # milliseconds, for a maximum sleep time of 0.5 s. - after [berkdb random_int 10 [expr $sleepint * 100]] - puts "[timestamp -c] $locker awake" - - # Now release locks - puts "[timestamp -c] $locker released locks" - - # Delete our locking flag files, then reverify. (Note that the - # locking flag verification function assumes that our own lock - # is not currently flagged.) - foreach pair $objlist { - set obj [lindex $pair 0] - set rw [lindex $pair 1] - lock003_destroy $obj - lock003_vrfy $rw $obj - } - - release_list $locklist - flush stdout -} - -set ret [$e close] -error_check_good env_close $ret 0 - -puts "[timestamp -c] $locker Complete" -flush stdout - -exit diff --git a/storage/bdb/test/log001.tcl b/storage/bdb/test/log001.tcl deleted file mode 100644 index cec09b84fd6..00000000000 --- a/storage/bdb/test/log001.tcl +++ /dev/null @@ -1,144 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: log001.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $ -# - -# TEST log001 -# TEST Read/write log records. -# TEST Test with and without fixed-length, in-memory logging, -# TEST and encryption. -proc log001 { } { - global passwd - global has_crypto - global rand_init - - berkdb srand $rand_init - set iter 1000 - - set max [expr 1024 * 128] - foreach fixedlength { 0 1 } { - foreach inmem { 1 0 } { - log001_body $max $iter $fixedlength $inmem - log001_body $max [expr $iter * 15] $fixedlength $inmem - - # Skip encrypted tests if not supported. - if { $has_crypto == 0 } { - continue - } - log001_body $max\ - $iter $fixedlength $inmem "-encryptaes $passwd" - log001_body $max\ - [expr $iter * 15] $fixedlength $inmem "-encryptaes $passwd" - } - } -} - -proc log001_body { max nrecs fixedlength inmem {encargs ""} } { - source ./include.tcl - - puts -nonewline "Log001: Basic put/get log records: " - if { $fixedlength == 1 } { - puts -nonewline "fixed-length ($encargs)" - } else { - puts -nonewline "variable-length ($encargs)" - } - - # In-memory logging requires a large enough log buffer that - # any active transaction can be aborted. - if { $inmem == 1 } { - set lbuf [expr 8 * [expr 1024 * 1024]] - puts " with in-memory logging." - } else { - puts " with on-disk logging." - } - - env_cleanup $testdir - - set logargs "" - if { $inmem == 1 } { - set logargs "-log_inmemory -log_buffer $lbuf" - } - set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \ - $encargs $logargs -log_max $max] - error_check_good envopen [is_valid_env $env] TRUE - - # We will write records to the log and make sure we can - # read them back correctly. We'll use a standard pattern - # repeated some number of times for each record. 
- set lsn_list {} - set rec_list {} - puts "\tLog001.a: Writing $nrecs log records" - for { set i 0 } { $i < $nrecs } { incr i } { - set rec "" - for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} { - set rec $rec$i:logrec:$i - } - if { $fixedlength != 1 } { - set rec $rec:[random_data 237 0 0] - } - set lsn [$env log_put $rec] - error_check_bad log_put [is_substr $lsn log_cmd] 1 - lappend lsn_list $lsn - lappend rec_list $rec - } - - # Open a log cursor. - set logc [$env log_cursor] - error_check_good logc [is_valid_logc $logc $env] TRUE - - puts "\tLog001.b: Retrieving log records sequentially (forward)" - set i 0 - for { set grec [$logc get -first] } { [llength $grec] != 0 } { - set grec [$logc get -next]} { - error_check_good log_get:seq [lindex $grec 1] \ - [lindex $rec_list $i] - incr i - } - - puts "\tLog001.c: Retrieving log records sequentially (backward)" - set i [llength $rec_list] - for { set grec [$logc get -last] } { [llength $grec] != 0 } { - set grec [$logc get -prev] } { - incr i -1 - error_check_good \ - log_get:seq [lindex $grec 1] [lindex $rec_list $i] - } - - puts "\tLog001.d: Retrieving log records sequentially by LSN" - set i 0 - foreach lsn $lsn_list { - set grec [$logc get -set $lsn] - error_check_good \ - log_get:seq [lindex $grec 1] [lindex $rec_list $i] - incr i - } - - puts "\tLog001.e: Retrieving log records randomly by LSN" - set m [expr [llength $lsn_list] - 1] - for { set i 0 } { $i < $nrecs } { incr i } { - set recno [berkdb random_int 0 $m ] - set lsn [lindex $lsn_list $recno] - set grec [$logc get -set $lsn] - error_check_good \ - log_get:seq [lindex $grec 1] [lindex $rec_list $recno] - } - - puts "\tLog001.f: Retrieving first/current, last/current log record" - set grec [$logc get -first] - error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0] - set grec [$logc get -current] - error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0] - set i [expr [llength $rec_list] - 1] - set grec [$logc get -last] - 
error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i] - set grec [$logc get -current] - error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i] - - # Close and unlink the file - error_check_good log_cursor:close:$logc [$logc close] 0 - error_check_good env:close [$env close] 0 - error_check_good envremove [berkdb envremove -home $testdir] 0 -} diff --git a/storage/bdb/test/log002.tcl b/storage/bdb/test/log002.tcl deleted file mode 100644 index 1c8f2b91979..00000000000 --- a/storage/bdb/test/log002.tcl +++ /dev/null @@ -1,102 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: log002.tcl,v 11.33 2004/09/22 18:01:05 bostic Exp $ -# -# TEST log002 -# TEST Tests multiple logs -# TEST Log truncation -# TEST LSN comparison and file functionality. -proc log002 { } { - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - foreach inmem { 1 0 } { - log002_body $inmem - } -} - -proc log002_body { inmem } { - source ./include.tcl - - puts "Log002: Multiple log test w/trunc, file, compare functionality" - - env_cleanup $testdir - - set max [expr 1024 * 128] - - set logargs "" - if { $inmem == 0 } { - puts "Log002: Using on-disk logging." - } else { - puts "Log002: Using in-memory logging." 
- set lbuf [expr 8 * [expr 1024 * 1024]] - set logargs "-log_inmemory -log_buffer $lbuf" - } - set env [eval {berkdb_env} -create -home $testdir -log \ - -mode 0644 $logargs -log_max $max] - error_check_good envopen [is_valid_env $env] TRUE - - # We'll record every hundredth record for later use - set info_list {} - - puts "\tLog002.a: Writing log records" - set i 0 - for {set s 0} { $s < [expr 3 * $max] } { incr s $len } { - set rec [random_data 120 0 0] - set len [string length $rec] - set lsn [$env log_put $rec] - - if { [expr $i % 100 ] == 0 } { - lappend info_list [list $lsn $rec] - } - incr i - } - - puts "\tLog002.b: Checking log_compare" - set last {0 0} - foreach p $info_list { - set l [lindex $p 0] - if { [llength $last] != 0 } { - error_check_good \ - log_compare [$env log_compare $l $last] 1 - error_check_good \ - log_compare [$env log_compare $last $l] -1 - error_check_good \ - log_compare [$env log_compare $l $l] 0 - } - set last $l - } - - puts "\tLog002.c: Checking log_file" - if { $inmem == 0 } { - set flist [glob $testdir/log*] - foreach p $info_list { - set lsn [lindex $p 0] - set f [$env log_file $lsn] - - # Change backslash separators on Windows to forward - # slashes, as the rest of the test suite expects. 
- regsub -all {\\} $f {/} f - error_check_bad log_file:$f [lsearch $flist $f] -1 - } - } - - puts "\tLog002.d: Verifying records" - - set logc [$env log_cursor] - error_check_good log_cursor [is_valid_logc $logc $env] TRUE - - for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} { - set p [lindex $info_list $i] - set grec [$logc get -set [lindex $p 0]] - error_check_good log_get:$env [lindex $grec 1] [lindex $p 1] - } - - # Close and unlink the file - error_check_good log_cursor:close:$logc [$logc close] 0 - error_check_good env:close [$env close] 0 - error_check_good envremove [berkdb envremove -home $testdir] 0 -} diff --git a/storage/bdb/test/log003.tcl b/storage/bdb/test/log003.tcl deleted file mode 100644 index e8d10dbfc9b..00000000000 --- a/storage/bdb/test/log003.tcl +++ /dev/null @@ -1,144 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: log003.tcl,v 11.34 2004/09/22 18:01:05 bostic Exp $ -# -# TEST log003 -# TEST Verify that log_flush is flushing records correctly. -proc log003 { } { - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - # Even though log_flush doesn't do anything for in-memory - # logging, we want to make sure calling it doesn't break - # anything. - foreach inmem { 1 0 } { - log003_body $inmem - } -} - -proc log003_body { inmem } { - source ./include.tcl - - puts -nonewline "Log003: Verify log_flush behavior" - if { $inmem == 0 } { - puts " (on-disk logging)." - } else { - puts " (in-memory logging)." 
- } - - set max [expr 1024 * 128] - env_cleanup $testdir - set short_rec "abcdefghijklmnopqrstuvwxyz" - set long_rec [repeat $short_rec 200] - set very_long_rec [repeat $long_rec 4] - - foreach rec "$short_rec $long_rec $very_long_rec" { - puts "\tLog003.a: Verify flush on [string length $rec] byte rec" - - set logargs "" - if { $inmem == 1 } { - set logargs "-log_inmemory -log_buffer [expr $max * 2]" - } - set env [eval {berkdb_env} -log -home $testdir -create \ - -mode 0644 $logargs -log_max $max] - error_check_good envopen [is_valid_env $env] TRUE - - set lsn [$env log_put $rec] - error_check_bad log_put [lindex $lsn 0] "ERROR:" - set ret [$env log_flush $lsn] - error_check_good log_flush $ret 0 - - # Now, we want to crash the region and recheck. Closing the - # log does not flush any records, so we'll use a close to - # do the "crash" - set ret [$env close] - error_check_good log_env:close $ret 0 - - # Now, remove the log region - #set ret [berkdb envremove -home $testdir] - #error_check_good env:remove $ret 0 - - # Re-open the log and try to read the record. - set env [berkdb_env -create -home $testdir \ - -log -mode 0644 -log_max $max] - error_check_good envopen [is_valid_env $env] TRUE - - set logc [$env log_cursor] - error_check_good log_cursor [is_valid_logc $logc $env] TRUE - - set gotrec [$logc get -first] - error_check_good lp_get [lindex $gotrec 1] $rec - - # Close and unlink the file - error_check_good log_cursor:close:$logc [$logc close] 0 - error_check_good env:close:$env [$env close] 0 - error_check_good envremove [berkdb envremove -home $testdir] 0 - log_cleanup $testdir - } - - if { $inmem == 1 } { - puts "Log003: Skipping remainder of test for in-memory logging." 
- return - } - - foreach rec "$short_rec $long_rec $very_long_rec" { - puts "\tLog003.b: \ - Verify flush on non-last record [string length $rec]" - - set env [berkdb_env -log -home $testdir \ - -create -mode 0644 -log_max $max] - - error_check_good envopen [is_valid_env $env] TRUE - - # Put 10 random records - for { set i 0 } { $i < 10 } { incr i} { - set r [random_data 450 0 0] - set lsn [$env log_put $r] - error_check_bad log_put [lindex $lsn 0] "ERROR:" - } - - # Put the record we are interested in - set save_lsn [$env log_put $rec] - error_check_bad log_put [lindex $save_lsn 0] "ERROR:" - - # Put 10 more random records - for { set i 0 } { $i < 10 } { incr i} { - set r [random_data 450 0 0] - set lsn [$env log_put $r] - error_check_bad log_put [lindex $lsn 0] "ERROR:" - } - - # Now check the flush - set ret [$env log_flush $save_lsn] - error_check_good log_flush $ret 0 - - # Now, we want to crash the region and recheck. Closing the - # log does not flush any records, so we'll use a close to - # do the "crash". - # - # Now, close and remove the log region - error_check_good env:close:$env [$env close] 0 - set ret [berkdb envremove -home $testdir] - error_check_good env:remove $ret 0 - - # Re-open the log and try to read the record. 
- set env [berkdb_env -log -home $testdir \ - -create -mode 0644 -log_max $max] - error_check_good envopen [is_valid_env $env] TRUE - - set logc [$env log_cursor] - error_check_good log_cursor [is_valid_logc $logc $env] TRUE - - set gotrec [$logc get -set $save_lsn] - error_check_good lp_get [lindex $gotrec 1] $rec - - # Close and unlink the file - error_check_good log_cursor:close:$logc [$logc close] 0 - error_check_good env:close:$env [$env close] 0 - error_check_good envremove [berkdb envremove -home $testdir] 0 - log_cleanup $testdir - } -} diff --git a/storage/bdb/test/log004.tcl b/storage/bdb/test/log004.tcl deleted file mode 100644 index 15af405f5ce..00000000000 --- a/storage/bdb/test/log004.tcl +++ /dev/null @@ -1,52 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: log004.tcl,v 11.31 2004/07/19 16:08:36 carol Exp $ -# - -# TEST log004 -# TEST Make sure that if we do PREVs on a log, but the beginning of the -# TEST log has been truncated, we do the right thing. -proc log004 { } { - foreach inmem { 1 0 } { - log004_body $inmem - } -} - -proc log004_body { inmem } { - source ./include.tcl - - puts "Log004: Prev on log when beginning of log has been truncated." - # Use archive test to populate log - env_cleanup $testdir - puts "\tLog004.a: Call archive to populate log." - archive $inmem - - # Delete all log files under 100 - puts "\tLog004.b: Delete all log files under 100." - set ret [catch { glob $testdir/log.00000000* } result] - if { $ret == 0 } { - eval fileremove -f $result - } - - # Now open the log and get the first record and try a prev - puts "\tLog004.c: Open truncated log, attempt to access missing portion." 
- set env [berkdb_env -create -log -home $testdir] - error_check_good envopen [is_valid_env $env] TRUE - - set logc [$env log_cursor] - error_check_good log_cursor [is_valid_logc $logc $env] TRUE - - set ret [$logc get -first] - error_check_bad log_get [llength $ret] 0 - - # This should give DB_NOTFOUND which is a ret of length 0 - catch {$logc get -prev} ret - error_check_good log_get_prev [string length $ret] 0 - - puts "\tLog004.d: Close log and environment." - error_check_good log_cursor_close [$logc close] 0 - error_check_good log_close [$env close] 0 -} diff --git a/storage/bdb/test/log005.tcl b/storage/bdb/test/log005.tcl deleted file mode 100644 index ea6e3fa3304..00000000000 --- a/storage/bdb/test/log005.tcl +++ /dev/null @@ -1,118 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: log005.tcl,v 11.6 2004/09/22 18:01:05 bostic Exp $ -# -# TEST log005 -# TEST Check that log file sizes can change on the fly. -proc log005 { } { - - # Skip the test for HP-UX, where we can't do the second - # env open. - global is_hp_test - if { $is_hp_test == 1 } { - puts "Log005: Skipping for HP-UX." - return - } - - foreach inmem { 1 0 } { - log005_body $inmem - } -} -proc log005_body { inmem } { - source ./include.tcl - env_cleanup $testdir - - puts -nonewline "Log005: Check that log file sizes can change" - if { $inmem == 0 } { - puts " (on-disk logging)." - } else { - puts " (in-memory logging)." - } - - # Open the environment, set and check the log file size. - puts "\tLog005.a: open, set and check the log file size." 
- set logargs "" - if { $inmem == 1 } { - set lbuf [expr 1024 * 1024] - set logargs "-log_inmemory -log_buffer $lbuf" - } - set env [eval {berkdb_env} -create -home $testdir \ - $logargs -log_max 1000000 -txn] - error_check_good envopen [is_valid_env $env] TRUE - set db [berkdb_open \ - -env $env -create -mode 0644 -btree -auto_commit a.db] - error_check_good dbopen [is_valid_db $db] TRUE - - # Get the current log file maximum. - set max [log005_stat $env "Current log file size"] - error_check_good max_set $max 1000000 - - # Reset the log file size using a second open, and make sure - # it changes. - puts "\tLog005.b: reset during open, check the log file size." - set envtmp [berkdb_env -home $testdir -log_max 900000 -txn] - error_check_good envtmp_open [is_valid_env $envtmp] TRUE - error_check_good envtmp_close [$envtmp close] 0 - - set tmp [log005_stat $env "Current log file size"] - error_check_good max_changed 900000 $tmp - - puts "\tLog005.c: fill in the current log file size." - # Fill in the current log file. - set new_lsn 0 - set data [repeat "a" 1024] - for { set i 1 } \ - { [log005_stat $env "Current log file number"] != 2 } \ - { incr i } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set ret [$db put -txn $t $i $data] - error_check_good put $ret 0 - error_check_good txn [$t commit] 0 - - set last_lsn $new_lsn - set new_lsn [log005_stat $env "Current log file offset"] - } - - # The last LSN in the first file should be more than our new - # file size. - error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1 - - # Close down the environment. - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - - if { $inmem == 1 } { - puts "Log005: Skipping remainder of test for in-memory logging." - return - } - - puts "\tLog005.d: check the log file size is unchanged after recovery." - # Open again, running recovery. Verify the log file size is as we - # left it. 
- set env [berkdb_env -create -home $testdir -recover -txn] - error_check_good env_open [is_valid_env $env] TRUE - - set tmp [log005_stat $env "Current log file size"] - error_check_good after_recovery 900000 $tmp - - error_check_good env_close [$env close] 0 -} - -# log005_stat -- -# Return the current log statistics. -proc log005_stat { env s } { - set stat [$env log_stat] - foreach statpair $stat { - set statmsg [lindex $statpair 0] - set statval [lindex $statpair 1] - if {[is_substr $statmsg $s] != 0} { - return $statval - } - } - puts "FAIL: log005: stat string $s not found" - return 0 -} diff --git a/storage/bdb/test/logtrack.tcl b/storage/bdb/test/logtrack.tcl deleted file mode 100644 index 50851932fc4..00000000000 --- a/storage/bdb/test/logtrack.tcl +++ /dev/null @@ -1,139 +0,0 @@ -# See the file LICENSE for redistribution information -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: logtrack.tcl,v 11.15 2004/04/14 16:08:42 carol Exp $ -# -# logtrack.tcl: A collection of routines, formerly implemented in Perl -# as log.pl, to track which log record types the test suite hits. - -set ltsname "logtrack_seen.db" -set ltlist $test_path/logtrack.list -set tmpname "logtrack_tmp" - -proc logtrack_clean { } { - global ltsname - - file delete -force $ltsname - - return -} - -proc logtrack_init { } { - global ltsname - - logtrack_clean - - # Create an empty tracking database. - [berkdb_open -create -truncate -btree $ltsname] close - - return -} - -# Dump the logs for directory dirname and record which log -# records were seen. 
-proc logtrack_read { dirname } { - global ltsname tmpname util_path - global encrypt passwd - - set seendb [berkdb_open $ltsname] - error_check_good seendb_open [is_valid_db $seendb] TRUE - - file delete -force $tmpname - set pargs " -N -h $dirname " - if { $encrypt > 0 } { - append pargs " -P $passwd " - } - set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res] - error_check_good printlog $ret 0 - error_check_good tmpfile_exists [file exists $tmpname] 1 - - set f [open $tmpname r] - while { [gets $f record] >= 0 } { - set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] - if { $r == 1 } { - error_check_good seendb_put [$seendb put $name ""] 0 - } - } - close $f - file delete -force $tmpname - - error_check_good seendb_close [$seendb close] 0 -} - -# Print the log record types that were seen but should not have been -# seen and the log record types that were not seen but should have been seen. -proc logtrack_summary { } { - global ltsname ltlist testdir - global one_test - - set seendb [berkdb_open $ltsname] - error_check_good seendb_open [is_valid_db $seendb] TRUE - set existdb [berkdb_open -create -btree] - error_check_good existdb_open [is_valid_db $existdb] TRUE - set deprecdb [berkdb_open -create -btree] - error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE - - error_check_good ltlist_exists [file exists $ltlist] 1 - set f [open $ltlist r] - set pref "" - while { [gets $f line] >= 0 } { - # Get the keyword, the first thing on the line: - # BEGIN/DEPRECATED/IGNORED/PREFIX - set keyword [lindex $line 0] - - if { [string compare $keyword PREFIX] == 0 } { - # New prefix. - set pref [lindex $line 1] - } elseif { [string compare $keyword BEGIN] == 0 } { - # A log type we care about; put it on our list. - - # Skip noop and debug. 
- if { [string compare [lindex $line 1] noop] == 0 } { - continue - } - if { [string compare [lindex $line 1] debug] == 0 } { - continue - } - - error_check_good exist_put [$existdb put \ - ${pref}_[lindex $line 1] ""] 0 - } elseif { [string compare $keyword DEPRECATED] == 0 || - [string compare $keyword IGNORED] == 0 } { - error_check_good deprec_put [$deprecdb put \ - ${pref}_[lindex $line 1] ""] 0 - } - } - - error_check_good exist_curs \ - [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE - while { [llength [set dbt [$ec get -next]]] != 0 } { - set rec [lindex [lindex $dbt 0] 0] - if { [$seendb count $rec] == 0 && $one_test == "ALL" } { - puts "WARNING: log record type $rec: not tested" - } - } - error_check_good exist_curs_close [$ec close] 0 - - error_check_good seen_curs \ - [is_valid_cursor [set sc [$existdb cursor]] $existdb] TRUE - while { [llength [set dbt [$sc get -next]]] != 0 } { - set rec [lindex [lindex $dbt 0] 0] - if { [$existdb count $rec] == 0 } { - if { [$deprecdb count $rec] == 0 } { - puts "WARNING: log record type $rec: unknown" - } else { - puts \ - "WARNING: log record type $rec: deprecated" - } - } - } - error_check_good seen_curs_close [$sc close] 0 - - error_check_good seendb_close [$seendb close] 0 - error_check_good existdb_close [$existdb close] 0 - error_check_good deprecdb_close [$deprecdb close] 0 - - logtrack_clean -} diff --git a/storage/bdb/test/mdbscript.tcl b/storage/bdb/test/mdbscript.tcl deleted file mode 100644 index 88433485a94..00000000000 --- a/storage/bdb/test/mdbscript.tcl +++ /dev/null @@ -1,400 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mdbscript.tcl,v 11.33 2004/01/28 03:36:28 bostic Exp $ -# -# Process script for the multi-process db tester. 
- -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -global dbenv -global klock -global l_keys -global procid -global alphabet - -# In Tcl, when there are multiple catch handlers, *all* handlers -# are called, so we have to resort to this hack. -# -global exception_handled - -set exception_handled 0 - -set datastr $alphabet$alphabet - -# Usage: mdbscript dir file nentries iter procid procs seed -# dir: DBHOME directory -# file: db file on which to operate -# nentries: number of entries taken from dictionary -# iter: number of operations to run -# procid: this processes' id number -# procs: total number of processes running -set usage "mdbscript method dir file nentries iter procid procs" - -# Verify usage -if { $argc != 7 } { - puts "FAIL:[timestamp] test042: Usage: $usage" - exit -} - -# Initialize arguments -set method [lindex $argv 0] -set dir [lindex $argv 1] -set file [lindex $argv 2] -set nentries [ lindex $argv 3 ] -set iter [ lindex $argv 4 ] -set procid [ lindex $argv 5 ] -set procs [ lindex $argv 6 ] - -set pflags "" -set gflags "" -set txn "" - -set renum [is_rrecno $method] -set omethod [convert_method $method] - -if { [is_record_based $method] == 1 } { - append gflags " -recno" -} - -# Initialize seed -global rand_init - -# We want repeatable results, but we also want each instance of mdbscript -# to do something different. So we add the procid to the fixed seed. -# (Note that this is a serial number given by the caller, not a pid.) -berkdb srand [expr $rand_init + $procid] - -puts "Beginning execution for [pid] $method" -puts "$dir db_home" -puts "$file database" -puts "$nentries data elements" -puts "$iter iterations" -puts "$procid process id" -puts "$procs processes" - -set klock NOLOCK - -# Note: all I/O operations, and especially flush, are expensive -# on Win2000 at least with Tcl version 8.3.2. So we'll avoid -# flushes in the main part of the loop below. 
-flush stdout - -set dbenv [berkdb_env -create -cdb -home $dir] -#set dbenv [berkdb_env -create -cdb -log -home $dir] -error_check_good dbenv [is_valid_env $dbenv] TRUE - -set locker [ $dbenv lock_id ] - -set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file] -error_check_good dbopen [is_valid_db $db] TRUE - -# Init globals (no data) -set nkeys [db_init $db 0] -puts "Initial number of keys: $nkeys" -tclsleep 5 - -proc get_lock { k } { - global dbenv - global procid - global locker - global klock - global DB_LOCK_WRITE - global DB_LOCK_NOWAIT - global errorInfo - global exception_handled - # Make sure that the key isn't in the middle of - # a delete operation - if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } { - set exception_handled 1 - - error_check_good \ - get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1 - puts "Warning: key $k locked" - set klock NOLOCK - return 1 - } else { - error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE - } - return 0 -} - -# If we are renumbering, then each time we delete an item, the number of -# items in the file is temporarily decreased, so the highest record numbers -# do not exist. To make sure this doesn't happen, we never generate the -# highest few record numbers as keys. -# -# For record-based methods, record numbers begin at 1, while for other keys, -# we begin at 0 to index into an array. -proc rand_key { method nkeys renum procs} { - if { $renum == 1 } { - return [berkdb random_int 1 [expr $nkeys - $procs]] - } elseif { [is_record_based $method] == 1 } { - return [berkdb random_int 1 $nkeys] - } else { - return [berkdb random_int 0 [expr $nkeys - 1]] - } -} - -# On each iteration we're going to randomly pick a key. -# 1. We'll either get it (verifying that its contents are reasonable). -# 2. Put it (using an overwrite to make the data be datastr:ID). -# 3. Get it and do a put through the cursor, tacking our ID on to -# 4. Get it, read forward some random number of keys. -# 5. 
Get it, read forward some random number of keys and do a put (replace). -# 6. Get it, read forward some random number of keys and do a del. And then -# do a put of the key. -set gets 0 -set getput 0 -set overwrite 0 -set seqread 0 -set seqput 0 -set seqdel 0 -set dlen [string length $datastr] - -for { set i 0 } { $i < $iter } { incr i } { - set op [berkdb random_int 0 5] - puts "iteration $i operation $op" - set close_cursor 0 - if {[catch { - switch $op { - 0 { - incr gets - set k [rand_key $method $nkeys $renum $procs] - if {[is_record_based $method] == 1} { - set key $k - } else { - set key [lindex $l_keys $k] - } - - if { [get_lock $key] == 1 } { - incr i -1 - continue; - } - - set rec [eval {$db get} $txn $gflags {$key}] - error_check_bad "$db get $key" [llength $rec] 0 - set partial [string range \ - [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]] - error_check_good \ - "$db get $key" $partial [pad_data $method $datastr] - } - 1 { - incr overwrite - set k [rand_key $method $nkeys $renum $procs] - if {[is_record_based $method] == 1} { - set key $k - } else { - set key [lindex $l_keys $k] - } - - set data $datastr:$procid - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $data]}] - error_check_good "$db put $key" $ret 0 - } - 2 { - incr getput - set dbc [$db cursor -update] - error_check_good "$db cursor" \ - [is_valid_cursor $dbc $db] TRUE - set close_cursor 1 - set k [rand_key $method $nkeys $renum $procs] - if {[is_record_based $method] == 1} { - set key $k - } else { - set key [lindex $l_keys $k] - } - - if { [get_lock $key] == 1 } { - incr i -1 - error_check_good "$dbc close" \ - [$dbc close] 0 - set close_cursor 0 - continue; - } - - set ret [$dbc get -set $key] - error_check_good \ - "$dbc get $key" [llength [lindex $ret 0]] 2 - set rec [lindex [lindex $ret 0] 1] - set partial [string range $rec 0 [expr $dlen - 1]] - error_check_good \ - "$dbc get $key" $partial [pad_data $method $datastr] - append rec ":$procid" - set ret [$dbc put \ - 
-current [chop_data $method $rec]] - error_check_good "$dbc put $key" $ret 0 - error_check_good "$dbc close" [$dbc close] 0 - set close_cursor 0 - } - 3 - - 4 - - 5 { - if { $op == 3 } { - set flags "" - } else { - set flags -update - } - set dbc [eval {$db cursor} $flags] - error_check_good "$db cursor" \ - [is_valid_cursor $dbc $db] TRUE - set close_cursor 1 - set k [rand_key $method $nkeys $renum $procs] - if {[is_record_based $method] == 1} { - set key $k - } else { - set key [lindex $l_keys $k] - } - - if { [get_lock $key] == 1 } { - incr i -1 - error_check_good "$dbc close" \ - [$dbc close] 0 - set close_cursor 0 - continue; - } - - set ret [$dbc get -set $key] - error_check_good \ - "$dbc get $key" [llength [lindex $ret 0]] 2 - - # Now read a few keys sequentially - set nloop [berkdb random_int 0 10] - if { [berkdb random_int 0 1] == 0 } { - set flags -next - } else { - set flags -prev - } - while { $nloop > 0 } { - set lastret $ret - set ret [eval {$dbc get} $flags] - # Might read beginning/end of file - if { [llength $ret] == 0} { - set ret $lastret - break - } - incr nloop -1 - } - switch $op { - 3 { - incr seqread - } - 4 { - incr seqput - set rec [lindex [lindex $ret 0] 1] - set partial [string range $rec 0 \ - [expr $dlen - 1]] - error_check_good "$dbc get $key" \ - $partial [pad_data $method $datastr] - append rec ":$procid" - set ret [$dbc put -current \ - [chop_data $method $rec]] - error_check_good \ - "$dbc put $key" $ret 0 - } - 5 { - incr seqdel - set k [lindex [lindex $ret 0] 0] - # We need to lock the item we're - # deleting so that someone else can't - # try to do a get while we're - # deleting - error_check_good "$klock put" \ - [$klock put] 0 - set klock NOLOCK - set cur [$dbc get -current] - error_check_bad get_current \ - [llength $cur] 0 - set key [lindex [lindex $cur 0] 0] - if { [get_lock $key] == 1 } { - incr i -1 - error_check_good "$dbc close" \ - [$dbc close] 0 - set close_cursor 0 - continue - } - set ret [$dbc del] - 
error_check_good "$dbc del" $ret 0 - set rec $datastr - append rec ":$procid" - if { $renum == 1 } { - set ret [$dbc put -before \ - [chop_data $method $rec]] - error_check_good \ - "$dbc put $k" $ret $k - } elseif { \ - [is_record_based $method] == 1 } { - error_check_good "$dbc close" \ - [$dbc close] 0 - set close_cursor 0 - set ret [$db put $k \ - [chop_data $method $rec]] - error_check_good \ - "$db put $k" $ret 0 - } else { - set ret [$dbc put -keylast $k \ - [chop_data $method $rec]] - error_check_good \ - "$dbc put $k" $ret 0 - } - } - } - if { $close_cursor == 1 } { - error_check_good \ - "$dbc close" [$dbc close] 0 - set close_cursor 0 - } - } - } - } res] != 0} { - global errorInfo; - global exception_handled; - - puts $errorInfo - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - - if { [string compare $klock NOLOCK] != 0 } { - catch {$klock put} - } - if {$close_cursor == 1} { - catch {$dbc close} - set close_cursor 0 - } - - if {[string first FAIL $theError] == 0 && \ - $exception_handled != 1} { - flush stdout - error "FAIL:[timestamp] test042: key $k: $theError" - } - set exception_handled 0 - } else { - if { [string compare $klock NOLOCK] != 0 } { - error_check_good "$klock put" [$klock put] 0 - set klock NOLOCK - } - } -} - -error_check_good db_close_catch [catch {$db close} ret] 0 -error_check_good db_close $ret 0 -error_check_good dbenv_close [$dbenv close] 0 - -flush stdout -exit - -puts "[timestamp] [pid] Complete" -puts "Successful ops: " -puts "\t$gets gets" -puts "\t$overwrite overwrites" -puts "\t$getput getputs" -puts "\t$seqread seqread" -puts "\t$seqput seqput" -puts "\t$seqdel seqdel" -flush stdout diff --git a/storage/bdb/test/memp001.tcl b/storage/bdb/test/memp001.tcl deleted file mode 100644 index 4818072debd..00000000000 --- a/storage/bdb/test/memp001.tcl +++ /dev/null @@ -1,199 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: memp001.tcl,v 11.53 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST memp001 -# TEST Randomly updates pages. -proc memp001 { } { - - memp001_body 1 "" - memp001_body 3 "" - memp001_body 1 -private - memp001_body 3 -private - memp001_body 1 "-system_mem -shm_key 1" - memp001_body 3 "-system_mem -shm_key 1" - -} - -proc memp001_body { ncache flags } { - source ./include.tcl - global rand_init - - set nfiles 5 - set iter 500 - set psize 512 - set cachearg "-cachesize {0 400000 $ncache}" - - puts \ -"Memp001: { $flags } random update $iter iterations on $nfiles files." - # - # Check if this platform supports this set of flags - # - if { [mem_chk $flags] == 1 } { - return - } - - env_cleanup $testdir - puts "\tMemp001.a: Create env with $ncache caches" - set env [eval {berkdb_env -create -mode 0644} \ - $cachearg {-home $testdir} $flags] - error_check_good env_open [is_valid_env $env] TRUE - - # - # Do a simple mpool_stat call to verify the number of caches - # just to exercise the stat code. 
- set stat [$env mpool_stat] - set str "Number of caches" - set checked 0 - foreach statpair $stat { - if { $checked == 1 } { - break - } - if { [is_substr [lindex $statpair 0] $str] != 0} { - set checked 1 - error_check_good ncache [lindex $statpair 1] $ncache - } - } - error_check_good checked $checked 1 - - # Open N memp files - puts "\tMemp001.b: Create $nfiles mpool files" - for {set i 1} {$i <= $nfiles} {incr i} { - set fname "data_file.$i" - file_create $testdir/$fname 50 $psize - - set mpools($i) \ - [$env mpool -create -pagesize $psize -mode 0644 $fname] - error_check_good mp_open [is_substr $mpools($i) $env.mp] 1 - } - - # Now, loop, picking files at random - berkdb srand $rand_init - puts "\tMemp001.c: Random page replacement loop" - for {set i 0} {$i < $iter} {incr i} { - set mpool $mpools([berkdb random_int 1 $nfiles]) - set p(1) [get_range $mpool 10] - set p(2) [get_range $mpool 10] - set p(3) [get_range $mpool 10] - set p(1) [replace $mpool $p(1)] - set p(3) [replace $mpool $p(3)] - set p(4) [get_range $mpool 20] - set p(4) [replace $mpool $p(4)] - set p(5) [get_range $mpool 10] - set p(6) [get_range $mpool 20] - set p(7) [get_range $mpool 10] - set p(8) [get_range $mpool 20] - set p(5) [replace $mpool $p(5)] - set p(6) [replace $mpool $p(6)] - set p(9) [get_range $mpool 40] - set p(9) [replace $mpool $p(9)] - set p(10) [get_range $mpool 40] - set p(7) [replace $mpool $p(7)] - set p(8) [replace $mpool $p(8)] - set p(9) [replace $mpool $p(9)] - set p(10) [replace $mpool $p(10)] - # - # We now need to put all the pages we have here or - # else they end up pinned. - # - for {set x 1} { $x <= 10} {incr x} { - error_check_good pgput [$p($x) put] 0 - } - } - - # Close N memp files, close the environment. 
- puts "\tMemp001.d: Close mpools" - for {set i 1} {$i <= $nfiles} {incr i} { - error_check_good memp_close:$mpools($i) [$mpools($i) close] 0 - } - error_check_good envclose [$env close] 0 - - for {set i 1} {$i <= $nfiles} {incr i} { - fileremove -f $testdir/data_file.$i - } -} - -proc file_create { fname nblocks blocksize } { - set fid [open $fname w] - for {set i 0} {$i < $nblocks} {incr i} { - seek $fid [expr $i * $blocksize] start - puts -nonewline $fid $i - } - seek $fid [expr $nblocks * $blocksize - 1] - - # We don't end the file with a newline, because some platforms (like - # Windows) emit CR/NL. There does not appear to be a BINARY open flag - # that prevents this. - puts -nonewline $fid "Z" - close $fid - - # Make sure it worked - if { [file size $fname] != $nblocks * $blocksize } { - error "FAIL: file_create could not create correct file size" - } -} - -proc get_range { mpool max } { - set pno [berkdb random_int 0 $max] - set p [$mpool get $pno] - error_check_good page [is_valid_page $p $mpool] TRUE - set got [$p pgnum] - if { $got != $pno } { - puts "Get_range: Page mismatch page |$pno| val |$got|" - } - set ret [$p init "Page is pinned by [pid]"] - error_check_good page_init $ret 0 - - return $p -} - -proc replace { mpool p } { - set pgno [$p pgnum] - - set ret [$p init "Page is unpinned by [pid]"] - error_check_good page_init $ret 0 - - set ret [$p put -dirty] - error_check_good page_put $ret 0 - - set p2 [$mpool get $pgno] - error_check_good page [is_valid_page $p2 $mpool] TRUE - - return $p2 -} - -proc mem_chk { flags } { - source ./include.tcl - global errorCode - - # Open the memp with region init specified - env_cleanup $testdir - - set cachearg " -cachesize {0 400000 3}" - set ret [catch {eval {berkdb_env_noerr -create -mode 0644}\ - $cachearg {-region_init -home $testdir} $flags} env] - if { $ret != 0 } { - # If the env open failed, it may be because we're on a platform - # such as HP-UX 10 that won't support mutexes in shmget memory. 
- # Or QNX, which doesn't support system memory at all. - # Verify that the return value was EINVAL or EOPNOTSUPP - # and bail gracefully. - error_check_good is_shm_test [is_substr $flags -system_mem] 1 - error_check_good returned_error [expr \ - [is_substr $errorCode EINVAL] || \ - [is_substr $errorCode EOPNOTSUPP]] 1 - puts "Warning:\ - platform does not support mutexes in shmget memory." - puts "Skipping shared memory mpool test." - return 1 - } - error_check_good env_open [is_valid_env $env] TRUE - error_check_good env_close [$env close] 0 - env_cleanup $testdir - - return 0 -} diff --git a/storage/bdb/test/memp002.tcl b/storage/bdb/test/memp002.tcl deleted file mode 100644 index 763ef923d35..00000000000 --- a/storage/bdb/test/memp002.tcl +++ /dev/null @@ -1,62 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: memp002.tcl,v 11.49 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST memp002 -# TEST Tests multiple processes accessing and modifying the same files. -proc memp002 { } { - # - # Multiple processes not supported by private memory so don't - # run memp002_body with -private. - # - memp002_body "" - memp002_body "-system_mem -shm_key 1" -} - -proc memp002_body { flags } { - source ./include.tcl - - puts "Memp002: {$flags} Multiprocess mpool tester" - - set procs 4 - set psizes "512 1024 2048 4096 8192" - set iterations 500 - set npages 100 - - # Check if this combination of flags is supported by this arch. - if { [mem_chk $flags] == 1 } { - return - } - - set iter [expr $iterations / $procs] - - # Clean up old stuff and create new. 
- env_cleanup $testdir - - for { set i 0 } { $i < [llength $psizes] } { incr i } { - fileremove -f $testdir/file$i - } - set e [eval {berkdb_env -create -lock -home $testdir} $flags] - error_check_good dbenv [is_valid_env $e] TRUE - - set pidlist {} - for { set i 0 } { $i < $procs } {incr i} { - - puts "$tclsh_path\ - $test_path/mpoolscript.tcl $testdir $i $procs \ - $iter $psizes $npages 3 $flags > \ - $testdir/memp002.$i.out &" - set p [exec $tclsh_path $test_path/wrap.tcl \ - mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \ - $iter $psizes $npages 3 $flags &] - lappend pidlist $p - } - puts "Memp002: $procs independent processes now running" - watch_procs $pidlist - - reset_env $e -} diff --git a/storage/bdb/test/memp003.tcl b/storage/bdb/test/memp003.tcl deleted file mode 100644 index a4e68bfd58d..00000000000 --- a/storage/bdb/test/memp003.tcl +++ /dev/null @@ -1,154 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: memp003.tcl,v 11.51 2004/01/28 03:36:28 bostic Exp $ -# -# TEST memp003 -# TEST Test reader-only/writer process combinations; we use the access methods -# TEST for testing. -proc memp003 { } { - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - # - # Multiple processes not supported by private memory so don't - # run memp003_body with -private. - # - memp003_body "" - memp003_body "-system_mem -shm_key 1" -} - -proc memp003_body { flags } { - global alphabet - source ./include.tcl - - puts "Memp003: {$flags} Reader/Writer tests" - - if { [mem_chk $flags] == 1 } { - return - } - - env_cleanup $testdir - set psize 1024 - set nentries 500 - set testfile mpool.db - set t1 $testdir/t1 - - # Create an environment that the two processes can share, with - # 20 pages per cache. 
- set c [list 0 [expr $psize * 20 * 3] 3] - set dbenv [eval {berkdb_env \ - -create -lock -home $testdir -cachesize $c} $flags] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # First open and create the file. - set db [berkdb_open -env $dbenv -create \ - -mode 0644 -pagesize $psize -btree $testfile] - error_check_good dbopen/RW [is_valid_db $db] TRUE - - set did [open $dict] - set txn "" - set count 0 - - puts "\tMemp003.a: create database" - set keys "" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - lappend keys $str - - set ret [eval {$db put} $txn {$str $str}] - error_check_good put $ret 0 - - set ret [eval {$db get} $txn {$str}] - error_check_good get $ret [list [list $str $str]] - - incr count - } - close $did - error_check_good close [$db close] 0 - - # Now open the file for read-only - set db [berkdb_open -env $dbenv -rdonly $testfile] - error_check_good dbopen/RO [is_substr $db db] 1 - - puts "\tMemp003.b: verify a few keys" - # Read and verify a couple of keys; saving them to check later - set testset "" - for { set i 0 } { $i < 10 } { incr i } { - set ndx [berkdb random_int 0 [expr $nentries - 1]] - set key [lindex $keys $ndx] - if { [lsearch $testset $key] != -1 } { - incr i -1 - continue; - } - - # The remote process stuff is unhappy with - # zero-length keys; make sure we don't pick one. 
- if { [llength $key] == 0 } { - incr i -1 - continue - } - - lappend testset $key - - set ret [eval {$db get} $txn {$key}] - error_check_good get/RO $ret [list [list $key $key]] - } - - puts "\tMemp003.c: retrieve and modify keys in remote process" - # Now open remote process where we will open the file RW - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - puts $f1 "flush stdout" - flush $f1 - - set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ] - set remote_env [send_cmd $f1 \ - "berkdb_env -create -lock -home $testdir -cachesize $c $flags"] - error_check_good remote_dbenv [is_valid_env $remote_env] TRUE - - set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"] - error_check_good remote_dbopen [is_valid_db $remote_db] TRUE - - foreach k $testset { - # Get the key - set ret [send_cmd $f1 "$remote_db get $k"] - error_check_good remote_get $ret [list [list $k $k]] - - # Now replace the key - set ret [send_cmd $f1 "$remote_db put $k $k$k"] - error_check_good remote_put $ret 0 - } - - puts "\tMemp003.d: verify changes in local process" - foreach k $testset { - set ret [eval {$db get} $txn {$key}] - error_check_good get_verify/RO $ret [list [list $key $key$key]] - } - - puts "\tMemp003.e: Fill up the cache with dirty buffers" - foreach k $testset { - # Now rewrite the keys with BIG data - set data [replicate $alphabet 32] - set ret [send_cmd $f1 "$remote_db put $k $data"] - error_check_good remote_put $ret 0 - } - - puts "\tMemp003.f: Get more pages for the read-only file" - dump_file $db $txn $t1 nop - - puts "\tMemp003.g: Sync from the read-only file" - error_check_good db_sync [$db sync] 0 - error_check_good db_close [$db close] 0 - - set ret [send_cmd $f1 "$remote_db close"] - error_check_good remote_get $ret 0 - - # Close the environment both remotely and locally. 
- set ret [send_cmd $f1 "$remote_env close"] - error_check_good remote:env_close $ret 0 - close $f1 - - reset_env $dbenv -} diff --git a/storage/bdb/test/mpoolscript.tcl b/storage/bdb/test/mpoolscript.tcl deleted file mode 100644 index 38d689c9666..00000000000 --- a/storage/bdb/test/mpoolscript.tcl +++ /dev/null @@ -1,171 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mpoolscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ -# -# Random multiple process mpool tester. -# Usage: mpoolscript dir id numiters numfiles numpages sleepint -# dir: lock directory. -# id: Unique identifier for this process. -# maxprocs: Number of procs in this test. -# numiters: Total number of iterations. -# pgsizes: Pagesizes for the different files. Length of this item indicates -# how many files to use. -# numpages: Number of pages per file. -# sleepint: Maximum sleep interval. -# flags: Flags for env open - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage \ - "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags" - -# Verify usage -if { $argc != 8 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - puts $argc - exit -} - -# Initialize arguments -set dir [lindex $argv 0] -set id [lindex $argv 1] -set maxprocs [lindex $argv 2] -set numiters [ lindex $argv 3 ] -set pgsizes [ lindex $argv 4 ] -set numpages [ lindex $argv 5 ] -set sleepint [ lindex $argv 6 ] -set flags [ lindex $argv 7] - -# Initialize seed -global rand_init -berkdb srand $rand_init - -# Give time for all processes to start up. 
-tclsleep 10 - -puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters" -puts " $pgsizes $numpages $sleepint" -flush stdout - -# Figure out how small/large to make the cache -set max 0 -foreach i $pgsizes { - if { $i > $max } { - set max $i - } -} - -set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1] -set env_cmd {berkdb_env -lock -cachesize $cache -home $dir} -set e [eval $env_cmd $flags] -error_check_good env_open [is_valid_env $e] TRUE - -# Now open files -set mpools {} -set nfiles 0 -foreach psize $pgsizes { - set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles] - error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE - lappend mpools $mp - incr nfiles -} - -puts "Establishing long-term pin on file 0 page $id for process $id" - -# Set up the long-pin page -set locker [$e lock_id] -set lock [$e lock_get write $locker 0:$id] -error_check_good lock_get [is_valid_lock $lock $e] TRUE - -set mp [lindex $mpools 0] -set master_page [$mp get -create $id] -error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE - -set r [$master_page init MASTER$id] -error_check_good page_init $r 0 - -# Release the lock but keep the page pinned -set r [$lock put] -error_check_good lock_put $r 0 - -# Main loop. On each iteration, we'll check every page in each of -# of the files. On any file, if we see the appropriate tag in the -# field, we'll rewrite the page, else we won't. Keep track of -# how many pages we actually process. 
-set pages 0 -for { set iter 0 } { $iter < $numiters } { incr iter } { - puts "[timestamp]: iteration $iter, $pages pages set so far" - flush stdout - for { set fnum 1 } { $fnum < $nfiles } { incr fnum } { - if { [expr $fnum % 2 ] == 0 } { - set pred [expr ($id + $maxprocs - 1) % $maxprocs] - } else { - set pred [expr ($id + $maxprocs + 1) % $maxprocs] - } - - set mpf [lindex $mpools $fnum] - for { set p 0 } { $p < $numpages } { incr p } { - set lock [$e lock_get write $locker $fnum:$p] - error_check_good lock_get:$fnum:$p \ - [is_valid_lock $lock $e] TRUE - - # Now, get the page - set pp [$mpf get -create $p] - error_check_good page_get:$fnum:$p \ - [is_valid_page $pp $mpf] TRUE - - if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } { - # Set page to self. - set r [$pp init $id] - error_check_good page_init:$fnum:$p $r 0 - incr pages - set r [$pp put -dirty] - error_check_good page_put:$fnum:$p $r 0 - } else { - error_check_good page_put:$fnum:$p [$pp put] 0 - } - error_check_good lock_put:$fnum:$p [$lock put] 0 - } - } - tclsleep [berkdb random_int 1 $sleepint] -} - -# Now verify your master page, release its pin, then verify everyone else's -puts "$id: End of run verification of master page" -set r [$master_page is_setto MASTER$id] -error_check_good page_check $r 1 -set r [$master_page put -dirty] -error_check_good page_put $r 0 - -set i [expr ($id + 1) % $maxprocs] -set mpf [lindex $mpools 0] - -while { $i != $id } { - set p [$mpf get -create $i] - error_check_good mp_get [is_valid_page $p $mpf] TRUE - - if { [$p is_setto MASTER$i] != 1 } { - puts "Warning: Master page $i not set." 
- } - error_check_good page_put:$p [$p put] 0 - - set i [expr ($i + 1) % $maxprocs] -} - -# Close files -foreach i $mpools { - set r [$i close] - error_check_good mpf_close $r 0 -} - -# Close environment system -set r [$e close] -error_check_good env_close $r 0 - -puts "[timestamp] $id Complete" -flush stdout diff --git a/storage/bdb/test/mutex001.tcl b/storage/bdb/test/mutex001.tcl deleted file mode 100644 index 66d14c41c95..00000000000 --- a/storage/bdb/test/mutex001.tcl +++ /dev/null @@ -1,51 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mutex001.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST mutex001 -# TEST Test basic mutex functionality -proc mutex001 { } { - source ./include.tcl - - puts "Mutex001: Basic functionality" - env_cleanup $testdir - set nlocks 20 - - # Test open w/out create; should fail - error_check_bad \ - env_open [catch {berkdb_env -lock -home $testdir} env] 0 - - puts "\tMutex001.a: Create lock env" - # Now open for real - set env [berkdb_env -create -mode 0644 -lock -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - puts "\tMutex001.b: Create $nlocks mutexes" - set m [$env mutex 0644 $nlocks] - error_check_good mutex_init [is_valid_mutex $m $env] TRUE - - # Get, set each mutex; sleep, then get Release - puts "\tMutex001.c: Get/set loop" - for { set i 0 } { $i < $nlocks } { incr i } { - set r [$m get $i ] - error_check_good mutex_get $r 0 - - set r [$m setval $i $i] - error_check_good mutex_setval $r 0 - } - tclsleep 5 - for { set i 0 } { $i < $nlocks } { incr i } { - set r [$m getval $i] - error_check_good mutex_getval $r $i - - set r [$m release $i ] - error_check_good mutex_get $r 0 - } - - error_check_good mutex_close [$m close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/mutex002.tcl b/storage/bdb/test/mutex002.tcl deleted file mode 100644 index 
f03a63ac139..00000000000 --- a/storage/bdb/test/mutex002.tcl +++ /dev/null @@ -1,94 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mutex002.tcl,v 11.25 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST mutex002 -# TEST Test basic mutex synchronization -proc mutex002 { } { - source ./include.tcl - - puts "Mutex002: Basic synchronization" - env_cleanup $testdir - set nlocks 20 - - # Fork off child before we open any files. - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - flush $f1 - - # Open the environment and the mutex locally - puts "\tMutex002.a: Open local and remote env" - set local_env [berkdb_env -create -mode 0644 -lock -home $testdir] - error_check_good env_open [is_valid_env $local_env] TRUE - - set local_mutex [$local_env mutex 0644 $nlocks] - error_check_good \ - mutex_init [is_valid_mutex $local_mutex $local_env] TRUE - - # Open the environment and the mutex remotely - set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - - set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"] - error_check_good \ - mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE - - # Do a get here, then set the value to be pid. - # On the remote side fire off a get and getval. - puts "\tMutex002.b: Local and remote get/set" - set r [$local_mutex get 1] - error_check_good lock_get $r 0 - - set r [$local_mutex setval 1 [pid]] - error_check_good lock_get $r 0 - - # Now have the remote side request the lock and check its - # value. Then wait 5 seconds, release the mutex and see - # what the remote side returned. 
- send_timed_cmd $f1 1 "$remote_mutex get 1" - send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]" - - # Now sleep before resetting and releasing lock - tclsleep 5 - set newv [expr [pid] - 1] - set r [$local_mutex setval 1 $newv] - error_check_good mutex_setval $r 0 - - set r [$local_mutex release 1] - error_check_good mutex_release $r 0 - - # Now get the result from the other script - # Timestamp - set result [rcv_result $f1] - error_check_good lock_get:remote_time [expr $result > 4] 1 - - # Timestamp - set result [rcv_result $f1] - - # Mutex value - set result [send_cmd $f1 "puts \$ret"] - error_check_good lock_get:remote_getval $result $newv - - # Close down the remote - puts "\tMutex002.c: Close remote" - set ret [send_cmd $f1 "$remote_mutex close" 5] - # Not sure why we need this, but we do... an extra blank line - # someone gets output somewhere - gets $f1 ret - error_check_good remote:mutex_close $ret 0 - - set ret [send_cmd $f1 "$remote_env close"] - error_check_good remote:env_close $ret 0 - - catch { close $f1 } result - - set ret [$local_mutex close] - error_check_good local:mutex_close $ret 0 - - set ret [$local_env close] - error_check_good local:env_close $ret 0 -} diff --git a/storage/bdb/test/mutex003.tcl b/storage/bdb/test/mutex003.tcl deleted file mode 100644 index 7efd0883e0e..00000000000 --- a/storage/bdb/test/mutex003.tcl +++ /dev/null @@ -1,52 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mutex003.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $ -# - -# TEST mutex003 -# TEST Generate a bunch of parallel testers that try to randomly obtain locks. -proc mutex003 { } { - source ./include.tcl - - set nmutex 20 - set iter 500 - set procs 5 - set mdegree 3 - set wait 2 - puts "Mutex003: Multi-process random mutex test" - - env_cleanup $testdir - - puts "\tMutex003.a: Create environment" - # Now open the region we'll use for multiprocess testing. 
- set env [berkdb_env -create -mode 0644 -lock -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - set mutex [$env mutex 0644 $nmutex] - error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE - - error_check_good mutex_close [$mutex close] 0 - - # Now spawn off processes - puts "\tMutex003.b: Create $procs processes" - set pidlist {} - for { set i 0 } {$i < $procs} {incr i} { - puts "$tclsh_path\ - $test_path/mutexscript.tcl $testdir\ - $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &" - set p [exec $tclsh_path $test_path/wrap.tcl \ - mutexscript.tcl $testdir/$i.mutexout $testdir\ - $iter $nmutex $wait $mdegree &] - lappend pidlist $p - } - puts "\tMutex003.c: $procs independent processes now running" - watch_procs $pidlist - error_check_good env_close [$env close] 0 - # Remove output files - for { set i 0 } {$i < $procs} {incr i} { - fileremove -f $testdir/$i.mutexout - } -} diff --git a/storage/bdb/test/mutexscript.tcl b/storage/bdb/test/mutexscript.tcl deleted file mode 100644 index 7bf1bbe9552..00000000000 --- a/storage/bdb/test/mutexscript.tcl +++ /dev/null @@ -1,91 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: mutexscript.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ -# -# Random mutex tester. -# Usage: mutexscript dir numiters mlocks sleepint degree -# dir: dir in which all the mutexes live. -# numiters: Total number of iterations. -# nmutex: Total number of mutexes. -# sleepint: Maximum sleep interval. 
-# degree: Maximum number of locks to acquire at once - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "mutexscript dir numiters nmutex sleepint degree" - -# Verify usage -if { $argc != 5 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set dir [lindex $argv 0] -set numiters [ lindex $argv 1 ] -set nmutex [ lindex $argv 2 ] -set sleepint [ lindex $argv 3 ] -set degree [ lindex $argv 4 ] -set locker [pid] -set mypid [sanitized_pid] - -# Initialize seed -global rand_init -berkdb srand $rand_init - -puts -nonewline "Mutexscript: Beginning execution for $locker:" -puts " $numiters $nmutex $sleepint $degree" -flush stdout - -# Open the environment and the mutex -set e [berkdb_env -create -mode 0644 -lock -home $dir] -error_check_good evn_open [is_valid_env $e] TRUE - -set mutex [$e mutex 0644 $nmutex] -error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE - -# Sleep for awhile to make sure that everyone has gotten in -tclsleep 5 - -for { set iter 0 } { $iter < $numiters } { incr iter } { - set nlocks [berkdb random_int 1 $degree] - # We will always lock objects in ascending order to avoid - # deadlocks. - set lastobj 1 - set mlist {} - for { set lnum 0 } { $lnum < $nlocks } { incr lnum } { - # Pick lock parameters - set obj [berkdb random_int $lastobj [expr $nmutex - 1]] - set lastobj [expr $obj + 1] - puts "[timestamp] $locker $lnum: $obj" - - # Do get, set its val to own pid, and then add to list - error_check_good mutex_get:$obj [$mutex get $obj] 0 - error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0 - lappend mlist $obj - if {$lastobj >= $nmutex} { - break - } - } - - # Sleep for 10 to (100*$sleepint) ms. 
- after [berkdb random_int 10 [expr $sleepint * 100]] - - # Now release locks - foreach i $mlist { - error_check_good mutex_getval:$i [$mutex getval $i] $mypid - error_check_good mutex_setval:$i \ - [$mutex setval $i [expr 0 - $mypid]] 0 - error_check_good mutex_release:$i [$mutex release $i] 0 - } - puts "[timestamp] $locker released mutexes" - flush stdout -} - -puts "[timestamp] $locker Complete" -flush stdout diff --git a/storage/bdb/test/ndbm.tcl b/storage/bdb/test/ndbm.tcl deleted file mode 100644 index 00ee1f4fdbf..00000000000 --- a/storage/bdb/test/ndbm.tcl +++ /dev/null @@ -1,144 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: ndbm.tcl,v 11.18 2004/01/28 03:36:28 bostic Exp $ -# -# Historic NDBM interface test. -# Use the first 1000 entries from the dictionary. -# Insert each with self as key and data; retrieve each. -# After all are entered, retrieve all; compare output to original. -# Then reopen the file, re-retrieve everything. -# Finally, delete everything. 
-proc ndbm { { nentries 1000 } } { - source ./include.tcl - - puts "NDBM interfaces test: $nentries" - - # Create the database and open the dictionary - set testfile $testdir/ndbmtest - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir NULL - - set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile] - error_check_good ndbm_open [is_substr $db ndbm] 1 - set did [open $dict] - - error_check_good rdonly_false [$db rdonly] 0 - - set flags 0 - set txn 0 - set count 0 - set skippednullkey 0 - - puts "\tNDBM.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - # NDBM can't handle zero-length keys - if { [string length $str] == 0 } { - set skippednullkey 1 - continue - } - - set ret [$db store $str $str insert] - error_check_good ndbm_store $ret 0 - - set d [$db fetch $str] - error_check_good ndbm_fetch $d $str - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tNDBM.b: dump file" - set oid [open $t1 w] - for { set key [$db firstkey] } { $key != -1 } { - set key [$db nextkey] } { - puts $oid $key - set d [$db fetch $key] - error_check_good ndbm_refetch $d $key - } - - # If we had to skip a zero-length key, juggle things to cover up - # this fact in the dump. - if { $skippednullkey == 1 } { - puts $oid "" - incr nentries 1 - } - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - - error_check_good NDBM:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # File descriptors tests won't work under Windows. 
- if { $is_windows_test != 1 } { - puts "\tNDBM.c: pagf/dirf test" - set fd [$db pagfno] - error_check_bad pagf $fd -1 - set fd [$db dirfno] - error_check_bad dirf $fd -1 - } - - puts "\tNDBM.d: close, open, and dump file" - - # Now, reopen the file and run the last test again. - error_check_good ndbm_close [$db close] 0 - set db [berkdb ndbm_open -rdonly $testfile] - error_check_good ndbm_open2 [is_substr $db ndbm] 1 - set oid [open $t1 w] - - error_check_good rdonly_true [$db rdonly] "rdonly:not owner" - - for { set key [$db firstkey] } { $key != -1 } { - set key [$db nextkey] } { - puts $oid $key - set d [$db fetch $key] - error_check_good ndbm_refetch2 $d $key - } - if { $skippednullkey == 1 } { - puts $oid "" - } - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - filesort $t1 $t3 - - error_check_good NDBM:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and delete each entry - puts "\tNDBM.e: sequential scan and delete" - - error_check_good ndbm_close [$db close] 0 - set db [berkdb ndbm_open $testfile] - error_check_good ndbm_open3 [is_substr $db ndbm] 1 - set oid [open $t1 w] - - for { set key [$db firstkey] } { $key != -1 } { - set key [$db nextkey] } { - puts $oid $key - set ret [$db delete $key] - error_check_good ndbm_delete $ret 0 - } - if { $skippednullkey == 1 } { - puts $oid "" - } - close $oid - - # Now compare the keys to see if they match the dictionary (or ints) - filesort $t1 $t3 - - error_check_good NDBM:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - error_check_good ndbm_close [$db close] 0 -} diff --git a/storage/bdb/test/parallel.tcl b/storage/bdb/test/parallel.tcl deleted file mode 100644 index bd1f468fa5d..00000000000 --- a/storage/bdb/test/parallel.tcl +++ /dev/null @@ -1,383 +0,0 @@ -# Code to load up the tests in to the Queue database -# $Id: parallel.tcl,v 11.46 2004/09/22 18:01:05 bostic Exp $ -proc load_queue { file {dbdir RUNQUEUE} nitems } { - global serial_tests - global num_serial - 
global num_parallel - - puts -nonewline "Loading run queue with $nitems items..." - flush stdout - - set env [berkdb_env -create -lock -home $dbdir] - error_check_good dbenv [is_valid_env $env] TRUE - - # Open two databases, one for tests that may be run - # in parallel, the other for tests we want to run - # while only a single process is testing. - set db [eval {berkdb_open -env $env -create \ - -mode 0644 -len 200 -queue queue.db} ] - error_check_good dbopen [is_valid_db $db] TRUE - set serialdb [eval {berkdb_open -env $env -create \ - -mode 0644 -len 200 -queue serialqueue.db} ] - error_check_good dbopen [is_valid_db $serialdb] TRUE - - set fid [open $file] - - set count 0 - - while { [gets $fid str] != -1 } { - set testarr($count) $str - incr count - } - - # Randomize array of tests. - set rseed [pid] - berkdb srand $rseed - puts -nonewline "randomizing..." - flush stdout - for { set i 0 } { $i < $count } { incr i } { - set tmp $testarr($i) - - # RPC test is very long so force it to run first - # in full runs. If we find 'r rpc' as we walk the - # array, arrange to put it in slot 0 ... - if { [is_substr $tmp "r rpc"] == 1 && \ - [string match $nitems ALL] } { - set j 0 - } else { - set j [berkdb random_int $i [expr $count - 1]] - } - # ... and if 'r rpc' is selected to be swapped with the - # current item in the array, skip the swap. If we - # did the swap and moved to the next item, "r rpc" would - # never get moved to slot 0. - if { [is_substr $testarr($j) "r rpc"] && \ - [string match $nitems ALL] } { - continue - } - - set testarr($i) $testarr($j) - set testarr($j) $tmp - } - - if { [string compare ALL $nitems] != 0 } { - set maxload $nitems - } else { - set maxload $count - } - - puts "loading..." - flush stdout - set num_serial 0 - set num_parallel 0 - for { set i 0 } { $i < $maxload } { incr i } { - set str $testarr($i) - # Push serial tests into serial testing db, others - # into parallel db. 
- if { [is_serial $str] } { - set ret [eval {$serialdb put -append $str}] - error_check_good put:serialdb [expr $ret > 0] 1 - incr num_serial - } else { - set ret [eval {$db put -append $str}] - error_check_good put:paralleldb [expr $ret > 0] 1 - incr num_parallel - } - } - - error_check_good maxload $maxload [expr $num_serial + $num_parallel] - puts "Loaded $maxload records: $num_serial in serial,\ - $num_parallel in parallel." - close $fid - $db close - $serialdb close - $env close -} - -proc init_runqueue { {dbdir RUNQUEUE} nitems list} { - - if { [file exists $dbdir] != 1 } { - file mkdir $dbdir - } - puts "Creating test list..." - $list ALL -n - load_queue ALL.OUT $dbdir $nitems - file delete TEST.LIST - file rename ALL.OUT TEST.LIST -} - -proc run_parallel { nprocs {list run_all} {nitems ALL} } { - global num_serial - global num_parallel - - # Forcibly remove stuff from prior runs, if it's still there. - fileremove -f ./RUNQUEUE - set dirs [glob -nocomplain ./PARALLEL_TESTDIR.*] - set files [glob -nocomplain ALL.OUT.*] - foreach file $files { - fileremove -f $file - } - foreach dir $dirs { - fileremove -f $dir - } - - set basename ./PARALLEL_TESTDIR - set queuedir ./RUNQUEUE - source ./include.tcl - - mkparalleldirs $nprocs $basename $queuedir - - init_runqueue $queuedir $nitems $list - - set basedir [pwd] - set queuedir ../../[string range $basedir \ - [string last "/" $basedir] end]/$queuedir - - # Run serial tests in parallel testdir 0. - run_queue 0 $basename.0 $queuedir serial $num_serial - - set pidlist {} - # Run parallel tests in testdirs 1 through n. 
- for { set i 1 } { $i <= $nprocs } { incr i } { - set ret [catch { - set p [exec $tclsh_path << \ - "source $test_path/test.tcl; run_queue $i \ - $basename.$i $queuedir parallel $num_parallel" &] - lappend pidlist $p - set f [open $testdir/begin.$p w] - close $f - } res] - } - watch_procs $pidlist 300 360000 - - set failed 0 - for { set i 0 } { $i <= $nprocs } { incr i } { - if { [file exists ALL.OUT.$i] == 1 } { - puts -nonewline "Checking output from ALL.OUT.$i ... " - if { [check_output ALL.OUT.$i] == 1 } { - set failed 1 - } - puts " done." - } - } - if { $failed == 0 } { - puts "Regression tests succeeded." - } else { - puts "Regression tests failed." - puts "Review UNEXPECTED OUTPUT lines above for errors." - puts "Complete logs found in ALL.OUT.x files" - } -} - -proc run_queue { i rundir queuedir {qtype parallel} {nitems 0} } { - set builddir [pwd] - file delete $builddir/ALL.OUT.$i - cd $rundir - - puts "Starting $qtype run_queue process $i (pid [pid])." - - source ./include.tcl - global env - - set dbenv [berkdb_env -create -lock -home $queuedir] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - if { $qtype == "parallel" } { - set db [eval {berkdb_open -env $dbenv \ - -mode 0644 -queue queue.db} ] - error_check_good dbopen [is_valid_db $db] TRUE - } elseif { $qtype == "serial" } { - set db [eval {berkdb_open -env $dbenv \ - -mode 0644 -queue serialqueue.db} ] - error_check_good serialdbopen [is_valid_db $db] TRUE - } else { - puts "FAIL: queue type $qtype not recognized" - } - - set dbc [eval $db cursor] - error_check_good cursor [is_valid_cursor $dbc $db] TRUE - - set count 0 - set waitcnt 0 - set starttime [timestamp -r] - - while { $waitcnt < 5 } { - set line [$db get -consume] - if { [ llength $line ] > 0 } { - set cmd [lindex [lindex $line 0] 1] - set num [lindex [lindex $line 0] 0] - set o [open $builddir/ALL.OUT.$i a] - puts $o "\nExecuting record $num ([timestamp -w]):\n" - set tdir "TESTDIR.$i" - regsub {TESTDIR} $cmd $tdir cmd - puts $o 
$cmd - close $o - if { [expr {$num % 10} == 0] && $nitems != 0 } { - puts -nonewline \ - "Starting test $num of $nitems $qtype items. " - set now [timestamp -r] - set elapsed [expr $now - $starttime] - set esttotal [expr $nitems * $elapsed / $num] - set remaining [expr $esttotal - $elapsed] - if { $remaining < 3600 } { - puts "\tRough guess: less than 1\ - hour left." - } else { - puts "\tRough guess: \ - [expr $remaining / 3600] hour(s) left." - } - } -# puts "Process $i, record $num:\n$cmd" - set env(PURIFYOPTIONS) \ - "-log-file=./test$num.%p -follow-child-processes -messages=first" - set env(PURECOVOPTIONS) \ - "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes" - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl; $cmd" \ - >>& $builddir/ALL.OUT.$i } res] { - set o [open $builddir/ALL.OUT.$i a] - puts $o "FAIL: '$cmd': $res" - close $o - } - env_cleanup $testdir - set o [open $builddir/ALL.OUT.$i a] - puts $o "\nEnding record $num ([timestamp])\n" - close $o - incr count - } else { - incr waitcnt - tclsleep 1 - } - } - - set now [timestamp -r] - set elapsed [expr $now - $starttime] - puts "Process $i: $count commands executed in [format %02u:%02u \ - [expr $elapsed / 3600] [expr ($elapsed % 3600) / 60]]" - - error_check_good close_parallel_cursor_$i [$dbc close] 0 - error_check_good close_parallel_db_$i [$db close] 0 - error_check_good close_parallel_env_$i [$dbenv close] 0 - - # - # We need to put the pid file in the builddir's idea - # of testdir, not this child process' local testdir. - # Therefore source builddir's include.tcl to get its - # testdir. - # !!! This resets testdir, so don't do anything else - # local to the child after this. 
- source $builddir/include.tcl - - set f [open $builddir/$testdir/end.[pid] w] - close $f - cd $builddir -} - -proc mkparalleldirs { nprocs basename queuedir } { - source ./include.tcl - set dir [pwd] - - if { $is_windows_test != 1 } { - set EXE "" - } else { - set EXE ".exe" - } - for { set i 0 } { $i <= $nprocs } { incr i } { - set destdir $basename.$i - catch {file mkdir $destdir} - puts "Created $destdir" - if { $is_windows_test == 1 } { - catch {file mkdir $destdir/Debug} - catch {eval file copy \ - [eval glob {$dir/Debug/*.dll}] $destdir/Debug} - } - catch {eval file copy \ - [eval glob {$dir/{.libs,include.tcl}}] $destdir} - # catch {eval file copy $dir/$queuedir $destdir} - catch {eval file copy \ - [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \ - {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \ - {$dir/db_{archive,verify}$EXE}] \ - $destdir} - - # Create modified copies of include.tcl in parallel - # directories so paths still work. - - set infile [open ./include.tcl r] - set d [read $infile] - close $infile - - regsub {test_path } $d {test_path ../} d - regsub {src_root } $d {src_root ../} d - set tdir "TESTDIR.$i" - regsub -all {TESTDIR} $d $tdir d - regsub {KILL \.} $d {KILL ..} d - set outfile [open $destdir/include.tcl w] - puts $outfile $d - close $outfile - - global svc_list - foreach svc_exe $svc_list { - if { [file exists $dir/$svc_exe] } { - catch {eval file copy $dir/$svc_exe $destdir} - } - } - } -} - -proc run_ptest { nprocs test args } { - global parms - set basename ./PARALLEL_TESTDIR - set queuedir NULL - source ./include.tcl - - mkparalleldirs $nprocs $basename $queuedir - - if { [info exists parms($test)] } { - foreach method \ - "hash queue queueext recno rbtree frecno rrecno btree" { - if { [eval exec_ptest $nprocs $basename \ - $test $method $args] != 0 } { - break - } - } - } else { - eval exec_ptest $nprocs $basename $test $args - } -} - -proc exec_ptest { nprocs basename test args } { - source ./include.tcl - - set 
basedir [pwd] - set pidlist {} - puts "Running $nprocs parallel runs of $test" - for { set i 1 } { $i <= $nprocs } { incr i } { - set outf ALL.OUT.$i - fileremove -f $outf - set ret [catch { - set p [exec $tclsh_path << \ - "cd $basename.$i;\ - source ../$test_path/test.tcl;\ - $test $args" >& $outf &] - lappend pidlist $p - set f [open $testdir/begin.$p w] - close $f - } res] - } - watch_procs $pidlist 30 36000 - set failed 0 - for { set i 1 } { $i <= $nprocs } { incr i } { - if { [check_output ALL.OUT.$i] == 1 } { - set failed 1 - puts "Test $test failed in process $i." - } - } - if { $failed == 0 } { - puts "Test $test succeeded all processes" - return 0 - } else { - puts "Test failed: stopping" - return 1 - } -} diff --git a/storage/bdb/test/recd001.tcl b/storage/bdb/test/recd001.tcl deleted file mode 100644 index 67ad8004cc2..00000000000 --- a/storage/bdb/test/recd001.tcl +++ /dev/null @@ -1,242 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd001.tcl,v 11.43 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd001 -# TEST Per-operation recovery tests for non-duplicate, non-split -# TEST messages. Makes sure that we exercise redo, undo, and do-nothing -# TEST condition. Any test that appears with the message (change state) -# TEST indicates that we've already run the particular test, but we are -# TEST running it again so that we can change the state of the data base -# TEST to prepare for the next test (this applies to all other recovery -# TEST tests as well). -# TEST -# TEST These are the most basic recovery tests. We do individual recovery -# TEST tests for each operation in the access method interface. First we -# TEST create a file and capture the state of the database (i.e., we copy -# TEST it. Then we run a transaction containing a single operation. 
In -# TEST one test, we abort the transaction and compare the outcome to the -# TEST original copy of the file. In the second test, we restore the -# TEST original copy of the database and then run recovery and compare -# TEST this against the actual database. -proc recd001 { method {select 0} args} { - global fixed_len - source ./include.tcl - - set orig_fixed_len $fixed_len - set opts [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd001: $method operation/transaction tests" - - # Create the database and environment. - env_cleanup $testdir - - # The recovery tests were originally written to - # do a command, abort, do it again, commit, and then - # repeat the sequence with another command. Each command - # tends to require that the previous command succeeded and - # left the database a certain way. To avoid cluttering up the - # op_recover interface as well as the test code, we create two - # databases; one does abort and then commit for each op, the - # other does prepare, prepare-abort, and prepare-commit for each - # op. If all goes well, this allows each command to depend - # exactly one successful iteration of the previous command. - set testfile recd001.db - set testfile2 recd001-2.db - - set flags "-create -txn -home $testdir" - - puts "\tRecd001.a.0: creating environment" - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # - # We need to create a database to get the pagesize (either - # the default or whatever might have been specified). - # Then remove it so we can compute fixed_len and create the - # real database. - set oflags "-create $omethod -mode 0644 \ - -env $dbenv $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set stat [$db stat] - # - # Compute the fixed_len based on the pagesize being used. - # We want the fixed_len to be 1/4 the pagesize. 
- # - set pg [get_pagesize $stat] - error_check_bad get_pagesize $pg -1 - set fixed_len [expr $pg / 4] - error_check_good db_close [$db close] 0 - error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0 - - # Convert the args again because fixed_len is now real. - # Create the databases and close the environment. - # cannot specify db truncate in txn protected env!!! - set opts [convert_args $method ""] - set omethod [convert_method $method] - set oflags "-create $omethod -mode 0644 \ - -env $dbenv $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - set oflags "-create $omethod -mode 0644 \ - -env $dbenv $opts $testfile2" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - error_check_good env_close [$dbenv close] 0 - - puts "\tRecd001.a.1: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile - - # List of recovery tests: {CMD MSG} pairs. 
- set rlist { - { {DB put -txn TXNID $key $data} "Recd001.b: put"} - { {DB del -txn TXNID $key} "Recd001.c: delete"} - { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"} - { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"} - { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"} - { {DB del -txn TXNID $key} "Recd001.g: big data delete"} - { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"} - { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"} - { {DB put -txn TXNID -partial "$off $len" $key $partial_grow} - "Recd001.j: partial put growing"} - { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"} - { {DB put -txn TXNID -partial "$off $len" $key $partial_shrink} - "Recd001.l: partial put shrinking"} - { {DB put -txn TXNID -append $data} "Recd001.m: put -append"} - { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"} - } - - # These are all the data values that we're going to need to read - # through the operation table and run the recovery tests. 
- - if { [is_record_based $method] == 1 } { - set key 1 - } else { - set key recd001_key - } - set data recd001_data - set newdata NEWrecd001_dataNEW - set off 3 - set len 12 - - set partial_grow replacement_record_grow - set partial_shrink xxx - if { [is_fixed_length $method] == 1 } { - set len [string length $partial_grow] - set partial_shrink $partial_grow - } - set bigdata [replicate $key $fixed_len] - if { [is_record_based $method] == 1 } { - set bigkey $fixed_len - } else { - set bigkey [replicate $key $fixed_len] - } - - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - - if { [is_queue $method] != 1 } { - if { [string first append $cmd] != -1 } { - continue - } - if { [string first consume $cmd] != -1 } { - continue - } - } - -# if { [is_fixed_length $method] == 1 } { -# if { [string first partial $cmd] != -1 } { -# continue -# } -# } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - # - # Note that since prepare-discard ultimately aborts - # the txn, it must come before prepare-commit. - # - op_recover prepare-abort $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-discard $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-commit $testdir $env_cmd $testfile2 \ - $cmd $msg - } - set fixed_len $orig_fixed_len - - if { [is_fixed_length $method] == 1 } { - puts "Skipping remainder of test for fixed length methods" - return - } - - # - # Check partial extensions. If we add a key/data to the database - # and then expand it using -partial, then recover, recovery was - # failing in #3944. Check that scenario here. - # - # !!! - # We loop here because on each iteration, we need to clean up - # the old env (i.e. 
this test does not depend on earlier runs). - # If we run it without cleaning up the env inbetween, we do not - # test the scenario of #3944. - # - set len [string length $data] - set len2 256 - set part_data [replicate "abcdefgh" 32] - set p [list 0 $len] - set cmd [subst \ - {DB put -txn TXNID -partial "$len $len2" $key $part_data}] - set msg "Recd001.o: partial put prepopulated/expanding" - foreach op {abort commit prepare-abort prepare-discard prepare-commit} { - env_cleanup $testdir - - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - set t [$dbenv txn] - error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -txn $t $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -txn $t $opts $testfile2" - set db2 [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db2] TRUE - - set ret [$db put -txn $t -partial $p $key $data] - error_check_good dbput $ret 0 - - set ret [$db2 put -txn $t -partial $p $key $data] - error_check_good dbput $ret 0 - error_check_good txncommit [$t commit] 0 - error_check_good dbclose [$db close] 0 - error_check_good dbclose [$db2 close] 0 - error_check_good dbenvclose [$dbenv close] 0 - - op_recover $op $testdir $env_cmd $testfile $cmd $msg - } - return -} diff --git a/storage/bdb/test/recd002.tcl b/storage/bdb/test/recd002.tcl deleted file mode 100644 index 6189c5e2304..00000000000 --- a/storage/bdb/test/recd002.tcl +++ /dev/null @@ -1,103 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd002.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd002 -# TEST Split recovery tests. For every known split log message, makes sure -# TEST that we exercise redo, undo, and do-nothing condition. 
-proc recd002 { method {select 0} args} { - source ./include.tcl - global rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd002: skipping for specific pagesizes" - return - } - berkdb srand $rand_init - - # Queues don't do splits, so we don't really need the small page - # size and the small page size is smaller than the record, so it's - # a problem. - if { [string compare $omethod "-queue"] == 0 } { - set pagesize 4096 - } else { - set pagesize 512 - } - puts "Recd002: $method split recovery tests" - - env_cleanup $testdir - set testfile recd002.db - set testfile2 recd002-2.db - set eflags \ - "-create -txn -lock_max 2000 -home $testdir" - - puts "\tRecd002.a: creating environment" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_bad dbenv $dbenv NULL - - # Create the databases. We will use a small page size so that splits - # happen fairly quickly. - set oflags "-create $args $omethod -mode 0644 -env $dbenv\ - -pagesize $pagesize $testfile" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - set oflags "-create $args $omethod -mode 0644 -env $dbenv\ - -pagesize $pagesize $testfile2" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - reset_env $dbenv - - # List of recovery tests: {CMD MSG} pairs - set slist { - { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"} - { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"} - } - - # If pages are 512 bytes, then adding 512 key/data pairs - # should be more than sufficient. 
- set n 512 - set r [expr $n / 2 ] - foreach pair $slist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - # - # Note that since prepare-discard ultimately aborts - # the txn, it must come before prepare-commit. - # - op_recover prepare-abort $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-discard $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-commit $testdir $env_cmd $testfile2 \ - $cmd $msg - } - - puts "\tRecd002.d: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} diff --git a/storage/bdb/test/recd003.tcl b/storage/bdb/test/recd003.tcl deleted file mode 100644 index b6e799b3c18..00000000000 --- a/storage/bdb/test/recd003.tcl +++ /dev/null @@ -1,119 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd003.tcl,v 11.32 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd003 -# TEST Duplicate recovery tests. For every known duplicate log message, -# TEST makes sure that we exercise redo, undo, and do-nothing condition. -# TEST -# TEST Test all the duplicate log messages and recovery operations. We make -# TEST sure that we exercise all possible recovery actions: redo, undo, undo -# TEST but no fix necessary and redo but no fix necessary. 
-proc recd003 { method {select 0} args } { - source ./include.tcl - global rand_init - - set largs [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Recd003 skipping for method $method" - return - } - puts "Recd003: $method duplicate recovery tests" - - berkdb srand $rand_init - - env_cleanup $testdir - # See comment in recd001.tcl for why there are two database files... - set testfile recd003.db - set testfile2 recd003-2.db - set eflags "-create -txn -home $testdir" - - puts "\tRecd003.a: creating environment" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_bad dbenv $dbenv NULL - - # Create the databases. - set oflags \ - "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - set oflags \ - "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - reset_env $dbenv - - # These are all the data values that we're going to need to read - # through the operation table and run the recovery tests. 
- set n 10 - set dupn 2000 - set bign 500 - - # List of recovery tests: {CMD MSG} pairs - set dlist { - { {populate DB $omethod TXNID $n 1 0} - "Recd003.b: add dups"} - { {DB del -txn TXNID duplicate_key} - "Recd003.c: remove dups all at once"} - { {populate DB $omethod TXNID $n 1 0} - "Recd003.d: add dups (change state)"} - { {unpopulate DB TXNID 0} - "Recd003.e: remove dups 1 at a time"} - { {populate DB $omethod TXNID $dupn 1 0} - "Recd003.f: dup split"} - { {DB del -txn TXNID duplicate_key} - "Recd003.g: remove dups (change state)"} - { {populate DB $omethod TXNID $n 1 1} - "Recd003.h: add big dup"} - { {DB del -txn TXNID duplicate_key} - "Recd003.i: remove big dup all at once"} - { {populate DB $omethod TXNID $n 1 1} - "Recd003.j: add big dup (change state)"} - { {unpopulate DB TXNID 0} - "Recd003.k: remove big dup 1 at a time"} - { {populate DB $omethod TXNID $bign 1 1} - "Recd003.l: split big dup"} - } - - foreach pair $dlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - # - # Note that since prepare-discard ultimately aborts - # the txn, it must come before prepare-commit. 
- # - op_recover prepare-abort $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-discard $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-commit $testdir $env_cmd $testfile2 \ - $cmd $msg - } - - puts "\tRecd003.m: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} diff --git a/storage/bdb/test/recd004.tcl b/storage/bdb/test/recd004.tcl deleted file mode 100644 index 72e66b5e674..00000000000 --- a/storage/bdb/test/recd004.tcl +++ /dev/null @@ -1,95 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd004.tcl,v 11.31 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd004 -# TEST Big key test where big key gets elevated to internal page. -proc recd004 { method {select 0} args} { - source ./include.tcl - global rand_init - - set opts [convert_args $method $args] - set omethod [convert_method $method] - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd004: skipping for specific pagesizes" - return - } - if { [is_record_based $method] == 1 } { - puts "Recd004 skipping for method $method" - return - } - puts "Recd004: $method big-key on internal page recovery tests" - - berkdb srand $rand_init - - env_cleanup $testdir - set testfile recd004.db - set testfile2 recd004-2.db - set eflags "-create -txn -home $testdir" - puts "\tRecd004.a: creating environment" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_bad dbenv $dbenv NULL - - # Create the databases. 
We will use a small page size so that we - # elevate quickly - set oflags "-create -mode 0644 \ - $omethod -env $dbenv $opts -pagesize 512 $testfile" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - set oflags "-create -mode 0644 \ - $omethod -env $dbenv $opts -pagesize 512 $testfile2" - set db [eval {berkdb_open} $oflags] - error_check_bad db_open $db NULL - error_check_good db_open [is_substr $db db] 1 - error_check_good db_close [$db close] 0 - reset_env $dbenv - - # List of recovery tests: {CMD MSG} pairs - set slist { - { {big_populate DB TXNID $n} "Recd004.b: big key elevation"} - { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"} - } - - # If pages are 512 bytes, then adding 512 key/data pairs - # should be more than sufficient. - set n 512 - foreach pair $slist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - # - # Note that since prepare-discard ultimately aborts - # the txn, it must come before prepare-commit. 
- # - op_recover prepare-abort $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-discard $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-commit $testdir $env_cmd $testfile2 \ - $cmd $msg - } - - puts "\tRecd004.d: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} diff --git a/storage/bdb/test/recd005.tcl b/storage/bdb/test/recd005.tcl deleted file mode 100644 index df6a2df3a53..00000000000 --- a/storage/bdb/test/recd005.tcl +++ /dev/null @@ -1,230 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd005.tcl,v 11.36 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd005 -# TEST Verify reuse of file ids works on catastrophic recovery. -# TEST -# TEST Make sure that we can do catastrophic recovery even if we open -# TEST files using the same log file id. -proc recd005 { method args} { - source ./include.tcl - global rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd005: $method catastrophic recovery" - - berkdb srand $rand_init - - set testfile1 recd005.1.db - set testfile2 recd005.2.db - set eflags \ - "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir" - - set tnum 0 - foreach sizes "{1000 10} {10 1000}" { - foreach ops "{abort abort} {abort commit} {commit abort} \ - {commit commit}" { - env_cleanup $testdir - incr tnum - - set s1 [lindex $sizes 0] - set s2 [lindex $sizes 1] - set op1 [lindex $ops 0] - set op2 [lindex $ops 1] - puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2" - - puts "\tRecd005.$tnum.a: creating environment" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_bad dbenv $dbenv NULL - - # Create the two databases. 
- set oflags \ - "-create -mode 0644 -env $dbenv $args $omethod" - set db1 [eval {berkdb_open} $oflags $testfile1] - error_check_bad db_open $db1 NULL - error_check_good db_open [is_substr $db1 db] 1 - error_check_good db_close [$db1 close] 0 - - set db2 [eval {berkdb_open} $oflags $testfile2] - error_check_bad db_open $db2 NULL - error_check_good db_open [is_substr $db2 db] 1 - error_check_good db_close [$db2 close] 0 - $dbenv close - - set dbenv [eval $env_cmd] - puts "\tRecd005.$tnum.b: Populating databases" - do_one_file \ - $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1 - do_one_file \ - $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2 - - puts "\tRecd005.$tnum.c: Verifying initial population" - check_file $testdir $env_cmd $testfile1 $op1 - check_file $testdir $env_cmd $testfile2 $op2 - - # Now, close the environment (so that recovery will work - # on NT which won't allow delete of an open file). - reset_env $dbenv - - berkdb debug_check - puts -nonewline \ - "\tRecd005.$tnum.d: About to run recovery ... " - flush stdout - - set stat [catch \ - {exec $util_path/db_recover -h $testdir -c} \ - result] - if { $stat == 1 } { - error "Recovery error: $result." - } - puts "complete" - - # Substitute a file that will need recovery and try - # running recovery again. - if { $op1 == "abort" } { - file copy -force $testdir/$testfile1.afterop \ - $testdir/$testfile1 - move_file_extent $testdir $testfile1 \ - afterop copy - } else { - file copy -force $testdir/$testfile1.init \ - $testdir/$testfile1 - move_file_extent $testdir $testfile1 init copy - } - if { $op2 == "abort" } { - file copy -force $testdir/$testfile2.afterop \ - $testdir/$testfile2 - move_file_extent $testdir $testfile2 \ - afterop copy - } else { - file copy -force $testdir/$testfile2.init \ - $testdir/$testfile2 - move_file_extent $testdir $testfile2 init copy - } - - berkdb debug_check - puts -nonewline "\tRecd005.$tnum.e:\ - About to run recovery on pre-op database ... 
" - flush stdout - - set stat \ - [catch {exec $util_path/db_recover \ - -h $testdir -c} result] - if { $stat == 1 } { - error "Recovery error: $result." - } - puts "complete" - - set dbenv [eval $env_cmd] - check_file $testdir $env_cmd $testfile1 $op1 - check_file $testdir $env_cmd $testfile2 $op2 - reset_env $dbenv - - puts "\tRecd005.$tnum.f:\ - Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch \ - {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile - } - } -} - -proc do_one_file { dir method env env_cmd filename num op } { - source ./include.tcl - - set init_file $dir/$filename.t1 - set afterop_file $dir/$filename.t2 - set final_file $dir/$filename.t3 - - # Save the initial file and open the environment and the first file - file copy -force $dir/$filename $dir/$filename.init - copy_extent_file $dir $filename init - set oflags "-auto_commit -unknown -env $env" - set db [eval {berkdb_open} $oflags $filename] - - # Dump out file contents for initial case - open_and_dump_file $filename $env $init_file nop \ - dump_file_direction "-first" "-next" - - set txn [$env txn] - error_check_bad txn_begin $txn NULL - error_check_good txn_begin [is_substr $txn $env] 1 - - # Now fill in the db and the txnid in the command - populate $db $method $txn $num 0 0 - - # Sync the file so that we can capture a snapshot to test - # recovery. - error_check_good sync:$db [$db sync] 0 - file copy -force $dir/$filename $dir/$filename.afterop - copy_extent_file $dir $filename afterop - open_and_dump_file $testdir/$filename.afterop NULL \ - $afterop_file nop dump_file_direction "-first" "-next" - error_check_good txn_$op:$txn [$txn $op] 0 - - if { $op == "commit" } { - puts "\t\tFile $filename executed and committed." - } else { - puts "\t\tFile $filename executed and aborted." - } - - # Dump out file and save a copy. 
- error_check_good sync:$db [$db sync] 0 - open_and_dump_file $testdir/$filename NULL $final_file nop \ - dump_file_direction "-first" "-next" - file copy -force $dir/$filename $dir/$filename.final - copy_extent_file $dir $filename final - - # If this is an abort, it should match the original file. - # If this was a commit, then this file should match the - # afterop file. - if { $op == "abort" } { - filesort $init_file $init_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(initial,post-$op):diff($init_file,$final_file) \ - [filecmp $init_file.sort $final_file.sort] 0 - } else { - filesort $afterop_file $afterop_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(post-$op,pre-commit):diff($afterop_file,$final_file) \ - [filecmp $afterop_file.sort $final_file.sort] 0 - } - - error_check_good close:$db [$db close] 0 -} - -proc check_file { dir env_cmd filename op } { - source ./include.tcl - - set init_file $dir/$filename.t1 - set afterop_file $dir/$filename.t2 - set final_file $dir/$filename.t3 - - open_and_dump_file $testdir/$filename NULL $final_file nop \ - dump_file_direction "-first" "-next" - if { $op == "abort" } { - filesort $init_file $init_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(initial,post-$op):diff($init_file,$final_file) \ - [filecmp $init_file.sort $final_file.sort] 0 - } else { - filesort $afterop_file $afterop_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(pre-commit,post-$op):diff($afterop_file,$final_file) \ - [filecmp $afterop_file.sort $final_file.sort] 0 - } -} diff --git a/storage/bdb/test/recd006.tcl b/storage/bdb/test/recd006.tcl deleted file mode 100644 index bca968e5bb3..00000000000 --- a/storage/bdb/test/recd006.tcl +++ /dev/null @@ -1,262 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: recd006.tcl,v 11.28 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd006 -# TEST Nested transactions. -proc recd006 { method {select 0} args} { - global kvals - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Recd006 skipping for method $method" - return - } - puts "Recd006: $method nested transactions" - - # Create the database and environment. - env_cleanup $testdir - - set dbfile recd006.db - set testfile $testdir/$dbfile - - puts "\tRecd006.a: create database" - set oflags "-create $args $omethod $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good dbopen [is_valid_db $db] TRUE - - # Make sure that we have enough entries to span a couple of - # different pages. - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < 1000 } { - if { [string compare $omethod "-recno"] == 0 } { - set key [expr $count + 1] - } else { - set key $str - } - - set ret [$db put -nooverwrite $key $str] - error_check_good put $ret 0 - - incr count - } - close $did - - # Variables used below: - # p1: a pair of keys that are likely to be on the same page. - # p2: a pair of keys that are likely to be on the same page, - # but on a page different than those in p1. 
- set dbc [$db cursor] - error_check_good dbc [is_substr $dbc $db] 1 - - set ret [$dbc get -first] - error_check_bad dbc_get:DB_FIRST [llength $ret] 0 - set p1 [lindex [lindex $ret 0] 0] - set kvals($p1) [lindex [lindex $ret 0] 1] - - set ret [$dbc get -next] - error_check_bad dbc_get:DB_NEXT [llength $ret] 0 - lappend p1 [lindex [lindex $ret 0] 0] - set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1] - - set ret [$dbc get -last] - error_check_bad dbc_get:DB_LAST [llength $ret] 0 - set p2 [lindex [lindex $ret 0] 0] - set kvals($p2) [lindex [lindex $ret 0] 1] - - set ret [$dbc get -prev] - error_check_bad dbc_get:DB_PREV [llength $ret] 0 - lappend p2 [lindex [lindex $ret 0] 0] - set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1] - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - - # Now create the full transaction environment. - set eflags "-create -txn -home $testdir" - - puts "\tRecd006.b: creating environment" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_bad dbenv $dbenv NULL - - # Reset the environment. 
- reset_env $dbenv - - set p1 [list $p1] - set p2 [list $p2] - - # List of recovery tests: {CMD MSG} pairs - set rlist { - { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit} - "Recd006.c: children (commit commit)"} - { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit} - "Recd006.d: children (commit commit)"} - { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort} - "Recd006.e: children (commit abort)"} - { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort} - "Recd006.f: children (commit abort)"} - { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort} - "Recd006.g: children (abort abort)"} - { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort} - "Recd006.h: children (abort abort)"} - { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit} - "Recd006.i: children (abort commit)"} - { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit} - "Recd006.j: children (abort commit)"} - } - - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - op_recover abort $testdir $env_cmd $dbfile $cmd $msg - op_recover commit $testdir $env_cmd $dbfile $cmd $msg - } - - puts "\tRecd006.k: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} - -# Do the nested transaction test. -# We want to make sure that children inherit properly from their -# parents and that locks are properly handed back to parents -# and that the right thing happens on commit/abort. -# In particular: -# Write lock on parent, properly acquired by child. -# Committed operation on child gives lock to parent so that -# other child can also get the lock. -# Aborted op by child releases lock so other child can get it. 
-# Correct database state if child commits -# Correct database state if child aborts -proc nesttest { db parent env do p1 p2 child1 child2} { - global kvals - source ./include.tcl - - if { $do == 1 } { - set func toupper - } else { - set func tolower - } - - # Do an RMW on the parent to get a write lock. - set p10 [lindex $p1 0] - set p11 [lindex $p1 1] - set p20 [lindex $p2 0] - set p21 [lindex $p2 1] - - set ret [$db get -rmw -txn $parent $p10] - set res $ret - set Dret [lindex [lindex $ret 0] 1] - if { [string compare $Dret $kvals($p10)] == 0 || - [string compare $Dret [string toupper $kvals($p10)]] == 0 } { - set val 0 - } else { - set val $Dret - } - error_check_good get_parent_RMW $val 0 - - # OK, do child 1 - set kid1 [$env txn -parent $parent] - error_check_good kid1 [is_valid_txn $kid1 $env] TRUE - - # Reading write-locked parent object should be OK - #puts "\tRead write-locked parent object for kid1." - set ret [$db get -txn $kid1 $p10] - error_check_good kid1_get10 $ret $res - - # Now update this child - set data [lindex [lindex [string $func $ret] 0] 1] - set ret [$db put -txn $kid1 $p10 $data] - error_check_good kid1_put10 $ret 0 - - #puts "\tKid1 successful put." - - # Now start child2 - #puts "\tBegin txn for kid2." - set kid2 [$env txn -parent $parent] - error_check_good kid2 [is_valid_txn $kid2 $env] TRUE - - # Getting anything in the p1 set should deadlock, so let's - # work on the p2 set. - set data [string $func $kvals($p20)] - #puts "\tPut data for kid2." - set ret [$db put -txn $kid2 $p20 $data] - error_check_good kid2_put20 $ret 0 - - #puts "\tKid2 data put successful." - - # Now let's do the right thing to kid1 - puts -nonewline "\tKid1 $child1..." 
- if { [string compare $child1 "commit"] == 0 } { - error_check_good kid1_commit [$kid1 commit] 0 - } else { - error_check_good kid1_abort [$kid1 abort] 0 - } - puts "complete" - - # In either case, child2 should now be able to get the - # lock, either because it is inherited by the parent - # (commit) or because it was released (abort). - set data [string $func $kvals($p11)] - set ret [$db put -txn $kid2 $p11 $data] - error_check_good kid2_put11 $ret 0 - - # Now let's do the right thing to kid2 - puts -nonewline "\tKid2 $child2..." - if { [string compare $child2 "commit"] == 0 } { - error_check_good kid2_commit [$kid2 commit] 0 - } else { - error_check_good kid2_abort [$kid2 abort] 0 - } - puts "complete" - - # Now, let parent check that the right things happened. - # First get all four values - set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0] - set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0] - set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0] - set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0] - - if { [string compare $child1 "commit"] == 0 } { - error_check_good parent_kid1 $p10_check \ - [string tolower [string $func $kvals($p10)]] - } else { - error_check_good \ - parent_kid1 $p10_check [string tolower $kvals($p10)] - } - if { [string compare $child2 "commit"] == 0 } { - error_check_good parent_kid2 $p11_check \ - [string tolower [string $func $kvals($p11)]] - error_check_good parent_kid2 $p20_check \ - [string tolower [string $func $kvals($p20)]] - } else { - error_check_good parent_kid2 $p11_check $kvals($p11) - error_check_good parent_kid2 $p20_check $kvals($p20) - } - - # Now do a write on the parent for 21 whose lock it should - # either have or should be available. 
- set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]] - error_check_good parent_put21 $ret 0 - - return 0 -} diff --git a/storage/bdb/test/recd007.tcl b/storage/bdb/test/recd007.tcl deleted file mode 100644 index 9764d840f62..00000000000 --- a/storage/bdb/test/recd007.tcl +++ /dev/null @@ -1,906 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd007.tcl,v 11.64 2004/07/07 19:08:21 carol Exp $ -# -# TEST recd007 -# TEST File create/delete tests. -# TEST -# TEST This is a recovery test for create/delete of databases. We have -# TEST hooks in the database so that we can abort the process at various -# TEST points and make sure that the transaction doesn't commit. We -# TEST then need to recover and make sure the file is correctly existing -# TEST or not, as the case may be. -proc recd007 { method args} { - global fixed_len - source ./include.tcl - - set orig_fixed_len $fixed_len - set opts [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd007: $method operation/transaction tests" - - # Create the database and environment. - env_cleanup $testdir - - set testfile recd007.db - set flags "-create -txn -home $testdir" - - puts "\tRecd007.a: creating environment" - set env_cmd "berkdb_env $flags" - - set env [eval $env_cmd] - - # We need to create a database to get the pagesize (either - # the default or whatever might have been specified). - # Then remove it so we can compute fixed_len and create the - # real database. - set oflags "-create $omethod -mode 0644 -env $env $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set stat [$db stat] - # - # Compute the fixed_len based on the pagesize being used. - # We want the fixed_len to be 1/4 the pagesize. 
- # - set pg [get_pagesize $stat] - error_check_bad get_pagesize $pg -1 - set fixed_len [expr $pg / 4] - error_check_good db_close [$db close] 0 - error_check_good dbremove [berkdb dbremove -env $env $testfile] 0 - error_check_good envclose [$env close] 0 - - # Convert the args again because fixed_len is now real. - set opts [convert_args $method ""] - set save_opts $opts - set moreopts {" -lorder 1234 " " -lorder 1234 -chksum " \ - " -lorder 4321 " " -lorder 4321 -chksum "} - - # List of recovery tests: {HOOKS MSG} pairs - # Where each HOOK is a list of {COPY ABORT} - # - set rlist { - { {"none" "preopen"} "Recd007.b0: none/preopen"} - { {"none" "postopen"} "Recd007.b1: none/postopen"} - { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"} - { {"none" "postlog"} "Recd007.b3: none/postlog"} - { {"none" "postsync"} "Recd007.b4: none/postsync"} - { {"postopen" "none"} "Recd007.c0: postopen/none"} - { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"} - { {"postlog" "none"} "Recd007.c2: postlog/none"} - { {"postsync" "none"} "Recd007.c3: postsync/none"} - { {"postopen" "postopen"} "Recd007.d: postopen/postopen"} - { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"} - { {"postopen" "postlog"} "Recd007.f: postopen/postlog"} - { {"postlog" "postlog"} "Recd007.g: postlog/postlog"} - { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"} - { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"} - { {"postlog" "postsync"} "Recd007.j: postlog/postsync"} - { {"postsync" "postsync"} "Recd007.k: postsync/postsync"} - } - - # These are all the data values that we're going to need to read - # through the operation table and run the recovery tests. 
- - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - # - # Run natively - # - file_recover_create $testdir $env_cmd $omethod \ - $save_opts $testfile $cmd $msg - foreach o $moreopts { - set opts $save_opts - append opts $o - file_recover_create $testdir $env_cmd $omethod \ - $opts $testfile $cmd $msg - } - } - - set rlist { - { {"none" "predestroy"} "Recd007.l0: none/predestroy"} - { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"} - { {"predestroy" "none"} "Recd007.m0: predestroy/none"} - { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"} - { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"} - { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"} - { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"} - } - foreach op { dbremove dbrename dbtruncate } { - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - file_recover_delete $testdir $env_cmd $omethod \ - $save_opts $testfile $cmd $msg $op - foreach o $moreopts { - set opts $save_opts - append opts $o - file_recover_delete $testdir $env_cmd $omethod \ - $opts $testfile $cmd $msg $op - } - } - } - - if { $is_windows_test != 1 } { - set env_cmd "berkdb_env_noerr $flags" - do_file_recover_delmk $testdir $env_cmd $method $opts $testfile - } - - puts "\tRecd007.r: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile - set fixed_len $orig_fixed_len - return -} - -proc file_recover_create { dir env_cmd method opts dbfile cmd msg } { - # - # We run this test on each of these scenarios: - # 1. Creating just a database - # 2. Creating a database with a subdb - # 3. 
Creating a 2nd subdb in a database - puts "\t$msg ($opts) create with a database" - do_file_recover_create $dir $env_cmd $method $opts $dbfile \ - 0 $cmd $msg - if { [is_queue $method] == 1 } { - puts "\tSkipping subdatabase tests for method $method" - return - } - puts "\t$msg ($opts) create with a database and subdb" - do_file_recover_create $dir $env_cmd $method $opts $dbfile \ - 1 $cmd $msg - puts "\t$msg ($opts) create with a database and 2nd subdb" - do_file_recover_create $dir $env_cmd $method $opts $dbfile \ - 2 $cmd $msg - -} - -proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } { - global log_log_record_types - source ./include.tcl - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - - env_cleanup $dir - set dflags "-dar" - # Open the environment and set the copy/abort locations - set env [eval $env_cmd] - set copy [lindex $cmd 0] - set abort [lindex $cmd 1] - error_check_good copy_location [is_valid_create_loc $copy] 1 - error_check_good abort_location [is_valid_create_loc $abort] 1 - - if {([string first "logmeta" $copy] != -1 || \ - [string first "logmeta" $abort] != -1) && \ - [is_btree $method] == 0 } { - puts "\tSkipping for method $method" - $env test copy none - $env test abort none - error_check_good env_close [$env close] 0 - return - } - - # Basically non-existence is our initial state. When we - # abort, it is also our final state. 
- # - switch $sub { - 0 { - set oflags "-create $method -auto_commit -mode 0644 \ - -env $env $opts $dbfile" - } - 1 { - set oflags "-create $method -auto_commit -mode 0644 \ - -env $env $opts $dbfile sub0" - } - 2 { - # - # If we are aborting here, then we need to - # create a first subdb, then create a second - # - set oflags "-create $method -auto_commit -mode 0644 \ - -env $env $opts $dbfile sub0" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - set init_file $dir/$dbfile.init - catch { file copy -force $dir/$dbfile $init_file } res - set oflags "-create $method -auto_commit -mode 0644 \ - -env $env $opts $dbfile sub1" - } - default { - puts "\tBad value $sub for sub" - return - } - } - # - # Set our locations to copy and abort - # - set ret [eval $env test copy $copy] - error_check_good test_copy $ret 0 - set ret [eval $env test abort $abort] - error_check_good test_abort $ret 0 - - puts "\t\tExecuting command" - set ret [catch {eval {berkdb_open} $oflags} db] - - # Sync the mpool so any changes to the file that are - # in mpool get written to the disk file before the - # diff. - $env mpool_sync - - # - # If we don't abort, then we expect success. - # If we abort, we expect no file created. - # - if {[string first "none" $abort] == -1} { - # - # Operation was aborted, verify it does - # not exist. - # - puts "\t\tCommand executed and aborted." - error_check_bad db_open ret 0 - - # - # Check that the file does not exist. Final state. - # - if { $sub != 2 } { - error_check_good db_open:exists \ - [file exists $dir/$dbfile] 0 - } else { - error_check_good \ - diff(init,postcreate):diff($init_file,$dir/$dbfile)\ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - } - } else { - # - # Operation was committed, verify it exists. - # - puts "\t\tCommand executed and committed." 
- error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - # - # Check that the file exists. - # - error_check_good db_open [file exists $dir/$dbfile] 1 - set init_file $dir/$dbfile.init - catch { file copy -force $dir/$dbfile $init_file } res - - if { [is_queue $method] == 1 } { - copy_extent_file $dir $dbfile init - } - } - error_check_good env_close [$env close] 0 - - # - # Run recovery here. Should be a no-op. Verify that - # the file still doesn't exist or change (depending on sub) - # when we are done. - # - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - if { $sub != 2 && [string first "none" $abort] == -1} { - # - # Operation was aborted, verify it still does - # not exist. Only done with file creations. - # - error_check_good after_recover1 [file exists $dir/$dbfile] 0 - } else { - # - # Operation was committed or just a subdb was aborted. - # Verify it did not change. - # - error_check_good \ - diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - # - # Need a new copy to get the right LSN into the file. - # - catch { file copy -force $dir/$dbfile $init_file } res - - if { [is_queue $method] == 1 } { - copy_extent_file $dir $dbfile init - } - } - - # If we didn't make a copy, then we are done. - # - if {[string first "none" $copy] != -1} { - return - } - - # - # Now move the .afterop file to $dbfile. Run recovery again. - # - copy_afterop $dir - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." 
- return - } - puts "complete" - if { $sub != 2 && [string first "none" $abort] == -1} { - # - # Operation was aborted, verify it still does - # not exist. Only done with file creations. - # - error_check_good after_recover2 [file exists $dir/$dbfile] 0 - } else { - # - # Operation was committed or just a subdb was aborted. - # Verify it did not change. - # - error_check_good \ - diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - } - -} - -proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } { - # - # We run this test on each of these scenarios: - # 1. Deleting/Renaming just a database - # 2. Deleting/Renaming a database with a subdb - # 3. Deleting/Renaming a 2nd subdb in a database - puts "\t$msg $op ($opts) with a database" - do_file_recover_delete $dir $env_cmd $method $opts $dbfile \ - 0 $cmd $msg $op - if { [is_queue $method] == 1 } { - puts "\tSkipping subdatabase tests for method $method" - return - } - puts "\t$msg $op ($opts) with a database and subdb" - do_file_recover_delete $dir $env_cmd $method $opts $dbfile \ - 1 $cmd $msg $op - puts "\t$msg $op ($opts) with a database and 2nd subdb" - do_file_recover_delete $dir $env_cmd $method $opts $dbfile \ - 2 $cmd $msg $op - -} - -proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } { - global log_log_record_types - source ./include.tcl - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - - env_cleanup $dir - # Open the environment and set the copy/abort locations - set env [eval $env_cmd] - set copy [lindex $cmd 0] - set abort [lindex $cmd 1] - error_check_good copy_location [is_valid_delete_loc $copy] 1 - error_check_good abort_location [is_valid_delete_loc $abort] 1 - - if { [is_record_based $method] == 1 } { - set key1 1 - set key2 2 - } else { - set key1 recd007_key1 - set key2 recd007_key2 - } - set data1 recd007_data0 - set data2 recd007_data1 - 
set data3 NEWrecd007_data2 - - # - # Depending on what sort of subdb we want, if any, our - # args to the open call will be different (and if we - # want a 2nd subdb, we create the first here. - # - # XXX - # For dbtruncate, we want oflags to have "$env" in it, - # not have the value currently in 'env'. That is why - # the '$' is protected below. Later on we use oflags - # but with a new $env we just opened. - # - switch $sub { - 0 { - set subdb "" - set new $dbfile.new - set dflags "-dar" - set oflags "-create $method -auto_commit -mode 0644 \ - -env \$env $opts $dbfile" - } - 1 { - set subdb sub0 - set new $subdb.new - set dflags "" - set oflags "-create $method -auto_commit -mode 0644 \ - -env \$env $opts $dbfile $subdb" - } - 2 { - # - # If we are aborting here, then we need to - # create a first subdb, then create a second - # - set subdb sub1 - set new $subdb.new - set dflags "" - set oflags "-create $method -auto_commit -mode 0644 \ - -env \$env $opts $dbfile sub0" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set txn [$env txn] - set ret [$db put -txn $txn $key1 $data1] - error_check_good db_put $ret 0 - error_check_good commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - set oflags "-create $method -auto_commit -mode 0644 \ - -env \$env $opts $dbfile $subdb" - } - default { - puts "\tBad value $sub for sub" - return - } - } - - # - # Set our locations to copy and abort - # - set ret [eval $env test copy $copy] - error_check_good test_copy $ret 0 - set ret [eval $env test abort $abort] - error_check_good test_abort $ret 0 - - # - # Open our db, add some data, close and copy as our - # init file. 
- # - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set txn [$env txn] - set ret [$db put -txn $txn $key1 $data1] - error_check_good db_put $ret 0 - set ret [$db put -txn $txn $key2 $data2] - error_check_good db_put $ret 0 - error_check_good commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - - $env mpool_sync - - set init_file $dir/$dbfile.init - catch { file copy -force $dir/$dbfile $init_file } res - - if { [is_queue $method] == 1} { - copy_extent_file $dir $dbfile init - } - - # - # If we don't abort, then we expect success. - # If we abort, we expect no file removed. - # - switch $op { - "dbrename" { - set ret [catch { eval {berkdb} $op -env $env -auto_commit \ - $dbfile $subdb $new } remret] - } - "dbremove" { - set ret [catch { eval {berkdb} $op -env $env -auto_commit \ - $dbfile $subdb } remret] - } - "dbtruncate" { - set txn [$env txn] - set db [eval {berkdb_open_noerr -env} \ - $env -auto_commit $dbfile $subdb] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good txnbegin [is_valid_txn $txn $env] TRUE - set ret [catch {$db truncate -txn $txn} remret] - } - } - $env mpool_sync - if { $abort == "none" } { - if { $op == "dbtruncate" } { - error_check_good txncommit [$txn commit] 0 - error_check_good dbclose [$db close] 0 - } - # - # Operation was committed, verify it. - # - puts "\t\tCommand executed and committed." - error_check_good $op $ret 0 - # - # If a dbtruncate, check that truncate returned the number - # of items previously in the database. - # - if { [string compare $op "dbtruncate"] == 0 } { - error_check_good remret $remret 2 - } - recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags - } else { - # - # Operation was aborted, verify it did not change. - # - if { $op == "dbtruncate" } { - error_check_good txnabort [$txn abort] 0 - error_check_good dbclose [$db close] 0 - } - puts "\t\tCommand executed and aborted." 
- error_check_good $op $ret 1 - - # - # Check that the file exists. Final state. - # Compare against initial file. - # - error_check_good post$op.1 [file exists $dir/$dbfile] 1 - error_check_good \ - diff(init,post$op.2):diff($init_file,$dir/$dbfile)\ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - } - $env mpool_sync - error_check_good env_close [$env close] 0 - catch { file copy -force $dir/$dbfile $init_file } res - if { [is_queue $method] == 1} { - copy_extent_file $dir $dbfile init - } - - - # - # Run recovery here. Should be a no-op. Verify that - # the file still doesn't exist or change (depending on abort) - # when we are done. - # - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - - puts "complete" - - if { $abort == "none" } { - # - # Operate was committed. - # - set env [eval $env_cmd] - recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags - error_check_good env_close [$env close] 0 - } else { - # - # Operation was aborted, verify it did not change. - # - berkdb debug_check - error_check_good \ - diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - } - - # - # If we didn't make a copy, then we are done. - # - if {[string first "none" $copy] != -1} { - return - } - - # - # Now restore the .afterop file(s) to their original name. - # Run recovery again. - # - copy_afterop $dir - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." 
- return - } - puts "complete" - - if { [string first "none" $abort] != -1} { - set env [eval $env_cmd] - recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags - error_check_good env_close [$env close] 0 - } else { - # - # Operation was aborted, verify it did not change. - # - error_check_good \ - diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \ - [dbdump_diff $dflags $init_file $dir $dbfile] 0 - } - -} - -# -# This function tests a specific case of recovering after a db removal. -# This is for SR #2538. Basically we want to test that: -# - Make an env. -# - Make/close a db. -# - Remove the db. -# - Create another db of same name. -# - Sync db but leave open. -# - Run recovery. -# - Verify no recovery errors and that new db is there. -proc do_file_recover_delmk { dir env_cmd method opts dbfile } { - global log_log_record_types - source ./include.tcl - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - set omethod [convert_method $method] - - puts "\tRecd007.q: Delete and recreate a database" - env_cleanup $dir - # Open the environment and set the copy/abort locations - set env [eval $env_cmd] - error_check_good env_open [is_valid_env $env] TRUE - - if { [is_record_based $method] == 1 } { - set key 1 - } else { - set key recd007_key - } - set data1 recd007_data - set data2 NEWrecd007_data2 - - set oflags \ - "-create $omethod -auto_commit -mode 0644 $opts $dbfile" - - # - # Open our db, add some data, close and copy as our - # init file. - # - set db [eval {berkdb_open_noerr} -env $env $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set txn [$env txn] - set ret [$db put -txn $txn $key $data1] - error_check_good db_put $ret 0 - error_check_good commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - - set ret \ - [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret] - - # - # Operation was committed, verify it does - # not exist. 
- # - puts "\t\tCommand executed and committed." - error_check_good dbremove $ret 0 - error_check_good dbremove.1 [file exists $dir/$dbfile] 0 - - # - # Now create a new db with the same name. - # - set db [eval {berkdb_open_noerr} -env $env $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set txn [$env txn] - set ret [$db put -txn $txn $key [chop_data $method $data2]] - error_check_good db_put $ret 0 - error_check_good commit [$txn commit] 0 - error_check_good db_sync [$db sync] 0 - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - error_check_good db_recover $stat 0 - error_check_good db_recover.1 [file exists $dir/$dbfile] 1 - # - # Since we ran recovery on the open db/env, we need to - # catch these calls. Basically they are there to clean - # up the Tcl widgets. - # - set stat [catch {$db close} ret] - error_check_bad dbclose_after_remove $stat 0 - error_check_good dbclose_after_remove [is_substr $ret recovery] 1 - set stat [catch {$env close} ret] - error_check_bad envclose_after_remove $stat 0 - error_check_good envclose_after_remove [is_substr $ret recovery] 1 - - # - # Reopen env and db and verify 2nd database is there. 
- # - set env [eval $env_cmd] - error_check_good env_open [is_valid_env $env] TRUE - set db [eval {berkdb_open} -env $env $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set ret [$db get $key] - error_check_good dbget [llength $ret] 1 - set kd [lindex $ret 0] - error_check_good key [lindex $kd 0] $key - error_check_good data2 [lindex $kd 1] [pad_data $method $data2] - - error_check_good dbclose [$db close] 0 - error_check_good envclose [$env close] 0 -} - -proc is_valid_create_loc { loc } { - switch $loc { - none - - preopen - - postopen - - postlogmeta - - postlog - - postsync - { return 1 } - default - { return 0 } - } -} - -proc is_valid_delete_loc { loc } { - switch $loc { - none - - predestroy - - postdestroy - - postremcall - { return 1 } - default - { return 0 } - } -} - -# Do a logical diff on the db dump files. We expect that either -# the files are identical, or if they differ, that it is exactly -# just a free/invalid page. -# Return 1 if they are different, 0 if logically the same (or identical). -# -proc dbdump_diff { flags initfile dir dbfile } { - source ./include.tcl - - set initdump $initfile.dump - set dbdump $dbfile.dump - - set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \ - $initfile} ret] - error_check_good dbdump.init $stat 0 - - # Do a dump without the freelist which should eliminate any - # recovery differences. - set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \ - $dir/$dbfile} ret] - error_check_good dbdump.db $stat 0 - - set stat [filecmp $dir/$dbdump $initdump] - - if {$stat == 0} { - return 0 - } - puts "diff: $dbdump $initdump gives:\n$ret" - return 1 -} - -proc recd007_check { op sub dir dbfile subdb new env oflags } { - # - # No matter how many subdbs we have, dbtruncate will always - # have a file, and if we open our particular db, it should - # have no entries. 
- # - if { $sub == 0 } { - if { $op == "dbremove" } { - error_check_good $op:not-exist \ - [file exists $dir/$dbfile] 0 - } elseif { $op == "dbrename"} { - error_check_good $op:exist \ - [file exists $dir/$dbfile] 0 - error_check_good $op:exist2 \ - [file exists $dir/$dbfile.new] 1 - } else { - error_check_good $op:exist \ - [file exists $dir/$dbfile] 1 - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set dbc [$db cursor] - error_check_good dbc_open \ - [is_valid_cursor $dbc $db] TRUE - set ret [$dbc get -first] - error_check_good dbget1 [llength $ret] 0 - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - } - return - } else { - set t1 $dir/t1 - # - # If we have subdbs, check that all but the last one - # are there, and the last one is correctly operated on. - # - set db [berkdb_open -rdonly -env $env $dbfile] - error_check_good dbopen [is_valid_db $db] TRUE - set c [eval {$db cursor}] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - set d [$c get -last] - if { $op == "dbremove" } { - if { $sub == 1 } { - error_check_good subdb:rem [llength $d] 0 - } else { - error_check_bad subdb:rem [llength $d] 0 - set sdb [lindex [lindex $d 0] 0] - error_check_bad subdb:rem1 $sdb $subdb - } - } elseif { $op == "dbrename"} { - set sdb [lindex [lindex $d 0] 0] - error_check_good subdb:ren $sdb $new - if { $sub != 1 } { - set d [$c get -prev] - error_check_bad subdb:ren [llength $d] 0 - set sdb [lindex [lindex $d 0] 0] - error_check_good subdb:ren1 \ - [is_substr "new" $sdb] 0 - } - } else { - set sdb [lindex [lindex $d 0] 0] - set dbt [berkdb_open -rdonly -env $env $dbfile $sdb] - error_check_good db_open [is_valid_db $dbt] TRUE - set dbc [$dbt cursor] - error_check_good dbc_open \ - [is_valid_cursor $dbc $dbt] TRUE - set ret [$dbc get -first] - error_check_good dbget2 [llength $ret] 0 - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$dbt close] 0 - if { $sub != 1 } { 
- set d [$c get -prev] - error_check_bad subdb:ren [llength $d] 0 - set sdb [lindex [lindex $d 0] 0] - set dbt [berkdb_open -rdonly -env $env \ - $dbfile $sdb] - error_check_good db_open [is_valid_db $dbt] TRUE - set dbc [$db cursor] - error_check_good dbc_open \ - [is_valid_cursor $dbc $db] TRUE - set ret [$dbc get -first] - error_check_bad dbget3 [llength $ret] 0 - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$dbt close] 0 - } - } - error_check_good dbcclose [$c close] 0 - error_check_good db_close [$db close] 0 - } -} - -proc copy_afterop { dir } { - set r [catch { set filecopy [glob $dir/*.afterop] } res] - if { $r == 1 } { - return - } - foreach f $filecopy { - set orig [string range $f 0 \ - [expr [string last "." $f] - 1]] - catch { file rename -force $f $orig} res - } -} diff --git a/storage/bdb/test/recd008.tcl b/storage/bdb/test/recd008.tcl deleted file mode 100644 index b12c757854a..00000000000 --- a/storage/bdb/test/recd008.tcl +++ /dev/null @@ -1,226 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd008.tcl,v 1.30 2004/11/05 00:59:01 mjc Exp $ -# -# TEST recd008 -# TEST Test deeply nested transactions and many-child transactions. -proc recd008 { method {breadth 4} {depth 4} args} { - global kvals - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd008: $method $breadth X $depth deeply nested transactions" - - # Create the database and environment. - env_cleanup $testdir - - set dbfile recd008.db - - puts "\tRecd008.a: create database" - set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Make sure that we have enough entries to span a couple of - # different pages. 
- set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < 1000 } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - if { $count == 500} { - set p1 $key - set kvals($p1) $str - } - set ret [$db put $key [chop_data $method $str]] - error_check_good put $ret 0 - - incr count - } - close $did - error_check_good db_close [$db close] 0 - - set txn_max [expr int([expr pow($breadth,$depth)])] - if { $txn_max < 20 } { - set txn_max 20 - } - puts "\tRecd008.b: create environment for $txn_max transactions" - - set lock_max 2500 - set eflags "-mode 0644 -create -lock_max $lock_max -txn_max $txn_max \ - -txn -home $testdir" - set env_cmd "berkdb_env $eflags" - set dbenv [eval $env_cmd] - error_check_good env_open [is_valid_env $dbenv] TRUE - - reset_env $dbenv - - set rlist { - { {recd008_parent abort ENV DB $method $p1 TXNID 1 1 $breadth $depth} - "Recd008.c: child abort parent" } - { {recd008_parent commit ENV DB $method $p1 TXNID 1 1 $breadth $depth} - "Recd008.d: child commit parent" } - } - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - op_recover abort $testdir $env_cmd $dbfile $cmd $msg - recd008_setkval $dbfile $p1 - op_recover commit $testdir $env_cmd $dbfile $cmd $msg - recd008_setkval $dbfile $p1 - } - - puts "\tRecd008.e: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} - -proc recd008_setkval { dbfile p1 } { - global kvals - source ./include.tcl - - set db [berkdb_open $testdir/$dbfile] - error_check_good dbopen [is_valid_db $db] TRUE - set ret [$db get $p1] - error_check_good dbclose [$db close] 0 - set kvals($p1) [lindex [lindex $ret 0] 1] -} - -# This is a lot like the op_recover procedure. We cannot use that -# because it was not meant to be called recursively. 
This proc -# knows about depth/breadth and file naming so that recursive calls -# don't overwrite various initial and afterop files, etc. -# -# The basic flow of this is: -# (Initial file) -# Parent begin transaction (in op_recover) -# Parent starts children -# Recursively call recd008_recover -# (children modify p1) -# Parent modifies p1 -# (Afterop file) -# Parent commit/abort (in op_recover) -# (Final file) -# Recovery test (in op_recover) -proc recd008_parent { op env db method p1key parent b0 d0 breadth depth } { - global kvals - source ./include.tcl - - # - # Save copy of original data - # Acquire lock on data - # - set olddata [pad_data $method $kvals($p1key)] - set ret [$db get -rmw -txn $parent $p1key] - set Dret [lindex [lindex $ret 0] 1] - error_check_good get_parent_RMW $Dret $olddata - - # - # Parent spawns off children - # - set ret [recd008_txn $op $env $db $method $p1key $parent \ - $b0 $d0 $breadth $depth] - - puts "Child runs complete. Parent modifies data." - - # - # Parent modifies p1 - # - set newdata $olddata.parent - set ret [$db put -txn $parent $p1key [chop_data $method $newdata]] - error_check_good db_put $ret 0 - - # - # Save value in kvals for later comparison - # - switch $op { - "commit" { - set kvals($p1key) $newdata - } - "abort" { - set kvals($p1key) $olddata - } - } - return 0 -} - -proc recd008_txn { op env db method p1key parent b0 d0 breadth depth } { - global log_log_record_types - global kvals - source ./include.tcl - - for {set d 1} {$d < $d0} {incr d} { - puts -nonewline "\t" - } - puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)" - - # Save the initial file and open the environment and the file - for {set b $b0} {$b <= $breadth} {incr b} { - # - # Begin child transaction - # - set t [$env txn -parent $parent] - error_check_bad txn_begin $t NULL - error_check_good txn_begin [is_valid_txn $t $env] TRUE - set startd [expr $d0 + 1] - set child $b:$startd:$t - set olddata [pad_data $method $kvals($p1key)] - set 
newdata $olddata.$child - set ret [$db get -rmw -txn $t $p1key] - set Dret [lindex [lindex $ret 0] 1] - error_check_good get_parent_RMW $Dret $olddata - - # - # Recursively call to set up nested transactions/children - # - for {set d $startd} {$d <= $depth} {incr d} { - set ret [recd008_txn commit $env $db $method $p1key $t \ - $b $d $breadth $depth] - set ret [recd008_txn abort $env $db $method $p1key $t \ - $b $d $breadth $depth] - } - # - # Modifies p1. - # - set ret [$db put -txn $t $p1key [chop_data $method $newdata]] - error_check_good db_put $ret 0 - - # - # Commit or abort - # - for {set d 1} {$d < $startd} {incr d} { - puts -nonewline "\t" - } - puts "Executing txn_$op:$t" - error_check_good txn_$op:$t [$t $op] 0 - for {set d 1} {$d < $startd} {incr d} { - puts -nonewline "\t" - } - set ret [$db get -rmw -txn $parent $p1key] - set Dret [lindex [lindex $ret 0] 1] - set newdata [pad_data $method $newdata] - switch $op { - "commit" { - puts "Command executed and committed." - error_check_good get_parent_RMW $Dret $newdata - set kvals($p1key) $newdata - } - "abort" { - puts "Command executed and aborted." - error_check_good get_parent_RMW $Dret $olddata - set kvals($p1key) $olddata - } - } - } - return 0 -} diff --git a/storage/bdb/test/recd009.tcl b/storage/bdb/test/recd009.tcl deleted file mode 100644 index d6700a0bd54..00000000000 --- a/storage/bdb/test/recd009.tcl +++ /dev/null @@ -1,180 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd009.tcl,v 1.20 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd009 -# TEST Verify record numbering across split/reverse splits and recovery. -proc recd009 { method {select 0} args} { - global fixed_len - source ./include.tcl - - if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} { - puts "Recd009 skipping for method $method." 
- return - } - - set opts [convert_args $method $args] - set method [convert_method $method] - - puts "\tRecd009: Test record numbers across splits and recovery" - - set testfile recd009.db - env_cleanup $testdir - set mkeys 1000 - set nkeys 5 - set data "data" - - puts "\tRecd009.a: Create $method environment and database." - set flags "-create -txn -home $testdir" - - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Fill page with small key/data pairs. Keep at leaf. - puts "\tRecd009.b: Fill page with $nkeys small key/data pairs." - for { set i 1 } { $i <= $nkeys } { incr i } { - if { [is_recno $method] == 1 } { - set key $i - } else { - set key key000$i - } - set ret [$db put $key $data$i] - error_check_good dbput $ret 0 - } - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - - set newnkeys [expr $nkeys + 1] - # List of recovery tests: {CMD MSG} pairs. 
- set rlist { - { {recd009_split DB TXNID 1 $method $newnkeys $mkeys} - "Recd009.c: split"} - { {recd009_split DB TXNID 0 $method $newnkeys $mkeys} - "Recd009.d: reverse split"} - } - - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - set reverse [string first "reverse" $msg] - if { $reverse == -1 } { - set abortkeys $nkeys - set commitkeys $mkeys - set abortpg 0 - set commitpg 1 - } else { - set abortkeys $mkeys - set commitkeys $nkeys - set abortpg 1 - set commitpg 0 - } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - recd009_recnocheck $testdir $testfile $opts \ - $commitkeys $commitpg - } - puts "\tRecd009.e: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} - -# -# This procedure verifies that the database has only numkeys number -# of keys and that they are in order. -# -proc recd009_recnocheck { tdir testfile opts numkeys numpg} { - source ./include.tcl - - set db [eval {berkdb_open} $opts $tdir/$testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tRecd009_recnocheck: Verify page count of $numpg on split." - set stat [$db stat] - error_check_bad stat:check-split [is_substr $stat \ - "{{Internal pages} 0}"] $numpg - - set type [$db get_type] - set dbc [$db cursor] - error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE - set i 1 - puts "\tRecd009_recnocheck: Checking $numkeys record numbers." 
- for {set d [$dbc get -first]} { [llength $d] != 0 } { - set d [$dbc get -next]} { - if { [is_btree $type] } { - set thisi [$dbc get -get_recno] - } else { - set thisi [lindex [lindex $d 0] 0] - } - error_check_good recno_check $i $thisi - error_check_good record_count [expr $i <= $numkeys] 1 - incr i - } - error_check_good curs_close [$dbc close] 0 - error_check_good db_close [$db close] 0 -} - -proc recd009_split { db txn split method nkeys mkeys } { - global errorCode - source ./include.tcl - - set data "data" - - set isrecno [is_recno $method] - # if mkeys is above 1000, need to adjust below for lexical order - if { $split == 1 } { - puts "\tRecd009_split: Add $mkeys pairs to force split." - for {set i $nkeys} { $i <= $mkeys } { incr i } { - if { $isrecno == 1 } { - set key $i - } else { - if { $i >= 100 } { - set key key0$i - } elseif { $i >= 10 } { - set key key00$i - } else { - set key key000$i - } - } - set ret [$db put -txn $txn $key $data$i] - error_check_good dbput:more $ret 0 - } - } else { - puts "\tRecd009_split: Delete added keys to force reverse split." - # Since rrecno renumbers, we delete downward. - for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } { - if { $isrecno == 1 } { - set key $i - } else { - if { $i >= 100 } { - set key key0$i - } elseif { $i >= 10 } { - set key key00$i - } else { - set key key000$i - } - } - error_check_good db_del:$i [$db del -txn $txn $key] 0 - } - } - return 0 -} diff --git a/storage/bdb/test/recd010.tcl b/storage/bdb/test/recd010.tcl deleted file mode 100644 index a2df7a47c6f..00000000000 --- a/storage/bdb/test/recd010.tcl +++ /dev/null @@ -1,257 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd010.tcl,v 1.21 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd010 -# TEST Test stability of btree duplicates across btree off-page dup splits -# TEST and reverse splits and across recovery. 
-proc recd010 { method {select 0} args} { - if { [is_btree $method] != 1 } { - puts "Recd010 skipping for method $method." - return - } - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd010: skipping for specific pagesizes" - return - } - set largs $args - append largs " -dup " - recd010_main $method $select $largs - append largs " -dupsort " - recd010_main $method $select $largs -} - -proc recd010_main { method select largs } { - global fixed_len - global kvals - global kvals_dups - source ./include.tcl - - - set opts [convert_args $method $largs] - set method [convert_method $method] - - puts "Recd010 ($opts): Test duplicates across splits and recovery" - - set testfile recd010.db - env_cleanup $testdir - # - # Set pagesize small to generate lots of off-page dups - # - set page 512 - set mkeys 1000 - set firstkeys 5 - set data "data" - set key "recd010_key" - - puts "\tRecd010.a: Create environment and database." - set flags "-create -txn -home $testdir" - - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set oflags "-env $dbenv -create -mode 0644 $opts $method" - set db [eval {berkdb_open} -pagesize $page $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Fill page with small key/data pairs. Keep at leaf. - puts "\tRecd010.b: Fill page with $firstkeys small dups." - for { set i 1 } { $i <= $firstkeys } { incr i } { - set ret [$db put $key $data$i] - error_check_good dbput $ret 0 - } - set kvals 1 - set kvals_dups $firstkeys - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - - # List of recovery tests: {CMD MSG} pairs. 
- if { $mkeys < 100 } { - puts "Recd010 mkeys of $mkeys too small" - return - } - set rlist { - { {recd010_split DB TXNID 1 2 $mkeys} - "Recd010.c: btree split 2 large dups"} - { {recd010_split DB TXNID 0 2 $mkeys} - "Recd010.d: btree reverse split 2 large dups"} - { {recd010_split DB TXNID 1 10 $mkeys} - "Recd010.e: btree split 10 dups"} - { {recd010_split DB TXNID 0 10 $mkeys} - "Recd010.f: btree reverse split 10 dups"} - { {recd010_split DB TXNID 1 100 $mkeys} - "Recd010.g: btree split 100 dups"} - { {recd010_split DB TXNID 0 100 $mkeys} - "Recd010.h: btree reverse split 100 dups"} - } - - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - set reverse [string first "reverse" $msg] - op_recover abort $testdir $env_cmd $testfile $cmd $msg - recd010_check $testdir $testfile $opts abort $reverse $firstkeys - op_recover commit $testdir $env_cmd $testfile $cmd $msg - recd010_check $testdir $testfile $opts commit $reverse $firstkeys - } - puts "\tRecd010.i: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} - -# -# This procedure verifies that the database has only numkeys number -# of keys and that they are in order. 
-# -proc recd010_check { tdir testfile opts op reverse origdups } { - global kvals - global kvals_dups - source ./include.tcl - - set db [eval {berkdb_open} $opts $tdir/$testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set data "data" - - if { $reverse == -1 } { - puts "\tRecd010_check: Verify split after $op" - } else { - puts "\tRecd010_check: Verify reverse split after $op" - } - - set stat [$db stat] - if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \ - ([string compare $op "commit"] == 0 && $reverse != -1)]} { - set numkeys 0 - set allkeys [expr $numkeys + 1] - set numdups $origdups - # - # If we abort the adding of dups, or commit - # the removal of dups, either way check that - # we are back at the beginning. Check that: - # - We have 0 internal pages. - # - We have only 1 key (the original we primed the db - # with at the beginning of the test). - # - We have only the original number of dups we primed - # the db with at the beginning of the test. - # - error_check_good stat:orig0 [is_substr $stat \ - "{{Internal pages} 0}"] 1 - error_check_good stat:orig1 [is_substr $stat \ - "{{Number of keys} 1}"] 1 - error_check_good stat:orig2 [is_substr $stat \ - "{{Number of records} $origdups}"] 1 - } else { - set numkeys $kvals - set allkeys [expr $numkeys + 1] - set numdups $kvals_dups - # - # If we abort the removal of dups, or commit the - # addition of dups, check that: - # - We have > 0 internal pages. - # - We have the number of keys. - # - error_check_bad stat:new0 [is_substr $stat \ - "{{Internal pages} 0}"] 1 - error_check_good stat:new1 [is_substr $stat \ - "{{Number of keys} $allkeys}"] 1 - } - - set dbc [$db cursor] - error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE - puts "\tRecd010_check: Checking key and duplicate values" - set key "recd010_key" - # - # Check dups are there as they should be. 
- # - for {set ki 0} {$ki < $numkeys} {incr ki} { - set datacnt 0 - for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } { - set d [$dbc get -nextdup]} { - set thisdata [lindex [lindex $d 0] 1] - if { $datacnt < 10 } { - set pdata $data.$ki.00$datacnt - } elseif { $datacnt < 100 } { - set pdata $data.$ki.0$datacnt - } else { - set pdata $data.$ki.$datacnt - } - error_check_good dup_check $thisdata $pdata - incr datacnt - } - error_check_good dup_count $datacnt $numdups - } - # - # Check that the number of expected keys (allkeys) are - # all of the ones that exist in the database. - # - set dupkeys 0 - set lastkey "" - for {set d [$dbc get -first]} { [llength $d] != 0 } { - set d [$dbc get -next]} { - set thiskey [lindex [lindex $d 0] 0] - if { [string compare $lastkey $thiskey] != 0 } { - incr dupkeys - } - set lastkey $thiskey - } - error_check_good key_check $allkeys $dupkeys - error_check_good curs_close [$dbc close] 0 - error_check_good db_close [$db close] 0 -} - -proc recd010_split { db txn split nkeys mkeys } { - global errorCode - global kvals - global kvals_dups - source ./include.tcl - - set data "data" - set key "recd010_key" - - set numdups [expr $mkeys / $nkeys] - - set kvals $nkeys - set kvals_dups $numdups - if { $split == 1 } { - puts \ -"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split." - for {set k 0} { $k < $nkeys } { incr k } { - for {set i 0} { $i < $numdups } { incr i } { - if { $i < 10 } { - set pdata $data.$k.00$i - } elseif { $i < 100 } { - set pdata $data.$k.0$i - } else { - set pdata $data.$k.$i - } - set ret [$db put -txn $txn $key$k $pdata] - error_check_good dbput:more $ret 0 - } - } - } else { - puts \ -"\tRecd010_split: Delete $nkeys keys to force reverse split." 
- for {set k 0} { $k < $nkeys } { incr k } { - error_check_good db_del:$k [$db del -txn $txn $key$k] 0 - } - } - return 0 -} diff --git a/storage/bdb/test/recd011.tcl b/storage/bdb/test/recd011.tcl deleted file mode 100644 index bf118905b98..00000000000 --- a/storage/bdb/test/recd011.tcl +++ /dev/null @@ -1,136 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd011.tcl,v 11.26 2004/01/28 03:36:28 bostic Exp $ -# -# TEST recd011 -# TEST Verify that recovery to a specific timestamp works. -proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } { - source ./include.tcl - global rand_init - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "011" - - puts "Recd$tnum ($method $args): Test recovery to a specific timestamp." - - set testfile recd$tnum.db - env_cleanup $testdir - - set i 0 - if { [is_record_based $method] == 1 } { - set key 1 - set bigkey 1001 - } else { - set key KEY - set bigkey BIGKEY - } - - puts "\tRecd$tnum.a: Create environment and database." - set bufsize [expr 8 * 1024] - set maxsize [expr 8 * $bufsize] - set flags "-create -txn -home $testdir -log_buffer $bufsize \ - -log_max $maxsize" - - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Main loop: every second or so, increment the db in a txn. - puts "\t\tInitial Checkpoint" - error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0 - - puts "\tRecd$tnum.b ($niter iterations):\ - Transaction-protected increment loop." - for { set i 0 } { $i <= $niter } { incr i } { - set str [random_data 4096 0 NOTHING] - set data $i - set bigdata $i$str - - # Put, in a txn. 
- set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - error_check_good db_put \ - [$db put -txn $txn $key [chop_data $method $data]] 0 - error_check_good db_put \ - [$db put -txn $txn $bigkey [chop_data $method $bigdata]] 0 - error_check_good txn_commit [$txn commit] 0 - - # We need to sleep before taking the timestamp to guarantee - # that the timestamp is *after* this transaction commits. - # Since the resolution of the system call used by Berkeley DB - # is less than a second, rounding to the nearest second can - # otherwise cause off-by-one errors in the test. - tclsleep $sleep_time - - set timeof($i) [timestamp -r] - - # If an appropriate period has elapsed, checkpoint. - if { $i % $ckpt_freq == $ckpt_freq - 1 } { - puts "\t\tIteration $i: Checkpointing." - error_check_good ckpt($i) [$dbenv txn_checkpoint] 0 - } - - # Sleep again to ensure that the next operation definitely - # occurs after the timestamp. - tclsleep $sleep_time - } - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - - # Now, loop through and recover to each timestamp, verifying the - # expected increment. - puts "\tRecd$tnum.c: Recover to each timestamp and check." - for { set i $niter } { $i >= 0 } { incr i -1 } { - - # Run db_recover. - set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"] - # puts $t - berkdb debug_check - set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r] - error_check_good db_recover($i,$t,$r) $ret 0 - - # Now open the db and check the timestamp. - set db [eval {berkdb_open} $testdir/$testfile] - error_check_good db_open($i) [is_valid_db $db] TRUE - - set dbt [$db get $key] - set datum [lindex [lindex $dbt 0] 1] - error_check_good timestamp_recover $datum [pad_data $method $i] - - error_check_good db_close [$db close] 0 - } - - # Finally, recover to a time well before the first timestamp - # and well after the last timestamp. 
The latter should - # be just like the timestamp of the last test performed; - # the former should fail. - puts "\tRecd$tnum.d: Recover to before the first timestamp." - set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"] - set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r] - error_check_bad db_recover(before,$t) $ret 0 - - puts "\tRecd$tnum.e: Recover to after the last timestamp." - set t [clock format \ - [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"] - set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r] - error_check_good db_recover(after,$t) $ret 0 - - # Now open the db and check the timestamp. - set db [eval {berkdb_open} $testdir/$testfile] - error_check_good db_open(after) [is_valid_db $db] TRUE - - set dbt [$db get $key] - set datum2 [lindex [lindex $dbt 0] 1] - - error_check_good timestamp_recover $datum2 $datum - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/recd012.tcl b/storage/bdb/test/recd012.tcl deleted file mode 100644 index abed99de809..00000000000 --- a/storage/bdb/test/recd012.tcl +++ /dev/null @@ -1,433 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd012.tcl,v 11.31 2004/04/19 14:56:13 bostic Exp $ -# -# TEST recd012 -# TEST Test of log file ID management. [#2288] -# TEST Test recovery handling of file opens and closes. -proc recd012 { method {start 0} \ - {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } { - source ./include.tcl - - set tnum "012" - set pagesize 512 - - if { $is_qnx_test } { - set niter 40 - } - - puts "Recd$tnum $method ($args): Test recovery file management." 
- set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd012: skipping for specific pagesizes" - return - } - - for { set i $start } { $i <= $niter } { incr i } { - env_cleanup $testdir - - # For repeatability, we pass in the iteration number - # as a parameter and use that in recd012_body to seed - # the random number generator to randomize our operations. - # This lets us re-run a potentially failing iteration - # without having to start from the beginning and work - # our way to it. - # - # The number of databases ranges from 4 to 8 and is - # a function of $niter - # set ndbs [expr ($i % 5) + 4] - - recd012_body \ - $method $ndbs $i $noutiter $niniter $pagesize $tnum $args - } -} - -proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } { - global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey - source ./include.tcl - - set largs [convert_args $method $largs] - set omethod [convert_method $method] - - puts "\tRecd$tnum $method ($largs): Iteration $iter" - puts "\t\tRecd$tnum.a: Create environment and $ndbs databases." - - # We run out of lockers during some of the recovery runs, so - # we need to make sure that we specify a DB_CONFIG that will - # give us enough lockers. - set f [open $testdir/DB_CONFIG w] - puts $f "set_lk_max_lockers 5000" - close $f - - set flags "-create -txn -home $testdir" - set env_cmd "berkdb_env $flags" - error_check_good env_remove [berkdb envremove -home $testdir] 0 - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # Initialize random number generator based on $iter. - berkdb srand [expr $iter + $rand_init] - - # Initialize database that keeps track of number of open files (so - # we don't run out of descriptors). 
- set ofname of.db - set txn [$dbenv txn] - error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE - set ofdb [berkdb_open -env $dbenv -txn $txn\ - -create -dup -mode 0644 -btree -pagesize 512 $ofname] - error_check_good of_open [is_valid_db $ofdb] TRUE - error_check_good open_txn_commit [$txn commit] 0 - set oftxn [$dbenv txn] - error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE - error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0 - error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0 - error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0 - error_check_good of_txn_commit [$oftxn commit] 0 - error_check_good of_close [$ofdb close] 0 - - # Create ndbs databases to work in, and a file listing db names to - # pick from. - set f [open $testdir/dblist w] - - set oflags "-auto_commit -env $dbenv \ - -create -mode 0644 -pagesize $psz $largs $omethod" - for { set i 0 } { $i < $ndbs } { incr i } { - # 50-50 chance of being a subdb, unless we're a queue. - if { [berkdb random_int 0 1] || [is_queue $method] } { - # not a subdb - set dbname recd$tnum-$i.db - } else { - # subdb - set dbname "recd$tnum-subdb.db s$i" - } - puts $f $dbname - set db [eval berkdb_open $oflags $dbname] - error_check_good db($i) [is_valid_db $db] TRUE - error_check_good db($i)_close [$db close] 0 - } - close $f - error_check_good env_close [$dbenv close] 0 - - # Now we get to the meat of things. Our goal is to do some number - # of opens, closes, updates, and shutdowns (simulated here by a - # close of all open handles and a close/reopen of the environment, - # with or without an envremove), matching the regular expression - # - # ((O[OUC]+S)+R+V) - # - # We'll repeat the inner + a random number up to $niniter times, - # and the outer + a random number up to $noutiter times. 
- # - # In order to simulate shutdowns, we'll perform the opens, closes, - # and updates in a separate process, which we'll exit without closing - # all handles properly. The environment will be left lying around - # before we run recovery 50% of the time. - set out [berkdb random_int 1 $noutiter] - puts \ - "\t\tRecd$tnum.b: Performing $out recoveries of up to $niniter ops." - for { set i 0 } { $i < $out } { incr i } { - set child [open "|$tclsh_path" w] - - # For performance, don't source everything, - # just what we'll need. - puts $child "load $tcllib" - puts $child "set fixed_len $fixed_len" - puts $child "source $src_root/test/testutils.tcl" - puts $child "source $src_root/test/recd$tnum.tcl" - - set rnd [expr $iter * 10000 + $i * 100 + $rand_init] - - # Go. - berkdb debug_check - puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\ - $ndbs $tnum $method $ofname $largs" - close $child - - # Run recovery 0-3 times. - set nrecs [berkdb random_int 0 3] - for { set j 0 } { $j < $nrecs } { incr j } { - berkdb debug_check - set ret [catch {exec $util_path/db_recover \ - -h $testdir} res] - if { $ret != 0 } { - puts "FAIL: db_recover returned with nonzero\ - exit status, output as follows:" - file mkdir /tmp/12out - set fd [open /tmp/12out/[pid] w] - puts $fd $res - close $fd - } - error_check_good recover($j) $ret 0 - } - } - - # Run recovery one final time; it doesn't make sense to - # check integrity if we do not. - set ret [catch {exec $util_path/db_recover -h $testdir} res] - if { $ret != 0 } { - puts "FAIL: db_recover returned with nonzero\ - exit status, output as follows:" - puts $res - } - - # Make sure each datum is the correct filename. - puts "\t\tRecd$tnum.c: Checking data integrity." 
- set dbenv [berkdb_env -create -private -home $testdir] - error_check_good env_open_integrity [is_valid_env $dbenv] TRUE - set f [open $testdir/dblist r] - set i 0 - while { [gets $f dbinfo] > 0 } { - set db [eval berkdb_open -env $dbenv $dbinfo] - error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE - - set dbc [$db cursor] - error_check_good cursor [is_valid_cursor $dbc $db] TRUE - - for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \ - { set dbt [$dbc get -next] } { - error_check_good integrity [lindex [lindex $dbt 0] 1] \ - [pad_data $method $dbinfo] - } - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - } - close $f - error_check_good env_close_integrity [$dbenv close] 0 - - # Verify - error_check_good verify \ - [verify_dir $testdir "\t\tRecd$tnum.d: " 0 0 1] 0 -} - -proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\ - ofname args } { - global recd012_ofkey - source ./include.tcl - if { [is_record_based $method] } { - set keybase "" - } else { - set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4] - } - - # Initialize our random number generator, repeatably based on an arg. - berkdb srand $rnd - - # Open our env. - set dbenv [eval $env_cmd] - error_check_good env_open [is_valid_env $dbenv] TRUE - - # Find out how many databases appear to be open in the log--we - # don't want recovery to run out of filehandles. 
- set txn [$dbenv txn] - error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE - set ofdb [berkdb_open -env $dbenv -txn $txn $ofname] - error_check_good child_txn_commit [$txn commit] 0 - - set oftxn [$dbenv txn] - error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE - set dbt [$ofdb get -txn $oftxn $recd012_ofkey] - error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey - set nopenfiles [lindex [lindex $dbt 0] 1] - - error_check_good of_commit [$oftxn commit] 0 - - # Read our dbnames - set f [open $testdir/dblist r] - set i 0 - while { [gets $f dbname($i)] > 0 } { - incr i - } - close $f - - # We now have $ndbs extant databases. - # Open one of them, just to get us started. - set opendbs {} - set oflags "-env $dbenv $args" - - # Start a transaction, just to get us started. - set curtxn [$dbenv txn] - error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE - - # Inner loop. Do $in iterations of a random open, close, or - # update, where $in is between 1 and $niniter. - set in [berkdb random_int 1 $niniter] - for { set j 0 } { $j < $in } { incr j } { - set op [berkdb random_int 0 2] - switch $op { - 0 { - # Open. - recd012_open - } - 1 { - # Update. Put random-number$keybase as key, - # filename as data, into random database. - set num_open [llength $opendbs] - if { $num_open == 0 } { - # If none are open, do an open first. - recd012_open - set num_open [llength $opendbs] - } - set n [berkdb random_int 0 [expr $num_open - 1]] - set pair [lindex $opendbs $n] - set udb [lindex $pair 0] - set uname [lindex $pair 1] - - set key [berkdb random_int 1000 1999]$keybase - set data [chop_data $method $uname] - error_check_good put($uname,$udb,$key,$data) \ - [$udb put -txn $curtxn $key $data] 0 - - # One time in four, commit the transaction. 
- if { [berkdb random_int 0 3] == 0 && 0 } { - error_check_good txn_recommit \ - [$curtxn commit] 0 - set curtxn [$dbenv txn] - error_check_good txn_reopen \ - [is_valid_txn $curtxn $dbenv] TRUE - } - } - 2 { - # Close. - if { [llength $opendbs] == 0 } { - # If none are open, open instead of closing. - recd012_open - continue - } - - # Commit curtxn first, lest we self-deadlock. - error_check_good txn_recommit [$curtxn commit] 0 - - # Do it. - set which [berkdb random_int 0 \ - [expr [llength $opendbs] - 1]] - - set db [lindex [lindex $opendbs $which] 0] - error_check_good db_choice [is_valid_db $db] TRUE - global errorCode errorInfo - - error_check_good db_close \ - [[lindex [lindex $opendbs $which] 0] close] 0 - - set opendbs [lreplace $opendbs $which $which] - incr nopenfiles -1 - - # Reopen txn. - set curtxn [$dbenv txn] - error_check_good txn_reopen \ - [is_valid_txn $curtxn $dbenv] TRUE - } - } - - # One time in two hundred, checkpoint. - if { [berkdb random_int 0 199] == 0 } { - puts "\t\t\tRecd$tnum:\ - Random checkpoint after operation $outiter.$j." - error_check_good txn_ckpt \ - [$dbenv txn_checkpoint] 0 - set nopenfiles \ - [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles] - } - } - - # We have to commit curtxn. It'd be kind of nice not to, but - # if we start in again without running recovery, we may block - # ourselves. - error_check_good curtxn_commit [$curtxn commit] 0 - - # Put back the new number of open files. - set oftxn [$dbenv txn] - error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE - error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0 - error_check_good of_put \ - [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0 - error_check_good of_commit [$oftxn commit] 0 - error_check_good ofdb_close [$ofdb close] 0 -} - -proc recd012_open { } { - # This is basically an inline and has to modify curtxn, - # so use upvars. 
- upvar curtxn curtxn - upvar ndbs ndbs - upvar dbname dbname - upvar dbenv dbenv - upvar oflags oflags - upvar opendbs opendbs - upvar nopenfiles nopenfiles - - # Return without an open if we've already opened too many files-- - # we don't want to make recovery run out of filehandles. - if { $nopenfiles > 30 } { - #puts "skipping--too many open files" - return -code break - } - - # Commit curtxn first, lest we self-deadlock. - error_check_good txn_recommit \ - [$curtxn commit] 0 - - # Do it. - set which [berkdb random_int 0 [expr $ndbs - 1]] - - set db [eval berkdb_open -auto_commit $oflags $dbname($which)] - - lappend opendbs [list $db $dbname($which)] - - # Reopen txn. - set curtxn [$dbenv txn] - error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE - - incr nopenfiles -} - -# Update the database containing the number of files that db_recover has -# to contend with--we want to avoid letting it run out of file descriptors. -# We do this by keeping track of the number of unclosed opens since the -# checkpoint before last. -# $recd012_ofkey stores this current value; the two dups available -# at $recd012_ofckptkey store the number of opens since the last checkpoint -# previous. -# Thus, if the current value is 17 when we do a checkpoint, and the -# stored values are 3 and 8, the new current value (which we return) -# is 14, and the new stored values are 8 and 6. -proc recd012_nopenfiles_ckpt { env db nopenfiles } { - global recd012_ofckptkey - set txn [$env txn] - error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE - - set dbc [$db cursor -txn $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # Get the first ckpt value and delete it. 
- set dbt [$dbc get -set $recd012_ofckptkey] - error_check_good set [llength $dbt] 1 - - set discard [lindex [lindex $dbt 0] 1] - error_check_good del [$dbc del] 0 - - set nopenfiles [expr $nopenfiles - $discard] - - # Get the next ckpt value - set dbt [$dbc get -nextdup] - error_check_good set2 [llength $dbt] 1 - - # Calculate how many opens we've had since this checkpoint before last. - set onlast [lindex [lindex $dbt 0] 1] - set sincelast [expr $nopenfiles - $onlast] - - # Put this new number at the end of the dup set. - error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0 - - # We should never deadlock since we're the only one in this db. - error_check_good dbc_close [$dbc close] 0 - error_check_good txn_commit [$txn commit] 0 - - return $nopenfiles -} - -# globals -- it's not worth passing these around, as they're constants -set recd012_ofkey OPENFILES -set recd012_ofckptkey CKPTS diff --git a/storage/bdb/test/recd013.tcl b/storage/bdb/test/recd013.tcl deleted file mode 100644 index 36cad9eb497..00000000000 --- a/storage/bdb/test/recd013.tcl +++ /dev/null @@ -1,285 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd013.tcl,v 11.22 2004/09/20 17:06:15 sue Exp $ -# -# TEST recd013 -# TEST Test of cursor adjustment on child transaction aborts. [#2373] -# -# XXX -# Other tests that cover more specific variants of the same issue -# are in the access method tests for now. This is probably wrong; we -# put this one here because they're closely based on and intertwined -# with other, non-transactional cursor stability tests that are among -# the access method tests, and because we need at least one test to -# fit under recd and keep logtrack from complaining. We'll sort out the mess -# later; the important thing, for now, is that everything that needs to gets -# tested. (This really shouldn't be under recd at all, since it doesn't -# run recovery!) 
-proc recd013 { method { nitems 100 } args } { - source ./include.tcl - global alphabet log_log_record_types - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "013" - set pgsz 512 - - puts "Recd$tnum $method ($args): Test of aborted cursor adjustments." - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd013: skipping for specific pagesizes" - return - } - - set testfile recd$tnum.db - env_cleanup $testdir - - set i 0 - if { [is_record_based $method] == 1 } { - set keybase "" - } else { - set keybase "key" - } - - puts "\tRecd$tnum.a:\ - Create environment, database, and parent transaction." - set flags "-create -txn -home $testdir" - - set env_cmd "berkdb_env $flags" - set env [eval $env_cmd] - error_check_good dbenv [is_valid_env $env] TRUE - - set oflags \ - "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Create a database containing $nitems items, numbered with odds. - # We'll then put the even numbers during the body of the test. - set txn [$env txn] - error_check_good init_txn [is_valid_txn $txn $env] TRUE - for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - set key $keybase$i - set data [chop_data $method $i$alphabet] - - # First, try to put the item in a child transaction, - # then abort and verify all the cursors we've done up until - # now. - set ctxn [$env txn -parent $txn] - error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE - error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0 - error_check_good ctxn_abort($i) [$ctxn abort] 0 - for { set j 1 } { $j < $i } { incr j 2 } { - error_check_good dbc_get($j):1 [$dbc($j) get -current] \ - [list [list $keybase$j \ - [pad_data $method $j$alphabet]]] - } - - # Then put for real. 
- error_check_good init_put($i) [$db put -txn $txn $key $data] 0 - - # Set a cursor of the parent txn to each item. - set dbc($i) [$db cursor -txn $txn] - error_check_good dbc_getset($i) \ - [$dbc($i) get -set $key] \ - [list [list $keybase$i [pad_data $method $i$alphabet]]] - - # And verify all the cursors, including the one we just - # created. - for { set j 1 } { $j <= $i } { incr j 2 } { - error_check_good dbc_get($j):2 [$dbc($j) get -current] \ - [list [list $keybase$j \ - [pad_data $method $j$alphabet]]] - } - } - - puts "\t\tRecd$tnum.a.1: Verify cursor stability after init." - for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i):3 [$dbc($i) get -current] \ - [list [list $keybase$i [pad_data $method $i$alphabet]]] - } - - puts "\tRecd$tnum.b: Put test." - puts "\t\tRecd$tnum.b.1: Put items." - set ctxn [$env txn -parent $txn] - error_check_good txn [is_valid_txn $ctxn $env] TRUE - for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - set key $keybase$i - set data [chop_data $method $i$alphabet] - error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0 - - # If we're a renumbering recno, this is uninteresting. - # Stir things up by putting a few additional records at - # the beginning. - if { [is_rrecno $method] == 1 } { - set curs [$db cursor -txn $ctxn] - error_check_bad llength_get_first \ - [llength [$curs get -first]] 0 - error_check_good cursor [is_valid_cursor $curs $db] TRUE - # expect a recno! - error_check_good rrecno_put($i) \ - [$curs put -before ADDITIONAL.$i] 1 - error_check_good curs_close [$curs close] 0 - } - } - - puts "\t\tRecd$tnum.b.2: Verify cursor stability after abort." - error_check_good ctxn_abort [$ctxn abort] 0 - - for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i):4 [$dbc($i) get -current] \ - [list [list $keybase$i [pad_data $method $i$alphabet]]] - } - - # Clean up cursors. 
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc($i)_close [$dbc($i) close] 0 - } - - # Sync and verify. - error_check_good txn_commit [$txn commit] 0 - set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - - error_check_good db_sync [$db sync] 0 - error_check_good db_verify \ - [verify_dir $testdir "\t\tRecd$tnum.b.3: "] 0 - - # Now put back all the even records, this time in the parent. - # Commit and re-begin the transaction so we can abort and - # get back to a nice full database. - for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - set key $keybase$i - set data [chop_data $method $i$alphabet] - error_check_good child_put($i) [$db put -txn $txn $key $data] 0 - } - error_check_good txn_commit [$txn commit] 0 - set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - - # Delete test. Set a cursor to each record. Delete the even ones - # in the parent and check cursor stability. Then open a child - # transaction, and delete the odd ones. Verify that the database - # is empty. - puts "\tRecd$tnum.c: Delete test." - unset dbc - - # Create cursors pointing at each item. - for { set i 1 } { $i <= 2 * $nitems } { incr i } { - set dbc($i) [$db cursor -txn $txn] - error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \ - TRUE - error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \ - [list [list $keybase$i [pad_data $method $i$alphabet]]] - } - - puts "\t\tRecd$tnum.c.1: Delete even items in child txn and abort." - - if { [is_rrecno $method] != 1 } { - set init 2 - set bound [expr 2 * $nitems] - set step 2 - } else { - # In rrecno, deletes will renumber the items, so we have - # to take that into account when we delete by recno. 
- set init 2 - set bound [expr $nitems + 1] - set step 1 - } - - set ctxn [$env txn -parent $txn] - for { set i $init } { $i <= $bound } { incr i $step } { - error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0 - } - error_check_good ctxn_abort [$ctxn abort] 0 - - # Verify that no items are deleted. - for { set i 1 } { $i <= 2 * $nitems } { incr i } { - error_check_good dbc_get($i):5 [$dbc($i) get -current] \ - [list [list $keybase$i [pad_data $method $i$alphabet]]] - } - - puts "\t\tRecd$tnum.c.2: Delete even items in child txn and commit." - set ctxn [$env txn -parent $txn] - for { set i $init } { $i <= $bound } { incr i $step } { - error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0 - } - error_check_good ctxn_commit [$ctxn commit] 0 - - # Verify that even items are deleted and odd items are not. - for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - if { [is_rrecno $method] != 1 } { - set j $i - } else { - set j [expr ($i - 1) / 2 + 1] - } - error_check_good dbc_get($i):6 [$dbc($i) get -current] \ - [list [list $keybase$j [pad_data $method $i$alphabet]]] - } - for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i):7 [$dbc($i) get -current] "" - } - - puts "\t\tRecd$tnum.c.3: Delete odd items in child txn." - - set ctxn [$env txn -parent $txn] - - for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - if { [is_rrecno $method] != 1 } { - set j $i - } else { - # If this is an rrecno, just delete the first - # item repeatedly--the renumbering will make - # that delete everything. - set j 1 - } - error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0 - } - - # Verify that everyone's deleted. - for { set i 1 } { $i <= 2 * $nitems } { incr i } { - error_check_good get_deleted($i) \ - [llength [$db get -txn $ctxn $keybase$i]] 0 - } - - puts "\t\tRecd$tnum.c.4: Verify cursor stability after abort." - error_check_good ctxn_abort [$ctxn abort] 0 - - # Verify that even items are deleted and odd items are not. 
- for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } { - if { [is_rrecno $method] != 1 } { - set j $i - } else { - set j [expr ($i - 1) / 2 + 1] - } - error_check_good dbc_get($i):8 [$dbc($i) get -current] \ - [list [list $keybase$j [pad_data $method $i$alphabet]]] - } - for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } { - error_check_good dbc_get($i):9 [$dbc($i) get -current] "" - } - - # Clean up cursors. - for { set i 1 } { $i <= 2 * $nitems } { incr i } { - error_check_good dbc($i)_close [$dbc($i) close] 0 - } - - # Sync and verify. - error_check_good db_sync [$db sync] 0 - error_check_good db_verify \ - [verify_dir $testdir "\t\tRecd$tnum.c.5: "] 0 - - puts "\tRecd$tnum.d: Clean up." - error_check_good txn_commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - error_check_good verify_dir \ - [verify_dir $testdir "\t\tRecd$tnum.d.1: "] 0 - - if { $log_log_record_types == 1 } { - logtrack_read $testdir - } -} diff --git a/storage/bdb/test/recd014.tcl b/storage/bdb/test/recd014.tcl deleted file mode 100644 index da9207cb008..00000000000 --- a/storage/bdb/test/recd014.tcl +++ /dev/null @@ -1,445 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd014.tcl,v 1.21 2004/01/28 03:36:29 bostic Exp $ -# -# TEST recd014 -# TEST This is a recovery test for create/delete of queue extents. We -# TEST then need to recover and make sure the file is correctly existing -# TEST or not, as the case may be. 
-proc recd014 { method args} { - global fixed_len - source ./include.tcl - - if { ![is_queueext $method] == 1 } { - puts "Recd014: Skipping for method $method" - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd014: skipping for specific pagesizes" - return - } - - set orig_fixed_len $fixed_len - # - # We will use 512-byte pages, to be able to control - # when extents get created/removed. - # - set fixed_len 300 - - set opts [convert_args $method $args] - set omethod [convert_method $method] - # - # We want to set -extent 1 instead of what - # convert_args gave us. - # - set exti [lsearch -exact $opts "-extent"] - incr exti - set opts [lreplace $opts $exti $exti 1] - - puts "Recd014: $method extent creation/deletion tests" - - # Create the database and environment. - env_cleanup $testdir - - set testfile recd014.db - set flags "-create -txn -home $testdir" - - puts "\tRecd014.a: creating environment" - set env_cmd "berkdb_env $flags" - - puts "\tRecd014.b: Create test commit" - ext_recover_create $testdir $env_cmd $omethod \ - $opts $testfile commit - puts "\tRecd014.b: Create test abort" - ext_recover_create $testdir $env_cmd $omethod \ - $opts $testfile abort - - puts "\tRecd014.c: Consume test commit" - ext_recover_consume $testdir $env_cmd $omethod \ - $opts $testfile commit - puts "\tRecd014.c: Consume test abort" - ext_recover_consume $testdir $env_cmd $omethod \ - $opts $testfile abort - - set fixed_len $orig_fixed_len - puts "\tRecd014.d: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile -} - -proc ext_recover_create { dir env_cmd method opts dbfile txncmd } { - global log_log_record_types - global fixed_len - global alphabet - source ./include.tcl - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read 
$dir - } - - env_cleanup $dir - # Open the environment and set the copy/abort locations - set env [eval $env_cmd] - - set init_file $dir/$dbfile.init - set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile" - set oflags "-env $env $noenvflags" - - set t [$env txn] - error_check_good txn_begin [is_valid_txn $t $env] TRUE - - set ret [catch {eval {berkdb_open} -txn $t $oflags} db] - error_check_good txn_commit [$t commit] 0 - - set t [$env txn] - error_check_good txn_begin [is_valid_txn $t $env] TRUE - - # - # The command to execute to create an extent is a put. - # We are just creating the first one, so our extnum is 0. - # - set extnum 0 - set data [chop_data $method [replicate $alphabet 512]] - puts "\t\tExecuting command" - set putrecno [$db put -txn $t -append $data] - error_check_good db_put $putrecno 1 - - # Sync the db so any changes to the file that are - # in mpool get written to the disk file before the - # diff. - puts "\t\tSyncing" - error_check_good db_sync [$db sync] 0 - - catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res - copy_extent_file $dir $dbfile afterop - - error_check_good txn_$txncmd:$t [$t $txncmd] 0 - # - # If we don't abort, then we expect success. - # If we abort, we expect no file created. - # - set dbq [make_ext_filename $dir $dbfile $extnum] - error_check_good extput:exists1 [file exists $dbq] 1 - set ret [$db get $putrecno] - if {$txncmd == "abort"} { - # - # Operation was aborted. Verify our entry is not there. - # - puts "\t\tCommand executed and aborted." - error_check_good db_get [llength $ret] 0 - } else { - # - # Operation was committed, verify it exists. - # - puts "\t\tCommand executed and committed." 
- error_check_good db_get [llength $ret] 1 - catch { file copy -force $dir/$dbfile $init_file } res - copy_extent_file $dir $dbfile init - } - set t [$env txn] - error_check_good txn_begin [is_valid_txn $t $env] TRUE - error_check_good db_close [$db close] 0 - error_check_good txn_commit [$t commit] 0 - error_check_good env_close [$env close] 0 - - # - # Run recovery here. Should be a no-op. Verify that - # the file still does/n't exist when we are done. - # - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (no-op) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - # - # Verify it did not change. - # - error_check_good extput:exists2 [file exists $dbq] 1 - ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno - - # - # Need a new copy to get the right LSN into the file. - # - catch { file copy -force $dir/$dbfile $init_file } res - copy_extent_file $dir $dbfile init - - # - # Undo. - # Now move the .afterop file to $dbfile. Run recovery again. - # - file copy -force $dir/$dbfile.afterop $dir/$dbfile - move_file_extent $dir $dbfile afterop copy - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (afterop) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno - - # - # To redo, remove the dbfiles. Run recovery again. - # - catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res - copy_extent_file $dir $dbfile renamed rename - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (init) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - # - # !!! 
- # Even though db_recover exits with status 0, it should print out - # a warning because the file didn't exist. Db_recover writes this - # to stderr. Tcl assumes that ANYTHING written to stderr is an - # error, so even though we exit with 0 status, we still get an - # error back from 'catch'. Look for the warning. - # - if { $stat == 1 && [is_substr $result "warning"] == 0 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - - # - # Verify it was redone. However, since we removed the files - # to begin with, recovery with abort will not recreate the - # extent. Recovery with commit will. - # - if {$txncmd == "abort"} { - error_check_good extput:exists3 [file exists $dbq] 0 - } else { - error_check_good extput:exists3 [file exists $dbq] 1 - } -} - -proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } { - if { $txncmd == "commit" } { - # - # Operation was committed. Verify it did not change. - # - error_check_good \ - diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \ - [dbdump_diff "-dar" $init_file $dir $dbfile] 0 - } else { - # - # Operation aborted. The file is there, but make - # sure the item is not. - # - set xdb [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $xdb] TRUE - set ret [$xdb get $putrecno] - error_check_good db_get [llength $ret] 0 - error_check_good db_close [$xdb close] 0 - } -} - -proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} { - global log_log_record_types - global alphabet - source ./include.tcl - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - - env_cleanup $dir - # Open the environment and set the copy/abort locations - set env [eval $env_cmd] - - set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \ - -env $env $opts $dbfile" - - # - # Open our db, add some data, close and copy as our - # init file. 
- # - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - set extnum 0 - set data [chop_data $method [replicate $alphabet 512]] - - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set putrecno [$db put -txn $txn -append $data] - error_check_good db_put $putrecno 1 - error_check_good commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - - puts "\t\tExecuting command" - - set init_file $dir/$dbfile.init - catch { file copy -force $dir/$dbfile $init_file } res - copy_extent_file $dir $dbfile init - - # - # If we don't abort, then we expect success. - # If we abort, we expect no file removed until recovery is run. - # - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - set t [$env txn] - error_check_good txn_begin [is_valid_txn $t $env] TRUE - - set dbcmd "$db get -txn $t -consume" - set ret [eval $dbcmd] - error_check_good db_sync [$db sync] 0 - - catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res - copy_extent_file $dir $dbfile afterop - - error_check_good txn_$txncmd:$t [$t $txncmd] 0 - error_check_good db_sync [$db sync] 0 - set dbq [make_ext_filename $dir $dbfile $extnum] - if {$txncmd == "abort"} { - # - # Operation was aborted, verify ext did not change. - # - puts "\t\tCommand executed and aborted." - - # - # Check that the file exists. Final state. - # Since we aborted the txn, we should be able - # to get to our original entry. - # - error_check_good postconsume.1 [file exists $dbq] 1 - error_check_good \ - diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\ - [dbdump_diff "-dar" $init_file $dir $dbfile] 0 - } else { - # - # Operation was committed, verify it does - # not exist. - # - puts "\t\tCommand executed and committed." - # - # Check file existence. Consume operations remove - # the extent when we move off, which we should have - # done. 
- error_check_good consume_exists [file exists $dbq] 0 - } - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - - # - # Run recovery here on what we ended up with. Should be a no-op. - # - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (no-op) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - if { $txncmd == "abort"} { - # - # Operation was aborted, verify it did not change. - # - error_check_good \ - diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \ - [dbdump_diff "-dar" $init_file $dir $dbfile] 0 - } else { - # - # Operation was committed, verify it does - # not exist. Both operations should result - # in no file existing now that we've run recovery. - # - error_check_good after_recover1 [file exists $dbq] 0 - } - - # - # Run recovery here. Re-do the operation. - # Verify that the file doesn't exist - # (if we committed) or change (if we aborted) - # when we are done. - # - catch { file copy -force $dir/$dbfile $init_file } res - copy_extent_file $dir $dbfile init - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (init) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - if { $txncmd == "abort"} { - # - # Operation was aborted, verify it did not change. - # - error_check_good \ - diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \ - [dbdump_diff "-dar" $init_file $dir $dbfile] 0 - } else { - # - # Operation was committed, verify it does - # not exist. Both operations should result - # in no file existing now that we've run recovery. - # - error_check_good after_recover2 [file exists $dbq] 0 - } - - # - # Now move the .afterop file to $dbfile. Run recovery again. 
- # - set filecopy [glob $dir/*.afterop] - set afterop [lindex $filecopy 0] - file rename -force $afterop $dir/$dbfile - set afterop [string range $afterop \ - [expr [string last "/" $afterop] + 1] \ - [string last "." $afterop]] - move_file_extent $dir $dbfile afterop rename - - berkdb debug_check - puts -nonewline "\t\tAbout to run recovery (afterop) ... " - flush stdout - - set stat [catch {exec $util_path/db_recover -h $dir -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - return - } - puts "complete" - - if { $txncmd == "abort"} { - # - # Operation was aborted, verify it did not change. - # - error_check_good \ - diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \ - [dbdump_diff "-dar" $init_file $dir $dbfile] 0 - } else { - # - # Operation was committed, verify it still does - # not exist. - # - error_check_good after_recover3 [file exists $dbq] 0 - } -} diff --git a/storage/bdb/test/recd015.tcl b/storage/bdb/test/recd015.tcl deleted file mode 100644 index afe0ac8833b..00000000000 --- a/storage/bdb/test/recd015.tcl +++ /dev/null @@ -1,162 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd015.tcl,v 1.16 2004/01/28 03:36:29 bostic Exp $ -# -# TEST recd015 -# TEST This is a recovery test for testing lots of prepared txns. -# TEST This test is to force the use of txn_recover to call with the -# TEST DB_FIRST flag and then DB_NEXT. -proc recd015 { method args } { - source ./include.tcl - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd015: $method ($args) prepared txns test" - - # Create the database and environment. 
- - set numtxns 1 - set testfile NULL - - set env_cmd "berkdb_env -create -txn -home $testdir" - set msg "\tRecd015.a" - puts "$msg Simple test to prepare $numtxns txn " - foreach op { abort commit discard } { - env_cleanup $testdir - recd015_body $env_cmd $testfile $numtxns $msg $op - } - - # - # Now test large numbers of prepared txns to test DB_NEXT - # on txn_recover. - # - set numtxns 250 - set testfile recd015.db - set txnmax [expr $numtxns + 5] - # - # For this test we create our database ahead of time so that we - # don't need to send methods and args to the script. - # - env_cleanup $testdir - set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir" - set env [eval $env_cmd] - error_check_good dbenv [is_valid_env $env] TRUE - set db [eval {berkdb_open -create} $omethod -env $env $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - error_check_good envclose [$env close] 0 - - set msg "\tRecd015.b" - puts "$msg Large test to prepare $numtxns txn " - foreach op { abort commit discard } { - recd015_body $env_cmd $testfile $numtxns $msg $op - } - - set stat [catch {exec $util_path/db_printlog -h $testdir \ - > $testdir/LOG } ret] - error_check_good db_printlog $stat 0 - fileremove $testdir/LOG -} - -proc recd015_body { env_cmd testfile numtxns msg op } { - source ./include.tcl - - sentinel_init - set gidf $testdir/gidfile - fileremove -f $gidf - set pidlist {} - puts "$msg.0: Executing child script to prepare txns" - berkdb debug_check - set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \ - $testdir/recdout $env_cmd $testfile $gidf $numtxns &] - - lappend pidlist $p - watch_procs $pidlist 5 - set f1 [open $testdir/recdout r] - set r [read $f1] - puts $r - close $f1 - fileremove -f $testdir/recdout - - berkdb debug_check - puts -nonewline "$msg.1: Running recovery ... 
" - flush stdout - berkdb debug_check - set env [eval $env_cmd -recover] - error_check_good dbenv-recover [is_valid_env $env] TRUE - puts "complete" - - puts "$msg.2: getting txns from txn_recover" - set txnlist [$env txn_recover] - error_check_good txnlist_len [llength $txnlist] $numtxns - - set gfd [open $gidf r] - set i 0 - while { [gets $gfd gid] != -1 } { - set gids($i) $gid - incr i - } - close $gfd - # - # Make sure we have as many as we expect - error_check_good num_gids $i $numtxns - - set i 0 - puts "$msg.3: comparing GIDs and $op txns" - foreach tpair $txnlist { - set txn [lindex $tpair 0] - set gid [lindex $tpair 1] - error_check_good gidcompare $gid $gids($i) - error_check_good txn:$op [$txn $op] 0 - incr i - } - if { $op != "discard" } { - error_check_good envclose [$env close] 0 - return - } - # - # If we discarded, now do it again and randomly resolve some - # until all txns are resolved. - # - puts "$msg.4: resolving/discarding txns" - set txnlist [$env txn_recover] - set len [llength $txnlist] - set opval(1) "abort" - set opcnt(1) 0 - set opval(2) "commit" - set opcnt(2) 0 - set opval(3) "discard" - set opcnt(3) 0 - while { $len != 0 } { - set opicnt(1) 0 - set opicnt(2) 0 - set opicnt(3) 0 - # - # Abort/commit or discard them randomly until - # all are resolved. 
- # - for { set i 0 } { $i < $len } { incr i } { - set t [lindex $txnlist $i] - set txn [lindex $t 0] - set newop [berkdb random_int 1 3] - set ret [$txn $opval($newop)] - error_check_good txn_$opval($newop):$i $ret 0 - incr opcnt($newop) - incr opicnt($newop) - } -# puts "$opval(1): $opicnt(1) Total: $opcnt(1)" -# puts "$opval(2): $opicnt(2) Total: $opcnt(2)" -# puts "$opval(3): $opicnt(3) Total: $opcnt(3)" - - set txnlist [$env txn_recover] - set len [llength $txnlist] - } - - error_check_good envclose [$env close] 0 -} diff --git a/storage/bdb/test/recd016.tcl b/storage/bdb/test/recd016.tcl deleted file mode 100644 index 6f0d3d132f2..00000000000 --- a/storage/bdb/test/recd016.tcl +++ /dev/null @@ -1,179 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd016.tcl,v 11.13 2004/07/07 19:08:21 carol Exp $ -# -# TEST recd016 -# TEST Test recovery after checksum error. -proc recd016 { method args} { - global fixed_len - global log_log_record_types - global datastr - source ./include.tcl - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Recd016: skipping for specific pagesizes" - return - } - if { [is_queueext $method] == 1 } { - puts "Recd016: skipping for method $method" - return - } - - puts "Recd016: $method recovery after checksum error" - - # Create the database and environment. 
- env_cleanup $testdir - - set testfile recd016.db - set flags "-create -txn -home $testdir" - - puts "\tRecd016.a: creating environment" - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set pgsize 512 - set orig_fixed_len $fixed_len - set fixed_len [expr $pgsize / 4] - set opts [convert_args $method $args] - set omethod [convert_method $method] - set oflags "-create $omethod -mode 0644 \ - -auto_commit -chksum -pagesize $pgsize $opts $testfile" - set db [eval {berkdb_open} -env $dbenv $oflags] - - # - # Put some data. - # - set nument 50 - puts "\tRecd016.b: Put some data" - for { set i 1 } { $i <= $nument } { incr i } { - # Use 'i' as key so method doesn't matter - set key $i - set data $i$datastr - - # Put, in a txn. - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - error_check_good db_put \ - [$db put -txn $txn $key [chop_data $method $data]] 0 - error_check_good txn_commit [$txn commit] 0 - } - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - # - # We need to remove the env so that we don't get cached - # pages. - # - error_check_good env_remove [berkdb envremove -home $testdir] 0 - - puts "\tRecd016.c: Overwrite part of database" - # - # First just touch some bits in the file. We want to go - # through the paging system, so touch some data pages, - # like the middle of page 2. - # We should get a checksum error for the checksummed file. - # - set pg 2 - set fid [open $testdir/$testfile r+] - fconfigure $fid -translation binary - set seeklen [expr $pgsize * $pg + 200] - seek $fid $seeklen start - set byte [read $fid 1] - binary scan $byte c val - set newval [expr ~$val] - set newbyte [binary format c $newval] - seek $fid $seeklen start - puts -nonewline $fid $newbyte - close $fid - - # - # Verify we get the checksum error. 
When we get it, it should - # log the error as well, so when we run recovery we'll need to - # do catastrophic recovery. We do this in a sub-process so that - # the files are closed after the panic. - # - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - - set env_cmd "berkdb_env_noerr $flags" - set dbenv [send_cmd $f1 $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"] - error_check_good db [is_valid_db $db] TRUE - - # We need to set non-blocking mode so that after each command - # we can read all the remaining output from that command and - # we can know what the output from one command is. - fconfigure $f1 -blocking 0 - set ret [read $f1] - set got_err 0 - for { set i 1 } { $i <= $nument } { incr i } { - set stat [send_cmd $f1 "catch {$db get $i} r"] - set getret [send_cmd $f1 "puts \$r"] - set ret [read $f1] - if { $stat == 1 } { - error_check_good dbget:fail [is_substr $getret \ - "checksum error: page $pg"] 1 - set got_err 1 - break - } else { - set key [lindex [lindex $getret 0] 0] - set data [lindex [lindex $getret 0] 1] - error_check_good keychk $key $i - error_check_good datachk $data \ - [pad_data $method $i$datastr] - } - } - error_check_good got_chksum $got_err 1 - set ret [send_cmd $f1 "$db close"] - set extra [read $f1] - error_check_good db:fail [is_substr $ret "run recovery"] 1 - - set ret [send_cmd $f1 "$dbenv close"] - error_check_good env_close:fail [is_substr $ret "run recovery"] 1 - close $f1 - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $testdir - } - - puts "\tRecd016.d: Run normal recovery" - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 1 - error_check_good dbrec:fail \ - [is_substr $r "checksum error"] 1 - - catch {fileremove $testdir/$testfile} ret - puts "\tRecd016.e: Run catastrophic recovery" - set ret [catch {exec $util_path/db_recover 
-c -h $testdir} r] - error_check_good db_recover $ret 0 - - # - # Now verify the data was reconstructed correctly. - # - set env_cmd "berkdb_env_noerr $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set db [eval {berkdb_open} -env $dbenv $oflags] - error_check_good db [is_valid_db $db] TRUE - - for { set i 1 } { $i <= $nument } { incr i } { - set stat [catch {$db get $i} ret] - error_check_good stat $stat 0 - set key [lindex [lindex $ret 0] 0] - set data [lindex [lindex $ret 0] 1] - error_check_good keychk $key $i - error_check_good datachk $data [pad_data $method $i$datastr] - } - error_check_good db_close [$db close] 0 - error_check_good env_close [$dbenv close] 0 - set fixed_len $orig_fixed_len - return -} diff --git a/storage/bdb/test/recd017.tcl b/storage/bdb/test/recd017.tcl deleted file mode 100644 index 43dfb642194..00000000000 --- a/storage/bdb/test/recd017.tcl +++ /dev/null @@ -1,158 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd017.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $ -# -# TEST recd017 -# TEST Test recovery and security. This is basically a watered -# TEST down version of recd001 just to verify that encrypted environments -# TEST can be recovered. -proc recd017 { method {select 0} args} { - global fixed_len - global encrypt - global passwd - global has_crypto - source ./include.tcl - - # Skip test if release does not support encryption. - if { $has_crypto == 0 } { - puts "Skipping recd017 for non-crypto release." - return - } - - set orig_fixed_len $fixed_len - set opts [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd017: $method operation/transaction tests" - - # Create the database and environment. 
- env_cleanup $testdir - - # The recovery tests were originally written to - # do a command, abort, do it again, commit, and then - # repeat the sequence with another command. Each command - # tends to require that the previous command succeeded and - # left the database a certain way. To avoid cluttering up the - # op_recover interface as well as the test code, we create two - # databases; one does abort and then commit for each op, the - # other does prepare, prepare-abort, and prepare-commit for each - # op. If all goes well, this allows each command to depend - # exactly one successful iteration of the previous command. - set testfile recd017.db - set testfile2 recd017-2.db - - set flags "-create -encryptaes $passwd -txn -home $testdir" - - puts "\tRecd017.a.0: creating environment" - set env_cmd "berkdb_env $flags" - convert_encrypt $env_cmd - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # - # We need to create a database to get the pagesize (either - # the default or whatever might have been specified). - # Then remove it so we can compute fixed_len and create the - # real database. - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -encrypt $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set stat [$db stat] - # - # Compute the fixed_len based on the pagesize being used. - # We want the fixed_len to be 1/4 the pagesize. - # - set pg [get_pagesize $stat] - error_check_bad get_pagesize $pg -1 - set fixed_len [expr $pg / 4] - error_check_good db_close [$db close] 0 - error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0 - - # Convert the args again because fixed_len is now real. - # Create the databases and close the environment. - # cannot specify db truncate in txn protected env!!! 
- set opts [convert_args $method ""] - convert_encrypt $env_cmd - set omethod [convert_method $method] - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -encrypt $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -encrypt $opts $testfile2" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - error_check_good env_close [$dbenv close] 0 - - puts "\tRecd017.a.1: Verify db_printlog can read logfile" - set tmpfile $testdir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \ - > $tmpfile} ret] - error_check_good db_printlog $stat 0 - fileremove $tmpfile - - # List of recovery tests: {CMD MSG} pairs. - set rlist { - { {DB put -txn TXNID $key $data} "Recd017.b: put"} - { {DB del -txn TXNID $key} "Recd017.c: delete"} - } - - # These are all the data values that we're going to need to read - # through the operation table and run the recovery tests. 
- - if { [is_record_based $method] == 1 } { - set key 1 - } else { - set key recd017_key - } - set data recd017_data - foreach pair $rlist { - set cmd [subst [lindex $pair 0]] - set msg [lindex $pair 1] - if { $select != 0 } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { [lsearch $select $tag] == -1 } { - continue - } - } - - if { [is_queue $method] != 1 } { - if { [string first append $cmd] != -1 } { - continue - } - if { [string first consume $cmd] != -1 } { - continue - } - } - -# if { [is_fixed_length $method] == 1 } { -# if { [string first partial $cmd] != -1 } { -# continue -# } -# } - op_recover abort $testdir $env_cmd $testfile $cmd $msg - op_recover commit $testdir $env_cmd $testfile $cmd $msg - # - # Note that since prepare-discard ultimately aborts - # the txn, it must come before prepare-commit. - # - op_recover prepare-abort $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-discard $testdir $env_cmd $testfile2 \ - $cmd $msg - op_recover prepare-commit $testdir $env_cmd $testfile2 \ - $cmd $msg - } - set fixed_len $orig_fixed_len - return -} diff --git a/storage/bdb/test/recd018.tcl b/storage/bdb/test/recd018.tcl deleted file mode 100644 index 2f2300cf97b..00000000000 --- a/storage/bdb/test/recd018.tcl +++ /dev/null @@ -1,110 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd018.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $ -# -# TEST recd018 -# TEST Test recover of closely interspersed checkpoints and commits. -# -# This test is from the error case from #4230. -# -proc recd018 { method {ndbs 10} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "018" - - puts "Recd$tnum ($args): $method recovery of checkpoints and commits." 
- - set tname recd$tnum.db - env_cleanup $testdir - - set i 0 - if { [is_record_based $method] == 1 } { - set key 1 - set key2 2 - } else { - set key KEY - set key2 KEY2 - } - - puts "\tRecd$tnum.a: Create environment and database." - set flags "-create -txn -home $testdir" - - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod" - for { set i 0 } { $i < $ndbs } { incr i } { - set testfile $tname.$i - set db($i) [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db($i)] TRUE - set file $testdir/$testfile.init - catch { file copy -force $testdir/$testfile $file} res - copy_extent_file $testdir $testfile init - } - - # Main loop: Write a record or two to each database. - # Do a commit immediately followed by a checkpoint after each one. - error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0 - - puts "\tRecd$tnum.b Put/Commit/Checkpoint to $ndbs databases" - for { set i 0 } { $i < $ndbs } { incr i } { - set testfile $tname.$i - set data $i - - # Put, in a txn. 
- set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - error_check_good db_put \ - [$db($i) put -txn $txn $key [chop_data $method $data]] 0 - error_check_good txn_commit [$txn commit] 0 - error_check_good txn_checkpt [$dbenv txn_checkpoint] 0 - if { [expr $i % 2] == 0 } { - set txn [$dbenv txn] - error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE - error_check_good db_put [$db($i) put \ - -txn $txn $key2 [chop_data $method $data]] 0 - error_check_good txn_commit [$txn commit] 0 - error_check_good txn_checkpt [$dbenv txn_checkpoint] 0 - } - error_check_good db_close [$db($i) close] 0 - set file $testdir/$testfile.afterop - catch { file copy -force $testdir/$testfile $file} res - copy_extent_file $testdir $testfile afterop - } - error_check_good env_close [$dbenv close] 0 - - # Now, loop through and recover to each timestamp, verifying the - # expected increment. - puts "\tRecd$tnum.c: Run recovery (no-op)" - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - - puts "\tRecd$tnum.d: Run recovery (initial file)" - for { set i 0 } {$i < $ndbs } { incr i } { - set testfile $tname.$i - set file $testdir/$testfile.init - catch { file copy -force $file $testdir/$testfile } res - move_file_extent $testdir $testfile init copy - } - - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - - puts "\tRecd$tnum.e: Run recovery (after file)" - for { set i 0 } {$i < $ndbs } { incr i } { - set testfile $tname.$i - set file $testdir/$testfile.afterop - catch { file copy -force $file $testdir/$testfile } res - move_file_extent $testdir $testfile afterop copy - } - - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - -} diff --git a/storage/bdb/test/recd019.tcl b/storage/bdb/test/recd019.tcl deleted file mode 100644 index 4e4e6051739..00000000000 --- a/storage/bdb/test/recd019.tcl +++ /dev/null @@ -1,123 +0,0 @@ -# 
See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd019.tcl,v 11.6 2004/07/07 19:08:21 carol Exp $ -# -# TEST recd019 -# TEST Test txn id wrap-around and recovery. -proc recd019 { method {numid 50} args} { - global fixed_len - global txn_curid - global log_log_record_types - source ./include.tcl - - set orig_fixed_len $fixed_len - set opts [convert_args $method $args] - set omethod [convert_method $method] - - puts "Recd019: $method txn id wrap-around test" - - # Create the database and environment. - env_cleanup $testdir - - set testfile recd019.db - - set flags "-create -txn -home $testdir" - - puts "\tRecd019.a: creating environment" - set env_cmd "berkdb_env $flags" - set dbenv [eval $env_cmd] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # Test txn wrapping. Force a txn_recycle msg. - # - set new_curid $txn_curid - set new_maxid [expr $new_curid + $numid] - error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0 - - # - # We need to create a database to get the pagesize (either - # the default or whatever might have been specified). - # Then remove it so we can compute fixed_len and create the - # real database. - set oflags "-create $omethod -mode 0644 \ - -env $dbenv $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - set stat [$db stat] - # - # Compute the fixed_len based on the pagesize being used. - # We want the fixed_len to be 1/4 the pagesize. - # - set pg [get_pagesize $stat] - error_check_bad get_pagesize $pg -1 - set fixed_len [expr $pg / 4] - error_check_good db_close [$db close] 0 - error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0 - - # Convert the args again because fixed_len is now real. - # Create the databases and close the environment. - # cannot specify db truncate in txn protected env!!! 
- set opts [convert_args $method ""] - set omethod [convert_method $method] - set oflags "-create $omethod -mode 0644 \ - -env $dbenv -auto_commit $opts $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - # - # Force txn ids to wrap twice and then some. - # - set nument [expr $numid * 3 - 2] - puts "\tRecd019.b: Wrapping txn ids after $numid" - set file $testdir/$testfile.init - catch { file copy -force $testdir/$testfile $file} res - copy_extent_file $testdir $testfile init - for { set i 1 } { $i <= $nument } { incr i } { - # Use 'i' as key so method doesn't matter - set key $i - set data $i - - # Put, in a txn. - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - error_check_good db_put \ - [$db put -txn $txn $key [chop_data $method $data]] 0 - error_check_good txn_commit [$txn commit] 0 - } - error_check_good db_close [$db close] 0 - set file $testdir/$testfile.afterop - catch { file copy -force $testdir/$testfile $file} res - copy_extent_file $testdir $testfile afterop - error_check_good env_close [$dbenv close] 0 - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $testdir - } - - # Now, loop through and recover. 
- puts "\tRecd019.c: Run recovery (no-op)" - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - - puts "\tRecd019.d: Run recovery (initial file)" - set file $testdir/$testfile.init - catch { file copy -force $file $testdir/$testfile } res - move_file_extent $testdir $testfile init copy - - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - - puts "\tRecd019.e: Run recovery (after file)" - set file $testdir/$testfile.afterop - catch { file copy -force $file $testdir/$testfile } res - move_file_extent $testdir $testfile afterop copy - - set ret [catch {exec $util_path/db_recover -h $testdir} r] - error_check_good db_recover $ret 0 - set fixed_len $orig_fixed_len - return -} diff --git a/storage/bdb/test/recd020.tcl b/storage/bdb/test/recd020.tcl deleted file mode 100644 index 6fbe4b78e23..00000000000 --- a/storage/bdb/test/recd020.tcl +++ /dev/null @@ -1,80 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recd020.tcl,v 11.3 2004/09/22 18:01:05 bostic Exp $ -# -# TEST recd020 -# TEST Test creation of intermediate directories -- an -# TEST undocumented, UNIX-only feature. -# -proc recd020 { method args } { - source ./include.tcl - global tcl_platform - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "020" - set nentries 10 - - if { $tcl_platform(platform) != "unix" } { - puts "Skipping recd$tnum for non-UNIX platform." - return - } - - puts "Recd$tnum ($method):\ - Test creation of intermediate directories in recovery." - - # Create the original intermediate directory. - env_cleanup $testdir - set intdir INTDIR - file mkdir $testdir/$intdir - - set testfile recd$tnum.db - set flags "-create -txn -home $testdir" - - puts "\tRecd$tnum.a: Create environment and populate database." 
- set env_cmd "berkdb_env $flags" - set env [eval $env_cmd] - error_check_good env [is_valid_env $env] TRUE - - set db [eval berkdb_open \ - -create $omethod $args -env $env -auto_commit $intdir/$testfile] - error_check_good db_open [is_valid_db $db] TRUE - - set txn [$env txn] - set data "data" - for { set i 1 } { $i <= $nentries } { incr i } { - error_check_good db_put [eval \ - {$db put} -txn $txn $i [chop_data $method $data.$i]] 0 - } - error_check_good txn_commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - - puts "\tRecd$tnum.b: Remove intermediate directory." - error_check_good directory_there [file exists $testdir/$intdir] 1 - file delete -force $testdir/$intdir - error_check_good directory_gone [file exists $testdir/$intdir] 0 - - puts "\tRecd020.c: Run recovery, recreating intermediate directory." - set env [eval $env_cmd -set_intermediate_dir 0751 -recover] - error_check_good env [is_valid_env $env] TRUE - - puts "\tRecd020.d: Reopen test file to verify success." - set db [berkdb_open -env $env $intdir/$testfile] - error_check_good db_open [is_valid_db $db] TRUE - for { set i 1 } { $i <= $nentries } { incr i } { - set ret [$db get $i] - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_good key $k $i - error_check_good data $d [pad_data $method $data.$i] - } - - # Clean up. - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - -} diff --git a/storage/bdb/test/recd15scr.tcl b/storage/bdb/test/recd15scr.tcl deleted file mode 100644 index ef6fe7d0332..00000000000 --- a/storage/bdb/test/recd15scr.tcl +++ /dev/null @@ -1,74 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: recd15scr.tcl,v 1.7 2004/01/28 03:36:29 bostic Exp $ -# -# Recd15 - lots of txns - txn prepare script -# Usage: recd15script envcmd dbcmd gidf numtxns -# envcmd: command to open env -# dbfile: name of database file -# gidf: name of global id file -# numtxns: number of txns to start - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "recd15script envcmd dbfile gidfile numtxns" - -# Verify usage -if { $argc != 4 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set envcmd [ lindex $argv 0 ] -set dbfile [ lindex $argv 1 ] -set gidfile [ lindex $argv 2 ] -set numtxns [ lindex $argv 3 ] - -set txnmax [expr $numtxns + 5] -set dbenv [eval $envcmd] -error_check_good envopen [is_valid_env $dbenv] TRUE - -set usedb 0 -if { $dbfile != "NULL" } { - set usedb 1 - set db [berkdb_open -auto_commit -env $dbenv $dbfile] - error_check_good dbopen [is_valid_db $db] TRUE -} - -puts "\tRecd015script.a: Begin $numtxns txns" -for {set i 0} {$i < $numtxns} {incr i} { - set t [$dbenv txn] - error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE - set txns($i) $t - if { $usedb } { - set dbc [$db cursor -txn $t] - error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE - set curs($i) $dbc - } -} - -puts "\tRecd015script.b: Prepare $numtxns txns" -set gfd [open $gidfile w+] -for {set i 0} {$i < $numtxns} {incr i} { - if { $usedb } { - set dbc $curs($i) - error_check_good dbc_close [$dbc close] 0 - } - set t $txns($i) - set gid [make_gid recd015script:$t] - puts $gfd $gid - error_check_good txn_prepare:$t [$t prepare $gid] 0 -} -close $gfd - -# -# We do not close the db or env, but exit with the txns outstanding. 
-# -puts "\tRecd015script completed successfully" -flush stdout diff --git a/storage/bdb/test/recdscript.tcl b/storage/bdb/test/recdscript.tcl deleted file mode 100644 index 559d3407a09..00000000000 --- a/storage/bdb/test/recdscript.tcl +++ /dev/null @@ -1,37 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: recdscript.tcl,v 11.6 2004/01/28 03:36:29 bostic Exp $ -# -# Recovery txn prepare script -# Usage: recdscript op dir envcmd dbfile cmd -# op: primary txn operation -# dir: test directory -# envcmd: command to open env -# dbfile: name of database file -# gidf: name of global id file -# cmd: db command to execute - -source ./include.tcl -source $test_path/test.tcl - -set usage "recdscript op dir envcmd dbfile gidfile cmd" - -# Verify usage -if { $argc != 6 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set op [ lindex $argv 0 ] -set dir [ lindex $argv 1 ] -set envcmd [ lindex $argv 2 ] -set dbfile [ lindex $argv 3 ] -set gidfile [ lindex $argv 4 ] -set cmd [ lindex $argv 5 ] - -op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd -flush stdout diff --git a/storage/bdb/test/rep001.tcl b/storage/bdb/test/rep001.tcl deleted file mode 100644 index 94163986a04..00000000000 --- a/storage/bdb/test/rep001.tcl +++ /dev/null @@ -1,198 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rep001.tcl,v 1.35 2004/09/22 18:01:05 bostic Exp $ -# -# TEST rep001 -# TEST Replication rename and forced-upgrade test. -# TEST -# TEST Run rep_test in a replicated master environment. -# TEST Verify that the database on the client is correct. -# TEST Next, remove the database, close the master, upgrade the -# TEST client, reopen the master, and make sure the new master can -# TEST correctly run rep_test and propagate it in the other direction. 
- -proc rep001 { method { niter 1000 } { tnum "001" } args } { - global passwd - global has_crypto - - # Run tests with and without recovery. If we're doing testing - # of in-memory logging, skip the combination of recovery - # and in-memory logging -- it doesn't make sense. - set logsets [create_logsets 2] - set saved_args $args - - foreach recopt { "" "-recover" } { - foreach l $logsets { - set logindex [lsearch -exact $l "in-memory"] - if { $recopt == "-recover" && $logindex != -1 } { - puts "Skipping test with -recover for in-memory logs." - continue - } - set envargs "" - set args $saved_args - puts "Rep$tnum: Replication sanity test ($method $recopt)." - puts "Rep$tnum: Master logs are [lindex $l 0]" - puts "Rep$tnum: Client logs are [lindex $l 1]" - rep001_sub $method \ - $niter $tnum $envargs $l $recopt $args - - # Skip encrypted tests if not supported. - if { $has_crypto == 0 } { - continue - } - - # Run the same tests with security. - append envargs " -encryptaes $passwd " - append args " -encrypt " - puts "Rep$tnum: Replication and security sanity test\ - ($method $recopt)." - puts "Rep$tnum: Master logs are [lindex $l 0]" - puts "Rep$tnum: Client logs are [lindex $l 1]" - rep001_sub $method \ - $niter $tnum $envargs $l $recopt $args - } - } -} - -proc rep001_sub { method niter tnum envargs logset recargs largs } { - source ./include.tcl - global testdir - global encrypt - - env_cleanup $testdir - - replsetup $testdir/MSGQUEUEDIR - - set masterdir $testdir/MASTERDIR - set clientdir $testdir/CLIENTDIR - - file mkdir $masterdir - file mkdir $clientdir - - set m_logtype [lindex $logset 0] - set c_logtype [lindex $logset 1] - - # In-memory logs require a large log buffer, and cannot - # be used with -txn nosync. Adjust the args for master - # and client. - set m_logargs [adjust_logargs $m_logtype] - set c_logargs [adjust_logargs $c_logtype] - set m_txnargs [adjust_txnargs $m_logtype] - set c_txnargs [adjust_txnargs $c_logtype] - - # Open a master. 
- repladd 1 - set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ - -log_max 1000000 $envargs $m_logargs $recargs \ - -home $masterdir -errpfx MASTER $m_txnargs -rep_master \ - -rep_transport \[list 1 replsend\]" -# set env_cmd(M) "berkdb_env_noerr -create -lock_max 2500 \ -# -log_max 1000000 $envargs $m_logargs $recargs \ -# -home $masterdir \ -# -verbose {rep on} -errfile /dev/stderr \ -# -errpfx MASTER $m_txnargs -rep_master \ -# -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M)] - error_check_good master_env [is_valid_env $masterenv] TRUE - - # Open a client - repladd 2 - set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ - -log_max 1000000 $envargs $c_logargs $recargs \ - -home $clientdir -errpfx CLIENT $c_txnargs -rep_client \ - -rep_transport \[list 2 replsend\]" -# set env_cmd(C) "berkdb_env_noerr -create -lock_max 2500 \ -# -log_max 1000000 $envargs $c_logargs $recargs \ -# -home $clientdir \ -# -verbose {rep on} -errfile /dev/stderr \ -# -errpfx CLIENT $c_txnargs -rep_client \ -# -rep_transport \[list 2 replsend\]" - set clientenv [eval $env_cmd(C)] - error_check_good client_env [is_valid_env $clientenv] TRUE - - # Bring the client online by processing the startup messages. - set envlist "{$masterenv 1} {$clientenv 2}" - process_msgs $envlist - - # Run rep_test in the master (and update client). - puts "\tRep$tnum.a:\ - Running rep_test in replicated env ($envargs $recargs)." - eval rep_test $method $masterenv NULL $niter 0 0 0 $largs - process_msgs $envlist - - puts "\tRep$tnum.b: Verifying client database contents." 
- set dbname "test.db" - set masterdb [berkdb_open -env $masterenv -auto_commit $dbname] - set clientdb [berkdb_open -env $clientenv -auto_commit $dbname] - - error_check_good compare_master_and_client [db_compare \ - $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0 - - error_check_good master_close [$masterdb close] 0 - error_check_good client_close [$clientdb close] 0 - - # Remove the file (and update client). - puts "\tRep$tnum.c: Remove the file on the master and close master." - error_check_good remove \ - [$masterenv dbremove -auto_commit $dbname] 0 - error_check_good masterenv_close [$masterenv close] 0 - process_msgs $envlist - - puts "\tRep$tnum.d: Upgrade client." - set newmasterenv $clientenv - error_check_good upgrade_client [$newmasterenv rep_start -master] 0 - - # Run rep_test in the new master - puts "\tRep$tnum.e: Running rep_test in new master." - eval rep_test $method $newmasterenv NULL $niter 0 0 0 $largs - set envlist "{$newmasterenv 2}" - process_msgs $envlist - - puts "\tRep$tnum.f: Reopen old master as client and catch up." - # Throttle master so it can't send everything at once - $newmasterenv rep_limit 0 [expr 64 * 1024] - set newclientenv [eval {berkdb_env -create -recover} $envargs \ - -txn nosync -lock_max 2500 \ - {-home $masterdir -rep_client -rep_transport [list 1 replsend]}] - error_check_good newclient_env [is_valid_env $newclientenv] TRUE - set envlist "{$newclientenv 1} {$newmasterenv 2}" - process_msgs $envlist - - # If we're running with a low number of iterations, we might - # not have had to throttle the data transmission; skip the check. - if { $niter > 200 } { - set nthrottles \ - [stat_field $newmasterenv rep_stat "Transmission limited"] - error_check_bad nthrottles $nthrottles -1 - error_check_bad nthrottles $nthrottles 0 - } - - # Run a modified rep_test in the new master (and update client). - puts "\tRep$tnum.g: Running rep_test in new master." 
- eval rep_test $method \ - $newmasterenv NULL $niter $niter $niter 0 $largs - process_msgs $envlist - - # Verify the database in the client dir. - puts "\tRep$tnum.h: Verifying new client database contents." - set masterdb [berkdb_open -env $newmasterenv -auto_commit $dbname] - set clientdb [berkdb_open -env $newclientenv -auto_commit $dbname] - - error_check_good compare_master_and_client [db_compare \ - $masterdb $clientdb $masterdir/$dbname $clientdir/$dbname] 0 - - error_check_good master_close [$masterdb close] 0 - error_check_good client_close [$clientdb close] 0 - error_check_good newmasterenv_close [$newmasterenv close] 0 - error_check_good newclientenv_close [$newclientenv close] 0 - - if { [lsearch $envargs "-encrypta*"] !=-1 } { - set encrypt 1 - } - error_check_good verify \ - [verify_dir $clientdir "\tRep$tnum.k: " 0 0 1] 0 - replclose $testdir/MSGQUEUEDIR -} diff --git a/storage/bdb/test/rep002.tcl b/storage/bdb/test/rep002.tcl deleted file mode 100644 index 1a3683f2e59..00000000000 --- a/storage/bdb/test/rep002.tcl +++ /dev/null @@ -1,323 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2002-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rep002.tcl,v 11.37 2004/09/22 18:01:05 bostic Exp $ -# -# TEST rep002 -# TEST Basic replication election test. -# TEST -# TEST Run a modified version of test001 in a replicated master -# TEST environment; hold an election among a group of clients to -# TEST make sure they select a proper master from amongst themselves, -# TEST in various scenarios. - -proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } { - - if { [is_record_based $method] == 1 } { - puts "Rep002: Skipping for method $method." - return - } - - set logsets [create_logsets [expr $nclients + 1]] - - # Run the body of the test with and without recovery. 
- set recopts { "" "-recover" } - foreach r $recopts { - foreach l $logsets { - set logindex [lsearch -exact $l "in-memory"] - if { $r == "-recover" && $logindex != -1 } { - puts "Skipping test with -recover for in-memory logs." - } - puts "Rep$tnum ($method $r):\ - Replication election test with $nclients clients." - puts "Rep$tnum: Master logs are [lindex $l 0]" - for { set i 0 } { $i < $nclients } { incr i } { - puts "Rep$tnum: Client $i logs are\ - [lindex $l [expr $i + 1]]" - } - rep002_sub $method $niter $nclients $tnum $l $r $args - } - } -} - -proc rep002_sub { method niter nclients tnum logset recargs largs } { - source ./include.tcl - global elect_timeout elect_serial - global is_windows_test - set elect_timeout 5000000 - - env_cleanup $testdir - - set qdir $testdir/MSGQUEUEDIR - replsetup $qdir - - set masterdir $testdir/MASTERDIR - file mkdir $masterdir - set m_logtype [lindex $logset 0] - set m_logargs [adjust_logargs $m_logtype] - set m_txnargs [adjust_txnargs $m_logtype] - - for { set i 0 } { $i < $nclients } { incr i } { - set clientdir($i) $testdir/CLIENTDIR.$i - file mkdir $clientdir($i) - set c_logtype($i) [lindex $logset [expr $i + 1]] - set c_logargs($i) [adjust_logargs $c_logtype($i)] - set c_txnargs($i) [adjust_txnargs $c_logtype($i)] - } - - # Open a master. - repladd 1 - set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ - -home $masterdir $m_logargs -errpfx MASTER \ - $m_txnargs -rep_master -rep_transport \[list 1 replsend\]" -# set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ -# -home $masterdir $m_logargs -errpfx MASTER -errfile /dev/stderr \ -# -verbose {rep on} $m_txnargs -rep_master \ -# -rep_transport \[list 1 replsend\]" - # In an election test, the -recovery arg must not go - # in the env_cmd string because that is going to be - # passed to a child process. - set masterenv [eval $env_cmd(M) $recargs] - error_check_good master_env [is_valid_env $masterenv] TRUE - - # Open the clients. 
- for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - repladd $envid - set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ - $c_logargs($i) $c_txnargs($i) -rep_client -errpfx CLIENT$i \ - -rep_transport \[list $envid replsend\]" -# set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ -# $c_logargs($i) -verbose {rep on} -errfile /dev/stderr \ -# $c_txnargs($i) -rep_client -errpfx CLIENT$i \ -# -rep_transport \[list $envid replsend\]" - set clientenv($i) [eval $env_cmd($i) $recargs] - error_check_good \ - client_env($i) [is_valid_env $clientenv($i)] TRUE - } - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - set envlist {} - lappend envlist "$masterenv 1" - for { set i 0 } { $i < $nclients } { incr i } { - lappend envlist "$clientenv($i) [expr $i + 2]" - } - process_msgs $envlist - - # Run a modified test001 in the master. - puts "\tRep$tnum.a: Running test001 in replicated env." - eval test001 $method $niter 0 0 $tnum -env $masterenv $largs - process_msgs $envlist - - # Verify the database in the client dir. - for { set i 0 } { $i < $nclients } { incr i } { - puts "\tRep$tnum.b: Verifying contents of client database $i." - set testdir [get_home $masterenv] - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - open_and_dump_file test$tnum.db $clientenv($i) $testdir/t1 \ - test001.check dump_file_direction "-first" "-next" - - if { [string compare [convert_method $method] -recno] != 0 } { - filesort $t1 $t3 - } - error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0 - - verify_dir $clientdir($i) "\tRep$tnum.c: " 0 0 1 - } - - # Start an election in the first client. - puts "\tRep$tnum.d: Starting election with existing master." - # We want to verify that the master declares the election - # over by fiat, even if everyone uses a lower priority than 20. 
- # Loop and process all messages, keeping track of which - # sites got a HOLDELECTION and checking that the returned newmaster, - # if any, is 1 (the master's replication ID). - set got_hold_elect(M) 0 - for { set i 0 } { $i < $nclients } { incr i } { - set got_hold_elect($i) 0 - set elect_pipe($i) INVALID - } - set elect_pipe(0) [start_election C0 \ - $qdir $env_cmd(0) [expr $nclients + 1] $nclients 20 $elect_timeout] - - tclsleep 2 - - set got_master 0 - while { 1 } { - set nproced 0 - set he 0 - set nm 0 - set nm2 0 - - incr nproced [replprocessqueue $masterenv 1 0 he nm] - - if { $he == 1 } { - incr elect_serial - set elect_pipe(M) [start_election CM $qdir \ - $env_cmd(M) [expr $nclients + 1] $nclients \ - 0 $elect_timeout] - set got_hold_elect(M) 1 - } - if { $nm != 0 } { - error_check_good newmaster_is_master $nm 1 - set got_master $nm - } - if { $nm2 != 0 } { - error_check_good newmaster_is_master $nm2 1 - set got_master $nm2 - } - - for { set i 0 } { $i < $nclients } { incr i } { - set he 0 - set envid [expr $i + 2] - incr nproced \ - [replprocessqueue $clientenv($i) $envid 0 he nm] - set child_done [check_election $elect_pipe($i) nm2] - if { $he == 1 } { - # error_check_bad client(0)_in_elect $i 0 - if { $elect_pipe($i) != "INVALID" } { - close_election $elect_pipe($i) - } - incr elect_serial - set pfx CHILD$i.$elect_serial - set elect_pipe($i) [start_election $pfx $qdir \ - $env_cmd($i) [expr $nclients + 1] \ - $nclients 0 \ - $elect_timeout] - set got_hold_elect($i) 1 - } - if { $nm != 0 } { - error_check_good newmaster_is_master $nm 1 - set got_master $nm - } - if { $nm2 != 0 } { - error_check_good newmaster_is_master $nm2 1 - set got_master $nm2 - } - } - - if { $nproced == 0 } { - break - } - } - error_check_good got_master $got_master 1 - cleanup_elections - - # We need multiple clients to proceed from here. - if { $nclients < 2 } { - puts "\tRep$tnum: Skipping for less than two clients." 
- error_check_good masterenv_close [$masterenv close] 0 - for { set i 0 } { $i < $nclients } { incr i } { - error_check_good clientenv_close($i) \ - [$clientenv($i) close] 0 - } - return - } - - # Make sure all the clients are synced up and ready to be good - # voting citizens. - error_check_good master_flush [$masterenv rep_flush] 0 - process_msgs $envlist - - # Now hold another election in the first client, this time with - # a dead master. - puts "\tRep$tnum.e: Starting election with dead master." - error_check_good masterenv_close [$masterenv close] 0 - set envlist [lreplace $envlist 0 0] - - set m "Rep$tnum.e" - # We're not going to be using err_cmd, so initialize to "none". - # Client #1 has priority 100; everyone else has priority 10. - for { set i 0 } { $i < $nclients } { incr i } { - set err_cmd($i) "none" - set crash($i) 0 - if { $i == 1 } { - set pri($i) 100 - } else { - set pri($i) 10 - } - } - set nsites $nclients - set nvotes $nclients - # The elector calls the first election. The expected winner - # is $win. - set elector 1 - set win 1 - run_election env_cmd envlist err_cmd pri crash $qdir $m \ - $elector $nsites $nvotes $nclients $win 1 "test$tnum.db" - - # Hold an election with two clients at the same (winning) priority. - # Make sure that the tie gets broken, and that the third client - # does not win. - puts "\tRep$tnum.f: Election with two clients at same priority." - set m "Rep$tnum.f" - # Clients 0 and 1 have high, matching priority. - for { set i 0 } { $i < $nclients } { incr i } { - if { $i >= 2 } { - set pri($i) 10 - } else { - set pri($i) 100 - } - } - - # Run several elections. - set elections 5 - for { set i 0 } { $i < $elections } { incr i } { - # - # The expected winner is 0 or 1. Since run_election can only - # handle one expected winner, catch the result and inspect it. 
- # - set elector 0 - set win 1 - set altwin 0 - if {[catch {eval run_election \ - env_cmd envlist err_cmd pri crash $qdir $m $elector $nsites \ - $nvotes $nclients $win 1 "test$tnum.db"} res]} { - # - # If the primary winner didn't win, make sure - # the alternative winner won. Do all the cleanup - # for that winner normally done in run_election: - # open and close the new master, then reopen as a - # client for the next cycle. - # - puts "\t$m: Election $i: Alternate winner $altwin won." - error_check_good check_winner [is_substr \ - $res "expected 3, got [expr $altwin + 2]"] 1 - error_check_good make_master \ - [$clientenv($altwin) rep_start -master] 0 - - cleanup_elections - process_msgs $envlist - - error_check_good newmaster_close \ - [$clientenv($altwin) close] 0 - set clientenv($altwin) [eval $env_cmd($altwin)] - error_check_good cl($altwin) \ - [is_valid_env $clientenv($altwin)] TRUE - set newelector "$clientenv($altwin) [expr $altwin + 2]" - set envlist [lreplace $envlist $altwin $altwin $newelector] - } else { - puts "\t$m: Election $i: Primary winner $win won." - } - process_msgs $envlist - } - - foreach pair $envlist { - set cenv [lindex $pair 0] - error_check_good cenv_close [$cenv close] 0 - } - - replclose $testdir/MSGQUEUEDIR - - # If we're on Windows, we need to forcibly remove some of the - # files created when the alternate winner won. - if { $is_windows_test == 1 } { - set filelist [glob -nocomplain $testdir/CLIENTDIR.$altwin/*] - fileremove -f $filelist - } -} diff --git a/storage/bdb/test/rep003.tcl b/storage/bdb/test/rep003.tcl deleted file mode 100644 index 13ef257da40..00000000000 --- a/storage/bdb/test/rep003.tcl +++ /dev/null @@ -1,277 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2002-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: rep003.tcl,v 11.19 2004/09/22 18:01:05 bostic Exp $ -# -# TEST rep003 -# TEST Repeated shutdown/restart replication test -# TEST -# TEST Run a quick put test in a replicated master environment; -# TEST start up, shut down, and restart client processes, with -# TEST and without recovery. To ensure that environment state -# TEST is transient, use DB_PRIVATE. - -proc rep003 { method { tnum "003" } args } { - source ./include.tcl - global rep003_dbname rep003_omethod rep003_oargs - - if { [is_record_based $method] } { - puts "Rep$tnum: Skipping for method $method" - return - } - - set rep003_dbname rep003.db - set rep003_omethod [convert_method $method] - set rep003_oargs [convert_args $method $args] - - # Run the body of the test with and without recovery. If we're - # testing in-memory logging, skip the combination of recovery - # and in-memory logging -- it doesn't make sense. - - set logsets [create_logsets 2] - foreach recopt { "" "-recover" } { - foreach l $logsets { - set logindex [lsearch -exact $l "in-memory"] - if { $recopt == "-recover" && $logindex != -1 } { - puts "Rep$tnum: Skipping for\ - in-memory logs with -recover." - continue - } - puts "Rep$tnum ($method $recopt):\ - Replication repeated-startup test." - puts "Rep$tnum: Master logs are [lindex $l 0]" - puts "Rep$tnum: Client logs are [lindex $l 1]" - rep003_sub $method $tnum $l $recopt $args - } - } -} - -proc rep003_sub { method tnum logset recargs largs } { - source ./include.tcl - - env_cleanup $testdir - - replsetup $testdir/MSGQUEUEDIR - - set masterdir $testdir/MASTERDIR - set clientdir $testdir/CLIENTDIR - - file mkdir $masterdir - file mkdir $clientdir - - set m_logtype [lindex $logset 0] - set c_logtype [lindex $logset 1] - - # In-memory logs require a large log buffer, and cannot - # be used with -txn nosync. This test already requires - # -txn, so adjust the logargs only. 
- set m_logargs [adjust_logargs $m_logtype] - set c_logargs [adjust_logargs $c_logtype] - - # Open a master. - repladd 1 - set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ - -errpfx MASTER -errfile /dev/stderr \ - -home $masterdir -txn $m_logargs -rep_master \ - -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M) $recargs] - error_check_good master_env [is_valid_env $masterenv] TRUE - - puts "\tRep$tnum.a: Simple client startup test." - - # Put item one. - rep003_put $masterenv A1 a-one - - # Open a client. - repladd 2 - set env_cmd(C) "berkdb_env_noerr -create -private -home $clientdir \ - -txn $c_logargs -errpfx CLIENT -errfile /dev/stderr \ - -rep_client -rep_transport \[list 2 replsend\]" - set clientenv [eval $env_cmd(C) $recargs] - error_check_good client_env [is_valid_env $clientenv] TRUE - - # Put another quick item. - rep003_put $masterenv A2 a-two - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - set envlist "{$masterenv 1} {$clientenv 2}" - process_msgs $envlist - - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - - error_check_good clientenv_close [$clientenv close] 0 - replclear 2 - - # Now reopen the client after doing another put. - puts "\tRep$tnum.b: Client restart." - rep003_put $masterenv B1 b-one - - unset clientenv - set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \ - -rep_client -rep_transport [list 2 replsend]] - error_check_good client_env [is_valid_env $clientenv] TRUE - - # Loop letting the client and master sync up and get the - # environment initialized. It's a new client env so - # reinitialize the envlist as well. - set envlist "{$masterenv 1} {$clientenv 2}" - process_msgs $envlist - - # The items from part A should be present at all times-- - # if we roll them back, we've screwed up. 
[#5709] - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - - rep003_put $masterenv B2 b-two - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - # The items from part A should be present at all times-- - # if we roll them back, we've screwed up. [#5709] - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - - if { $nproced == 0 } { - break - } - } - - rep003_check $clientenv B1 b-one - rep003_check $clientenv B2 b-two - - error_check_good clientenv_close [$clientenv close] 0 - - replclear 2 - - # Now reopen the client after a recovery. - puts "\tRep$tnum.c: Client restart after recovery." - rep003_put $masterenv C1 c-one - - unset clientenv - set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \ - -recover -rep_client -rep_transport [list 2 replsend]] - error_check_good client_env [is_valid_env $clientenv] TRUE - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - set envlist "{$masterenv 1} {$clientenv 2}" - process_msgs $envlist - - # The items from part A should be present at all times-- - # if we roll them back, we've screwed up. [#5709] - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - rep003_check $clientenv B1 b-one - rep003_check $clientenv B2 b-two - - rep003_put $masterenv C2 c-two - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - while { 1 } { - set nproced 0 - - # The items from part A should be present at all times-- - # if we roll them back, we've screwed up. 
[#5709] - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - rep003_check $clientenv B1 b-one - rep003_check $clientenv B2 b-two - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } - - rep003_check $clientenv C1 c-one - rep003_check $clientenv C2 c-two - - error_check_good clientenv_close [$clientenv close] 0 - - replclear 2 - - # Now reopen the client after a catastrophic recovery. - puts "\tRep$tnum.d: Client restart after catastrophic recovery." - rep003_put $masterenv D1 d-one - - unset clientenv - set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \ - -recover_fatal -rep_client -rep_transport [list 2 replsend]] - error_check_good client_env [is_valid_env $clientenv] TRUE - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - set envlist "{$masterenv 1} {$clientenv 2}" - process_msgs $envlist - rep003_put $masterenv D2 d-two - - # Loop, processing first the master's messages, then the client's, - # until both queues are empty. - while { 1 } { - set nproced 0 - - # The items from part A should be present at all times-- - # if we roll them back, we've screwed up. 
[#5709] - rep003_check $clientenv A1 a-one - rep003_check $clientenv A2 a-two - rep003_check $clientenv B1 b-one - rep003_check $clientenv B2 b-two - rep003_check $clientenv C1 c-one - rep003_check $clientenv C2 c-two - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $clientenv 2] - - if { $nproced == 0 } { - break - } - } - - rep003_check $clientenv D1 d-one - rep003_check $clientenv D2 d-two - - error_check_good clientenv_close [$clientenv close] 0 - - error_check_good masterenv_close [$masterenv close] 0 - replclose $testdir/MSGQUEUEDIR -} - -proc rep003_put { masterenv key data } { - global rep003_dbname rep003_omethod rep003_oargs - - set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \ - $rep003_omethod $rep003_oargs $rep003_dbname] - error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE - - set txn [$masterenv txn] - error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0 - error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0 - - error_check_good rep3_put_close($key,$data) [$db close] 0 -} - -proc rep003_check { env key data } { - global rep003_dbname - - set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname] - error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE - - set dbt [$db get $key] - error_check_good rep3_check($key,$data) \ - [lindex [lindex $dbt 0] 1] $data - - error_check_good rep3_put_close($key,$data) [$db close] 0 -} diff --git a/storage/bdb/test/rep004.tcl b/storage/bdb/test/rep004.tcl deleted file mode 100644 index e1d4d3b65c7..00000000000 --- a/storage/bdb/test/rep004.tcl +++ /dev/null @@ -1,198 +0,0 @@ -# -# Copyright (c) 2002 -# Sleepycat Software. All rights reserved. -# -# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $ -# -# TEST rep004 -# TEST Test of DB_REP_LOGSONLY. -# TEST -# TEST Run a quick put test in a master environment that has one logs-only -# TEST client. 
Shut down, then run catastrophic recovery in the logs-only -# TEST client and check that the database is present and populated. - -proc rep004 { method { nitems 10 } { tnum "04" } args } { - source ./include.tcl - global testdir - - env_cleanup $testdir - set dbname rep0$tnum.db - - set omethod [convert_method $method] - set oargs [convert_args $method $args] - - puts "Rep0$tnum: Test of logs-only replication clients" - - replsetup $testdir/MSGQUEUEDIR - set masterdir $testdir/MASTERDIR - file mkdir $masterdir - set clientdir $testdir/CLIENTDIR - file mkdir $clientdir - set logsonlydir $testdir/LOGSONLYDIR - file mkdir $logsonlydir - - # Open a master, a logsonly replica, and a normal client. - repladd 1 - set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \ - -rep_transport [list 1 replsend]] - error_check_good master_env [is_valid_env $masterenv] TRUE - - repladd 2 - set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \ - -rep_transport [list 2 replsend]] - error_check_good logsonly_env [is_valid_env $loenv] TRUE - - repladd 3 - set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \ - -rep_transport [list 3 replsend]] - error_check_good client_env [is_valid_env $clientenv] TRUE - - - puts "\tRep0$tnum.a: Populate database." 
- - set db [eval {berkdb open -create -mode 0644 -auto_commit} \ - -env $masterenv $oargs $omethod $dbname] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nitems } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set data $str - } else { - set key $str - set data [reverse $str] - } - set kvals($count) $key - set dvals($count) [pad_data $method $data] - - set txn [$masterenv txn] - error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE - - set ret [eval \ - {$db put} -txn $txn {$key [chop_data $method $data]}] - error_check_good put($count) $ret 0 - - error_check_good commit($count) [$txn commit] 0 - - incr count - } - - puts "\tRep0$tnum.b: Sync up clients." - set donenow 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $loenv 2] - incr nproced [replprocessqueue $clientenv 3] - - if { $nproced == 0 } { - break - } - } - - - puts "\tRep0$tnum.c: Get master and logs-only client ahead." - set newcount 0 - while { [gets $did str] != -1 && $newcount < $nitems } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set data $str - } else { - set key $str - set data [reverse $str] - } - set kvals($count) $key - set dvals($count) [pad_data $method $data] - - set txn [$masterenv txn] - error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE - - set ret [eval \ - {$db put} -txn $txn {$key [chop_data $method $data]}] - error_check_good put($count) $ret 0 - - error_check_good commit($count) [$txn commit] 0 - - incr count - incr newcount - } - - error_check_good db_close [$db close] 0 - - puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over." 
- set donenow 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $masterenv 1] - incr nproced [replprocessqueue $loenv 2] - - if { $nproced == 0 } { - break - } - } - - - # "Crash" the master, and fail over to the upgradeable client. - error_check_good masterenv_close [$masterenv close] 0 - replclear 3 - - error_check_good upgrade_client [$clientenv rep_start -master] 0 - set donenow 0 - while { 1 } { - set nproced 0 - - incr nproced [replprocessqueue $clientenv 3] - incr nproced [replprocessqueue $loenv 2] - - if { $nproced == 0 } { - break - } - } - - error_check_good loenv_close [$loenv close] 0 - - puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client." - set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal] - - puts "\tRep0$tnum.f: Verify logs-only client contents." - set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname] - set loc [$lodb cursor] - - set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname] - set cc [$cdb cursor] - - # Make sure new master and recovered logs-only replica match. - for { set cdbt [$cc get -first] } \ - { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } { - set lodbt [$loc get -next] - - error_check_good newmaster_replica_match $cdbt $lodbt - } - - # Reset new master cursor. 
- error_check_good cc_close [$cc close] 0 - set cc [$cdb cursor] - - for { set lodbt [$loc get -first] } \ - { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } { - set cdbt [$cc get -next] - - error_check_good replica_newmaster_match $lodbt $cdbt - } - - error_check_good loc_close [$loc close] 0 - error_check_good lodb_close [$lodb close] 0 - error_check_good loenv_close [$loenv close] 0 - - error_check_good cc_close [$cc close] 0 - error_check_good cdb_close [$cdb close] 0 - error_check_good clientenv_close [$clientenv close] 0 - - close $did - - replclose $testdir/MSGQUEUEDIR -} diff --git a/storage/bdb/test/rep005.tcl b/storage/bdb/test/rep005.tcl deleted file mode 100644 index 517fa955d5d..00000000000 --- a/storage/bdb/test/rep005.tcl +++ /dev/null @@ -1,307 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2002-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rep005.tcl,v 11.41 2004/10/15 15:41:56 sue Exp $ -# -# TEST rep005 -# TEST Replication election test with error handling. -# TEST -# TEST Run a modified version of test001 in a replicated master environment; -# TEST hold an election among a group of clients to make sure they select -# TEST a proper master from amongst themselves, forcing errors at various -# TEST locations in the election path. - -proc rep005 { method args } { - if { [is_btree $method] == 0 } { - puts "Rep005: Skipping for method $method." - return - } - - set tnum "005" - set niter 10 - set nclients 3 - set logsets [create_logsets [expr $nclients + 1]] - - # We don't want to run this with -recover - it takes too - # long and doesn't cover any new ground. - set recargs "" - foreach l $logsets { - puts "Rep$tnum ($recargs): Replication election\ - error test with $nclients clients." 
- puts -nonewline "Rep$tnum: Started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts "Rep$tnum: Master logs are [lindex $l 0]" - for { set i 0 } { $i < $nclients } { incr i } { - puts "Rep$tnum: Client $i logs are\ - [lindex $l [expr $i + 1]]" - } - rep005_sub $method $tnum \ - $niter $nclients $l $recargs $args - } -} - -proc rep005_sub { method tnum niter nclients logset recargs largs } { - source ./include.tcl - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - env_cleanup $testdir - - set qdir $testdir/MSGQUEUEDIR - replsetup $qdir - - set masterdir $testdir/MASTERDIR - file mkdir $masterdir - set m_logtype [lindex $logset 0] - set m_logargs [adjust_logargs $m_logtype] - set m_txnargs [adjust_txnargs $m_logtype] - - for { set i 0 } { $i < $nclients } { incr i } { - set clientdir($i) $testdir/CLIENTDIR.$i - file mkdir $clientdir($i) - set c_logtype($i) [lindex $logset [expr $i + 1]] - set c_logargs($i) [adjust_logargs $c_logtype($i)] - set c_txnargs($i) [adjust_txnargs $c_logtype($i)] - } - - # Open a master. - repladd 1 - set env_cmd(M) "berkdb_env -create -log_max 1000000 \ - -home $masterdir $m_logargs \ - $m_txnargs -rep_master -rep_transport \[list 1 replsend\]" -# To debug elections, uncomment the line below and further below -# for the clients to turn on verbose. Also edit reputils.tcl -# in proc start_election and swap the 2 commented lines with -# their counterpart. -# set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \ -# -home $masterdir $m_logargs \ -# $m_txnargs -rep_master \ -# -verbose {rep on} -errpfx MASTER -errfile /dev/stderr \ -# -rep_transport \[list 1 replsend\]" - set masterenv [eval $env_cmd(M) $recargs] - error_check_good master_env [is_valid_env $masterenv] TRUE - - set envlist {} - lappend envlist "$masterenv 1" - - # Open the clients. 
- for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - repladd $envid - set env_cmd($i) "berkdb_env -create -home $clientdir($i) \ - $c_logargs($i) $c_txnargs($i) -rep_client \ - -rep_transport \[list $envid replsend\]" -# set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \ -# -verbose {rep on} -errpfx CLIENT$i -errfile /dev/stderr \ -# $c_logargs($i) $c_txnargs($i) -rep_client \ -# -rep_transport \[list $envid replsend\]" - set clientenv($i) [eval $env_cmd($i) $recargs] - error_check_good \ - client_env($i) [is_valid_env $clientenv($i)] TRUE - lappend envlist "$clientenv($i) $envid" - } - - # Run a modified test001 in the master. - puts "\tRep$tnum.a: Running test001 in replicated env." - eval rep_test $method $masterenv NULL $niter 0 0 0 $largs - - # Process all the messages and close the master. - process_msgs $envlist - error_check_good masterenv_close [$masterenv close] 0 - set envlist [lreplace $envlist 0 0] - - for { set i 0 } { $i < $nclients } { incr i } { - replclear [expr $i + 2] - } - # - # We set up the error list for each client. We know that the - # first client is the one calling the election, therefore, add - # the error location on sending the message (electsend) for that one. - set m "Rep$tnum" - set count 0 - set win -1 - # - # A full test can take a long time to run. For normal testing - # pare it down a lot so that it runs in a shorter time. 
- # - set c0err { none electinit none none } - set c1err $c0err - set c2err $c0err - set numtests [expr [llength $c0err] * [llength $c1err] * \ - [llength $c2err]] - puts "\t$m.b: Starting $numtests election with error tests" - set last_win -1 - set win -1 - foreach c0 $c0err { - foreach c1 $c1err { - foreach c2 $c2err { - set elist [list $c0 $c1 $c2] - rep005_elect env_cmd envlist $qdir \ - $m $count win last_win $elist $logset - incr count - } - } - } - - foreach pair $envlist { - set cenv [lindex $pair 0] - error_check_good cenv_close [$cenv close] 0 - } - - replclose $testdir/MSGQUEUEDIR - puts -nonewline \ - "Rep$tnum: Completed at: " - puts [clock format [clock seconds] -format "%H:%M %D"] -} - -proc rep005_elect { ecmd celist qdir msg count \ - winner lsn_lose elist logset} { - global elect_timeout elect_serial - global is_windows_test - upvar $ecmd env_cmd - upvar $celist envlist - upvar $winner win - upvar $lsn_lose last_win - - set elect_timeout 5000000 - set nclients [llength $elist] - set nsites [expr $nclients + 1] - - set cl_list {} - foreach pair $envlist { - set id [lindex $pair 1] - set i [expr $id - 2] - set clientenv($i) [lindex $pair 0] - set err_cmd($i) [lindex $elist $i] - set elect_pipe($i) INVALID - replclear $id - lappend cl_list $i - } - - # Select winner. We want to test biggest LSN wins, and secondarily - # highest priority wins. If we already have a master, make sure - # we don't start a client in that master. - set el 0 - if { $win == -1 } { - if { $last_win != -1 } { - set cl_list [lreplace $cl_list $last_win $last_win] - set el $last_win - } - set windex [berkdb random_int 0 [expr [llength $cl_list] - 1]] - set win [lindex $cl_list $windex] - } else { - # Easy case, if we have a master, the winner must be the - # same one as last time, just use $win. - # If client0 is the current existing master, start the - # election in client 1. - if {$win == 0} { - set el 1 - } - } - # Winner has priority 100. 
If we are testing LSN winning, the - # make sure the lowest LSN client has the highest priority. - # Everyone else has priority 10. - for { set i 0 } { $i < $nclients } { incr i } { - set crash($i) 0 - if { $i == $win } { - set pri($i) 100 - } elseif { $i == $last_win } { - set pri($i) 200 - } else { - set pri($i) 10 - } - } - - puts "\t$msg.b.$count: Start election (win=client$win) $elist" - set msg $msg.c.$count - set nsites $nclients - set nvotes $nsites - run_election env_cmd envlist err_cmd pri crash \ - $qdir $msg $el $nsites $nvotes $nclients $win - # - # Sometimes test elections with an existing master. - # Other times test elections without master by closing the - # master we just elected and creating a new client. - # We want to weight it to close the new master. So, use - # a list to cause closing about 70% of the time. - # - set close_list { 0 0 0 1 1 1 1 1 1 1} - set close_len [expr [llength $close_list] - 1] - set close_index [berkdb random_int 0 $close_len] - if { [lindex $close_list $close_index] == 1 } { - puts -nonewline "\t\t$msg: Closing " - error_check_good newmaster_close [$clientenv($win) close] 0 - # - # If the next test should win via LSN then remove the - # env before starting the new client so that we - # can guarantee this client doesn't win the next one. - set lsn_win { 0 0 0 0 1 1 1 1 1 1 } - set lsn_len [expr [llength $lsn_win] - 1] - set lsn_index [berkdb random_int 0 $lsn_len] - set rec_arg "" - set win_inmem [expr [string compare [lindex $logset \ - [expr $win + 1]] in-memory] == 0] - if { [lindex $lsn_win $lsn_index] == 1 } { - set last_win $win - set dirindex [lsearch -exact $env_cmd($win) "-home"] - incr dirindex - set lsn_dir [lindex $env_cmd($win) $dirindex] - env_cleanup $lsn_dir - puts -nonewline "and cleaning " - } else { - # - # If we're not cleaning the env, decide if we should - # run recovery upon reopening the env. This causes - # two things: - # 1. 
Removal of region files which forces the env - # to read its __db.rep.egen file. - # 2. Adding a couple log records, so this client must - # be the next winner as well since it'll have the - # biggest LSN. - # - set rec_win { 0 0 0 0 0 0 1 1 1 1 } - set rec_len [expr [llength $rec_win] - 1] - set rec_index [berkdb random_int 0 $rec_len] - if { [lindex $rec_win $rec_index] == 1 } { - puts -nonewline "and recovering " - set rec_arg "-recover" - # - # If we're in memory and about to run - # recovery, we force ourselves not to win - # the next election because recovery will - # blow away the entire log in memory. - # However, we don't skip this entirely - # because we still want to force reading - # of __db.rep.egen. - # - if { $win_inmem } { - set last_win $win - } else { - set last_win -1 - } - } else { - set last_win -1 - } - } - puts "new master, new client $win" - set clientenv($win) [eval $env_cmd($win) $rec_arg] - error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE - # - # Since we started a new client, we need to replace it - # in the message processing list so that we get the - # new Tcl handle name in there. - set newel "$clientenv($win) [expr $win + 2]" - set envlist [lreplace $envlist $win $win $newel] - if { $rec_arg == "" || $win_inmem } { - set win -1 - } - # - # Since we started a new client we want to give them - # all a chance to process everything outstanding before - # the election on the next iteration. - # - process_msgs $envlist - } -} diff --git a/storage/bdb/test/reputils.tcl b/storage/bdb/test/reputils.tcl deleted file mode 100644 index f25da575ad1..00000000000 --- a/storage/bdb/test/reputils.tcl +++ /dev/null @@ -1,1275 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: reputils.tcl,v 11.84 2004/11/03 18:50:52 carol Exp $ -# -# Replication testing utilities - -# Environment handle for the env containing the replication "communications -# structure" (really a CDB environment). - -# The test environment consists of a queue and a # directory (environment) -# per replication site. The queue is used to hold messages destined for a -# particular site and the directory will contain the environment for the -# site. So the environment looks like: -# $testdir -# ___________|______________________________ -# / | \ \ -# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1 -# | | ... | -# 1 2 .. N+1 -# -# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message -# queues 2 - N+1. -# -# The globals repenv(1-N) contain the environment handles for the sites -# with a given id (i.e., repenv(1) is the master's environment. - -global queueenv - -# Array of DB handles, one per machine ID, for the databases that contain -# messages. -global queuedbs -global machids -global perm_response_list -set perm_response_list {} -global perm_sent_list -set perm_sent_list {} -global elect_timeout -set elect_timeout 50000000 -set drop 0 - -# The default for replication testing is for logs to be on-disk. -# Mixed-mode log testing provides a mixture of on-disk and -# in-memory logging, or even all in-memory. When testing on a -# 1-master/1-client test, we try all four options. On a test -# with more clients, we still try four options, randomly -# selecting whether the later clients are on-disk or in-memory. 
-# - -global mixed_mode_logging -set mixed_mode_logging 0 - -proc create_logsets { nsites } { - global mixed_mode_logging - global logsets - global rand_init - - error_check_good set_random_seed [berkdb srand $rand_init] 0 - if { $mixed_mode_logging == 0 } { - set loglist {} - for { set i 0 } { $i < $nsites } { incr i } { - lappend loglist "on-disk" - } - set logsets [list $loglist] - } - if { $mixed_mode_logging == 1 } { - set set1 {on-disk on-disk} - set set2 {on-disk in-memory} - set set3 {in-memory on-disk} - set set4 {in-memory in-memory} - - # Start with nsites at 2 since we already set up - # the master and first client. - for { set i 2 } { $i < $nsites } { incr i } { - foreach set { set1 set2 set3 set4 } { - if { [berkdb random_int 0 1] == 0 } { - lappend $set "on-disk" - } else { - lappend $set "in-memory" - } - } - } - set logsets [list $set1 $set2 $set3 $set4] - } - return $logsets -} - -proc run_mixedmode { method test {display 0} {run 1} \ - {outfile stdout} {largs ""} } { - global mixed_mode_logging - set mixed_mode_logging 1 - - set prefix [string range $test 0 2] - if { $prefix != "rep" } { - puts "Skipping mixed-mode log testing for non-rep test." - set mixed_mode_logging 0 - return - } - - eval run_method $method $test $display $run $outfile $largs - - # Reset to default values after run. - set mixed_mode_logging 0 -} - -# Create the directory structure for replication testing. 
-# Open the master and client environments; store these in the global repenv -# Return the master's environment: "-env masterenv" -proc repl_envsetup { envargs largs test {nclients 1} {droppct 0} { oob 0 } } { - source ./include.tcl - global clientdir - global drop drop_msg - global masterdir - global repenv - global testdir - - env_cleanup $testdir - - replsetup $testdir/MSGQUEUEDIR - - set masterdir $testdir/MASTERDIR - file mkdir $masterdir - if { $droppct != 0 } { - set drop 1 - set drop_msg [expr 100 / $droppct] - } else { - set drop 0 - } - - for { set i 0 } { $i < $nclients } { incr i } { - set clientdir($i) $testdir/CLIENTDIR.$i - file mkdir $clientdir($i) - } - - # Open a master. - repladd 1 - # - # Set log smaller than default to force changing files, - # but big enough so that the tests that use binary files - # as keys/data can run. - # - set logmax [expr 3 * 1024 * 1024] - set ma_cmd "berkdb_env -create -log_max $logmax $envargs \ - -lock_max 10000 \ - -home $masterdir -txn nosync -rep_master -rep_transport \ - \[list 1 replsend\]" -# set ma_cmd "berkdb_env_noerr -create -log_max $logmax $envargs \ -# -lock_max 10000 -verbose {rep on} -errfile /dev/stderr \ -# -errpfx $masterdir \ -# -home $masterdir -txn nosync -rep_master -rep_transport \ -# \[list 1 replsend\]" - set masterenv [eval $ma_cmd] - error_check_good master_env [is_valid_env $masterenv] TRUE - set repenv(master) $masterenv - - # Open clients - for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - repladd $envid - set cl_cmd "berkdb_env -create $envargs -txn nosync \ - -cachesize { 0 10000000 0 } -lock_max 10000 \ - -home $clientdir($i) -rep_client -rep_transport \ - \[list $envid replsend\]" -# set cl_cmd "berkdb_env_noerr -create $envargs -txn nosync \ -# -cachesize { 0 10000000 0 } -lock_max 10000 \ -# -home $clientdir($i) -rep_client -rep_transport \ -# \[list $envid replsend\] -verbose {rep on} \ -# -errfile /dev/stderr -errpfx $clientdir($i)" - set clientenv 
[eval $cl_cmd] - error_check_good client_env [is_valid_env $clientenv] TRUE - set repenv($i) $clientenv - } - set repenv($i) NULL - append largs " -env $masterenv " - - # Process startup messages - repl_envprocq $test $nclients $oob - - return $largs -} - -# Process all incoming messages. Iterate until there are no messages left -# in anyone's queue so that we capture all message exchanges. We verify that -# the requested number of clients matches the number of client environments -# we have. The oob parameter indicates if we should process the queue -# with out-of-order delivery. The replprocess procedure actually does -# the real work of processing the queue -- this routine simply iterates -# over the various queues and does the initial setup. -proc repl_envprocq { test { nclients 1 } { oob 0 }} { - global repenv - global drop - - set masterenv $repenv(master) - for { set i 0 } { 1 } { incr i } { - if { $repenv($i) == "NULL"} { - break - } - } - error_check_good i_nclients $nclients $i - - berkdb debug_check - puts -nonewline "\t$test: Processing master/$i client queues" - set rand_skip 0 - if { $oob } { - puts " out-of-order" - } else { - puts " in order" - } - set do_check 1 - set droprestore $drop - while { 1 } { - set nproced 0 - - if { $oob } { - set rand_skip [berkdb random_int 2 10] - } - incr nproced [replprocessqueue $masterenv 1 $rand_skip] - for { set i 0 } { $i < $nclients } { incr i } { - set envid [expr $i + 2] - if { $oob } { - set rand_skip [berkdb random_int 2 10] - } - set n [replprocessqueue $repenv($i) \ - $envid $rand_skip] - incr nproced $n - } - - if { $nproced == 0 } { - # Now that we delay requesting records until - # we've had a few records go by, we should always - # see that the number of requests is lower than the - # number of messages that were enqueued. 
- for { set i 0 } { $i < $nclients } { incr i } { - set clientenv $repenv($i) - set queued [stat_field $clientenv rep_stat \ - "Total log records queued"] - error_check_bad queued_stats \ - $queued -1 - set requested [stat_field $clientenv rep_stat \ - "Log records requested"] - error_check_bad requested_stats \ - $requested -1 - if { $queued != 0 && $do_check != 0 } { - error_check_good num_requested \ - [expr $requested <= $queued] 1 - } - - $clientenv rep_request 1 1 - } - - # If we were dropping messages, we might need - # to flush the log so that we get everything - # and end up in the right state. - if { $drop != 0 } { - set drop 0 - set do_check 0 - $masterenv rep_flush - berkdb debug_check - puts "\t$test: Flushing Master" - } else { - break - } - } - } - - # Reset the clients back to the default state in case we - # have more processing to do. - for { set i 0 } { $i < $nclients } { incr i } { - set clientenv $repenv($i) - $clientenv rep_request 4 128 - } - set drop $droprestore -} - -# Verify that the directories in the master are exactly replicated in -# each of the client environments. -proc repl_envver0 { test method { nclients 1 } } { - global clientdir - global masterdir - global repenv - - # Verify the database in the client dir. - # First dump the master. - set t1 $masterdir/t1 - set t2 $masterdir/t2 - set t3 $masterdir/t3 - set omethod [convert_method $method] - - # - # We are interested in the keys of whatever databases are present - # in the master environment, so we just call a no-op check function - # since we have no idea what the contents of this database really is. - # We just need to walk the master and the clients and make sure they - # have the same contents. 
- # - set cwd [pwd] - cd $masterdir - set stat [catch {glob test*.db} dbs] - cd $cwd - if { $stat == 1 } { - return - } - foreach testfile $dbs { - open_and_dump_file $testfile $repenv(master) $masterdir/t2 \ - repl_noop dump_file_direction "-first" "-next" - - if { [string compare [convert_method $method] -recno] != 0 } { - filesort $t2 $t3 - file rename -force $t3 $t2 - } - for { set i 0 } { $i < $nclients } { incr i } { - puts "\t$test: Verifying client $i database $testfile contents." - open_and_dump_file $testfile $repenv($i) \ - $t1 repl_noop dump_file_direction "-first" "-next" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } else { - catch {file copy -force $t1 $t3} ret - } - error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0 - } - } -} - -# Remove all the elements from the master and verify that these -# deletions properly propagated to the clients. -proc repl_verdel { test method { nclients 1 } } { - global clientdir - global masterdir - global repenv - - # Delete all items in the master. - set cwd [pwd] - cd $masterdir - set stat [catch {glob test*.db} dbs] - cd $cwd - if { $stat == 1 } { - return - } - foreach testfile $dbs { - puts "\t$test: Deleting all items from the master." - set txn [$repenv(master) txn] - error_check_good txn_begin [is_valid_txn $txn \ - $repenv(master)] TRUE - set db [berkdb_open -txn $txn -env $repenv(master) $testfile] - error_check_good reopen_master [is_valid_db $db] TRUE - set dbc [$db cursor -txn $txn] - error_check_good reopen_master_cursor \ - [is_valid_cursor $dbc $db] TRUE - for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \ - { set dbt [$dbc get -next] } { - error_check_good del_item [$dbc del] 0 - } - error_check_good dbc_close [$dbc close] 0 - error_check_good txn_commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - - repl_envprocq $test $nclients - - # Check clients. 
- for { set i 0 } { $i < $nclients } { incr i } { - puts "\t$test: Verifying client database $i is empty." - - set db [berkdb_open -env $repenv($i) $testfile] - error_check_good reopen_client($i) \ - [is_valid_db $db] TRUE - set dbc [$db cursor] - error_check_good reopen_client_cursor($i) \ - [is_valid_cursor $dbc $db] TRUE - - error_check_good client($i)_empty \ - [llength [$dbc get -first]] 0 - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - } - } -} - -# Replication "check" function for the dump procs that expect to -# be able to verify the keys and data. -proc repl_noop { k d } { - return -} - -# Close all the master and client environments in a replication test directory. -proc repl_envclose { test envargs } { - source ./include.tcl - global clientdir - global encrypt - global masterdir - global repenv - global testdir - - if { [lsearch $envargs "-encrypta*"] !=-1 } { - set encrypt 1 - } - - # In order to make sure that we have fully-synced and ready-to-verify - # databases on all the clients, do a checkpoint on the master and - # process messages in order to flush all the clients. - set drop 0 - set do_check 0 - berkdb debug_check - puts "\t$test: Checkpointing master." - error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0 - - # Count clients. 
- for { set ncli 0 } { 1 } { incr ncli } { - if { $repenv($ncli) == "NULL" } { - break - } - } - repl_envprocq $test $ncli - - error_check_good masterenv_close [$repenv(master) close] 0 - verify_dir $masterdir "\t$test: " 0 0 1 - for { set i 0 } { $i < $ncli } { incr i } { - error_check_good client($i)_close [$repenv($i) close] 0 - verify_dir $clientdir($i) "\t$test: " 0 0 1 - } - replclose $testdir/MSGQUEUEDIR - -} - -# Close up a replication group -proc replclose { queuedir } { - global queueenv queuedbs machids - - foreach m $machids { - set db $queuedbs($m) - error_check_good dbr_close [$db close] 0 - } - error_check_good qenv_close [$queueenv close] 0 - set machids {} -} - -# Create a replication group for testing. -proc replsetup { queuedir } { - global queueenv queuedbs machids - - file mkdir $queuedir - set queueenv \ - [berkdb_env -create -txn nosync -lock_max 20000 -home $queuedir] - error_check_good queueenv [is_valid_env $queueenv] TRUE - - if { [info exists queuedbs] } { - unset queuedbs - } - set machids {} - - return $queueenv -} - -# Send function for replication. -proc replsend { control rec fromid toid flags lsn } { - global queuedbs queueenv machids - global drop drop_msg - global perm_sent_list - if { [llength $perm_sent_list] != 0 && $flags == "perm" } { -# puts "replsend sent perm message, LSN $lsn" - lappend perm_sent_list $lsn - } - - # - # If we are testing with dropped messages, then we drop every - # $drop_msg time. If we do that just return 0 and don't do - # anything. - # - if { $drop != 0 } { - incr drop - if { $drop == $drop_msg } { - set drop 1 - return 0 - } - } - # XXX - # -1 is DB_BROADCAST_MID - if { $toid == -1 } { - set machlist $machids - } else { - if { [info exists queuedbs($toid)] != 1 } { - error "replsend: machid $toid not found" - } - set machlist [list $toid] - } - - foreach m $machlist { - # XXX should a broadcast include to "self"? 
- if { $m == $fromid } { - continue - } - - set db $queuedbs($m) - set txn [$queueenv txn] - $db put -txn $txn -append [list $control $rec $fromid] - error_check_good replsend_commit [$txn commit] 0 - } - - return 0 -} - -# Discard all the pending messages for a particular site. -proc replclear { machid } { - global queuedbs queueenv - - if { [info exists queuedbs($machid)] != 1 } { - error "FAIL: replclear: machid $machid not found" - } - - set db $queuedbs($machid) - set txn [$queueenv txn] - set dbc [$db cursor -txn $txn] - for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \ - { set dbt [$dbc get -rmw -next] } { - error_check_good replclear($machid)_del [$dbc del] 0 - } - error_check_good replclear($machid)_dbc_close [$dbc close] 0 - error_check_good replclear($machid)_txn_commit [$txn commit] 0 -} - -# Add a machine to a replication environment. -proc repladd { machid } { - global queueenv queuedbs machids - - if { [info exists queuedbs($machid)] == 1 } { - error "FAIL: repladd: machid $machid already exists" - } - - set queuedbs($machid) [berkdb open -auto_commit \ - -env $queueenv -create -recno -renumber repqueue$machid.db] - error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE - - lappend machids $machid -} - -# Acquire a handle to work with an existing machine's replication -# queue. This is for situations where more than one process -# is working with a message queue. In general, having more than one -# process handle the queue is wrong. However, in order to test some -# things, we need two processes (since Tcl doesn't support threads). We -# go to great pain in the test harness to make sure this works, but we -# don't let customers do it. 
-proc repljoin { machid } { - global queueenv queuedbs machids - - set queuedbs($machid) [berkdb open -auto_commit \ - -env $queueenv repqueue$machid.db] - error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE - - lappend machids $machid -} - -# Process a queue of messages, skipping every "skip_interval" entry. -# We traverse the entire queue, but since we skip some messages, we -# may end up leaving things in the queue, which should get picked up -# on a later run. -proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \ - { newmasterp NONE } { dupmasterp NONE } { errp NONE } } { - global queuedbs queueenv errorCode - global perm_response_list - global startup_done - - # hold_electp is a call-by-reference variable which lets our caller - # know we need to hold an election. - if { [string compare $hold_electp NONE] != 0 } { - upvar $hold_electp hold_elect - } - set hold_elect 0 - - # newmasterp is the same idea, only returning the ID of a master - # given in a DB_REP_NEWMASTER return. - if { [string compare $newmasterp NONE] != 0 } { - upvar $newmasterp newmaster - } - set newmaster 0 - - # dupmasterp is a call-by-reference variable which lets our caller - # know we have a duplicate master. - if { [string compare $dupmasterp NONE] != 0 } { - upvar $dupmasterp dupmaster - } - set dupmaster 0 - - # errp is a call-by-reference variable which lets our caller - # know we have gotten an error (that they expect). - if { [string compare $errp NONE] != 0 } { - upvar $errp errorp - } - set errorp 0 - - set nproced 0 - - set txn [$queueenv txn] - - # If we are running separate processes, the second process has - # to join an existing message queue. 
- if { [info exists queuedbs($machid)] == 0 } { - repljoin $machid - } - - set dbc [$queuedbs($machid) cursor -txn $txn] - - error_check_good process_dbc($machid) \ - [is_valid_cursor $dbc $queuedbs($machid)] TRUE - - for { set dbt [$dbc get -first] } \ - { [llength $dbt] != 0 } \ - { } { - set data [lindex [lindex $dbt 0] 1] - set recno [lindex [lindex $dbt 0] 0] - - # If skip_interval is nonzero, we want to process messages - # out of order. We do this in a simple but slimy way-- - # continue walking with the cursor without processing the - # message or deleting it from the queue, but do increment - # "nproced". The way this proc is normally used, the - # precise value of nproced doesn't matter--we just don't - # assume the queues are empty if it's nonzero. Thus, - # if we contrive to make sure it's nonzero, we'll always - # come back to records we've skipped on a later call - # to replprocessqueue. (If there really are no records, - # we'll never get here.) - # - # Skip every skip_interval'th record (and use a remainder other - # than zero so that we're guaranteed to really process at least - # one record on every call). - if { $skip_interval != 0 } { - if { $nproced % $skip_interval == 1 } { - incr nproced - set dbt [$dbc get -next] - continue - } - } - - # We need to remove the current message from the queue, - # because we're about to end the transaction and someone - # else processing messages might come in and reprocess this - # message which would be bad. - error_check_good queue_remove [$dbc del] 0 - - # We have to play an ugly cursor game here: we currently - # hold a lock on the page of messages, but rep_process_message - # might need to lock the page with a different cursor in - # order to send a response. So save the next recno, close - # the cursor, and then reopen and reset the cursor. - # If someone else is processing this queue, our entry might - # have gone away, and we need to be able to handle that. 
- - error_check_good dbc_process_close [$dbc close] 0 - error_check_good txn_commit [$txn commit] 0 - - set ret [catch {$dbenv rep_process_message \ - [lindex $data 2] [lindex $data 0] [lindex $data 1]} res] - - # Save all ISPERM and NOTPERM responses so we can compare their - # LSNs to the LSN in the log. The variable perm_response_list - # holds the entire response so we can extract responses and - # LSNs as needed. - # - if { [llength $perm_response_list] != 0 && \ - ([is_substr $res ISPERM] || [is_substr $res NOTPERM]) } { - lappend perm_response_list $res - } - - if { $ret != 0 } { - if { [string compare $errp NONE] != 0 } { - set errorp "$dbenv $machid $res" - } else { - error "FAIL:[timestamp]\ - rep_process_message returned $res" - } - } - - incr nproced - - # Now, re-establish the cursor position. We fetch the - # current record number. If there is something there, - # that is the record for the next iteration. If there - # is nothing there, then we've consumed the last item - # in the queue. - - set txn [$queueenv txn] - set dbc [$queuedbs($machid) cursor -txn $txn] - set dbt [$dbc get -set_range $recno] - - if { $ret == 0 } { - set rettype [lindex $res 0] - set retval [lindex $res 1] - # - # Do nothing for 0 and NEWSITE - # - if { [is_substr $rettype STARTUPDONE] } { - set startup_done 1 - } - if { [is_substr $rettype HOLDELECTION] } { - set hold_elect 1 - } - if { [is_substr $rettype DUPMASTER] } { - set dupmaster "1 $dbenv $machid" - } - if { [is_substr $rettype NOTPERM] || \ - [is_substr $rettype ISPERM] } { - set lsnfile [lindex $retval 0] - set lsnoff [lindex $retval 1] - } - if { [is_substr $rettype NEWMASTER] } { - set newmaster $retval - # Break as soon as we get a NEWMASTER message; - # our caller needs to handle it. - break - } - } - - if { $errorp != 0 } { - # Break also on an error, caller wants to handle it. - break - } - if { $hold_elect == 1 } { - # Break also on a HOLDELECTION, for the same reason. 
- break - } - if { $dupmaster == 1 } { - # Break also on a DUPMASTER, for the same reason. - break - } - - } - - error_check_good dbc_close [$dbc close] 0 - error_check_good txn_commit [$txn commit] 0 - - # Return the number of messages processed. - return $nproced -} - -set run_repl_flag "-run_repl" - -proc extract_repl_args { args } { - global run_repl_flag - - for { set arg [lindex $args [set i 0]] } \ - { [string length $arg] > 0 } \ - { set arg [lindex $args [incr i]] } { - if { [string compare $arg $run_repl_flag] == 0 } { - return [lindex $args [expr $i + 1]] - } - } - return "" -} - -proc delete_repl_args { args } { - global run_repl_flag - - set ret {} - - for { set arg [lindex $args [set i 0]] } \ - { [string length $arg] > 0 } \ - { set arg [lindex $args [incr i]] } { - if { [string compare $arg $run_repl_flag] != 0 } { - lappend ret $arg - } else { - incr i - } - } - return $ret -} - -global elect_serial -global elections_in_progress -set elect_serial 0 - -# Start an election in a sub-process. 
-proc start_election \ - { pfx qdir envstring nsites nvotes pri timeout {err "none"} {crash 0}} { - source ./include.tcl - global elect_serial elect_timeout elections_in_progress machids - - set filelist {} - set ret [catch {glob $testdir/ELECTION*.$elect_serial} result] - if { $ret == 0 } { - set filelist [concat $filelist $result] - } - foreach f $filelist { - fileremove -f $f - } - - set oid [open $testdir/ELECTION_SOURCE.$elect_serial w] - - puts $oid "source $test_path/test.tcl" - puts $oid "replsetup $qdir" - foreach i $machids { puts $oid "repladd $i" } - puts $oid "set env_cmd \{$envstring\}" -# puts $oid "set dbenv \[eval \$env_cmd -errfile \ -# $testdir/ELECTION_ERRFILE.$elect_serial -errpfx $pfx \]" - puts $oid "set dbenv \[eval \$env_cmd -errfile \ - /dev/stdout -errpfx $pfx \]" - puts $oid "\$dbenv test abort $err" - puts $oid "set res \[catch \{\$dbenv rep_elect $nsites $nvotes $pri \ - $elect_timeout\} ret\]" - puts $oid "set r \[open \$testdir/ELECTION_RESULT.$elect_serial w\]" - puts $oid "if \{\$res == 0 \} \{" - puts $oid "puts \$r \"NEWMASTER \$ret\"" - puts $oid "\} else \{" - puts $oid "puts \$r \"ERROR \$ret\"" - puts $oid "\}" - # - # This loop calls rep_elect a second time with the error cleared. - # We don't want to do that if we are simulating a crash. 
- if { $err != "none" && $crash != 1 } { - puts $oid "\$dbenv test abort none" - puts $oid "set res \[catch \{\$dbenv rep_elect $nsites \ - $nvotes $pri $elect_timeout\} ret\]" - puts $oid "if \{\$res == 0 \} \{" - puts $oid "puts \$r \"NEWMASTER \$ret\"" - puts $oid "\} else \{" - puts $oid "puts \$r \"ERROR \$ret\"" - puts $oid "\}" - } - puts $oid "close \$r" - close $oid - -# set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w] - set t [open "|$tclsh_path" w] - puts $t "source ./include.tcl" - puts $t "source $testdir/ELECTION_SOURCE.$elect_serial" - flush $t - - set elections_in_progress($elect_serial) $t - return $elect_serial -} - -proc setpriority { priority nclients winner {start 0} } { - upvar $priority pri - - for { set i $start } { $i < [expr $nclients + $start] } { incr i } { - if { $i == $winner } { - set pri($i) 100 - } else { - set pri($i) 10 - } - } -} - -# run_election has the following arguments: -# Arrays: -# ecmd Array of the commands for setting up each client env. -# cenv Array of the handles to each client env. -# errcmd Array of where errors should be forced. -# priority Array of the priorities of each client env. -# crash If an error is forced, should we crash or recover? -# The upvar command takes care of making these arrays available to -# the procedure. -# -# Ordinary variables: -# qdir Directory where the message queue is located. -# msg Message prefixed to the output. -# elector This client calls the first election. -# nsites Number of sites in the replication group. -# nvotes Number of votes required to win the election. -# nclients Number of clients participating in the election. -# win The expected winner of the election. -# reopen Should the new master (i.e. winner) be closed -# and reopened as a client? -# dbname Name of the underlying database. Defaults to -# the name of the db created by rep_test. 
-# -proc run_election { ecmd celist errcmd priority crsh qdir msg elector \ - nsites nvotes nclients win {reopen 0} {dbname "test.db"} } { - global elect_timeout elect_serial - global is_hp_test - global is_windows_test - global rand_init - upvar $ecmd env_cmd - upvar $celist cenvlist - upvar $errcmd err_cmd - upvar $priority pri - upvar $crsh crash - - set elect_timeout 5000000 - - foreach pair $cenvlist { - set id [lindex $pair 1] - set i [expr $id - 2] - set elect_pipe($i) INVALID - replclear $id - } - - # - # XXX - # We need to somehow check for the warning if nvotes is not - # a majority. Problem is that warning will go into the child - # process' output. Furthermore, we need a mechanism that can - # handle both sending the output to a file and sending it to - # /dev/stderr when debugging without failing the - # error_check_good check. - # - puts "\t\t$msg.1: Election with nsites=$nsites,\ - nvotes=$nvotes, nclients=$nclients" - puts "\t\t$msg.2: First elector is $elector,\ - expected winner is $win (eid [expr $win + 2])" - incr elect_serial - set pfx "CHILD$elector.$elect_serial" - # Windows and HP-UX require a longer timeout. - if { $is_windows_test == 1 || $is_hp_test == 1 } { - set elect_timeout [expr $elect_timeout * 3] - } - set elect_pipe($elector) [start_election \ - $pfx $qdir $env_cmd($elector) $nsites $nvotes $pri($elector) \ - $elect_timeout $err_cmd($elector) $crash($elector)] - - tclsleep 2 - - set got_newmaster 0 - set tries [expr [expr $elect_timeout * 4] / 1000000] - - # If we're simulating a crash, skip the while loop and - # just give the initial election a chance to complete. 
- set crashing 0 - for { set i 0 } { $i < $nclients } { incr i } { - if { $crash($i) == 1 } { - set crashing 1 - } - } - - if { $crashing == 1 } { - tclsleep 10 - } else { - while { 1 } { - set nproced 0 - set he 0 - set nm 0 - set nm2 0 - - foreach pair $cenvlist { - set he 0 - set envid [lindex $pair 1] - set i [expr $envid - 2] - set clientenv($i) [lindex $pair 0] - set child_done [check_election $elect_pipe($i) nm2] - if { $got_newmaster == 0 && $nm2 != 0 } { - error_check_good newmaster_is_master2 $nm2 \ - [expr $win + 2] - set got_newmaster $nm2 - - # If this env is the new master, it needs to - # configure itself as such--this is a different - # env handle from the one that performed the - # election. - if { $nm2 == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - } - } - incr nproced \ - [replprocessqueue $clientenv($i) $envid 0 he nm] -# puts "Tries $tries: Processed queue for client $i, $nproced msgs he $he nm $nm nm2 $nm2" - if { $he == 1 } { - # - # Only close down the election pipe if the - # previously created one is done and - # waiting for new commands, otherwise - # if we try to close it while it's in - # progress we hang this main tclsh. - # - if { $elect_pipe($i) != "INVALID" && \ - $child_done == 1 } { - close_election $elect_pipe($i) - set elect_pipe($i) "INVALID" - } -# puts "Starting election on client $i" - if { $elect_pipe($i) == "INVALID" } { - incr elect_serial - set pfx "CHILD$i.$elect_serial" - set elect_pipe($i) [start_election \ - $pfx $qdir \ - $env_cmd($i) $nsites \ - $nvotes $pri($i) $elect_timeout] - set got_hold_elect($i) 1 - } - } - if { $nm != 0 } { - error_check_good newmaster_is_master $nm \ - [expr $win + 2] - set got_newmaster $nm - - # If this env is the new master, it needs to - # configure itself as such--this is a different - # env handle from the one that performed the - # election. 
- if { $nm == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] \ - 0 - # Occasionally force new log records - # to be written. - set write [berkdb random_int 1 10] - if { $write == 1 } { - set db [berkdb_open -env \ - $clientenv($i) \ - -auto_commit $dbname] - error_check_good dbopen \ - [is_valid_db $db] TRUE - error_check_good dbclose \ - [$db close] 0 - } - } - } - } - - # We need to wait around to make doubly sure that the - # election has finished... - if { $nproced == 0 } { - incr tries -1 - if { $tries == 0 } { - break - } else { - tclsleep 1 - } - } else { - set tries $tries - } - } - - # Verify that expected winner is actually the winner. - error_check_good "client $win wins" $got_newmaster [expr $win + 2] - } - - cleanup_elections - - # - # Make sure we've really processed all the post-election - # sync-up messages. If we're simulating a crash, don't process - # any more messages. - # - if { $crashing == 0 } { - process_msgs $cenvlist - } - - if { $reopen == 1 } { - puts "\t\t$msg.3: Closing new master and reopening as client" - error_check_good newmaster_close [$clientenv($win) close] 0 - - set clientenv($win) [eval $env_cmd($win)] - error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE - set newelector "$clientenv($win) [expr $win + 2]" - set cenvlist [lreplace $cenvlist $win $win $newelector] - if { $crashing == 0 } { - process_msgs $cenvlist - } - } -} - -proc got_newmaster { cenv i newmaster win {dbname "test.db"} } { - upvar $cenv clientenv - - # Check that the new master we got is the one we expected. - error_check_good newmaster_is_master $newmaster [expr $win + 2] - - # If this env is the new master, it needs to configure itself - # as such -- this is a different env handle from the one that - # performed the election. - if { $nm == $envid } { - error_check_good make_master($i) \ - [$clientenv($i) rep_start -master] 0 - # Occasionally force new log records to be written. 
- set write [berkdb random_int 1 10] - if { $write == 1 } { - set db [berkdb_open -env $clientenv($i) -auto_commit \ - -create -btree $dbname] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - } - } -} - -proc check_election { id newmasterp } { - source ./include.tcl - - if { $id == "INVALID" } { - return 0 - } - upvar $newmasterp newmaster - set newmaster 0 - set res [catch {open $testdir/ELECTION_RESULT.$id} nmid] - if { $res != 0 } { - return 0 - } - while { [gets $nmid val] != -1 } { -# puts "result $id: $val" - set str [lindex $val 0] - if { [is_substr $str NEWMASTER] } { - set newmaster [lindex $val 1] - } - } - close $nmid - return 1 -} - -proc close_election { i } { - global elections_in_progress - set t $elections_in_progress($i) - puts $t "replclose \$testdir/MSGQUEUEDIR" - puts $t "\$dbenv close" - close $t - unset elections_in_progress($i) -} - -proc cleanup_elections { } { - global elect_serial elections_in_progress - - for { set i 0 } { $i <= $elect_serial } { incr i } { - if { [info exists elections_in_progress($i)] != 0 } { - close_election $i - } - } - - set elect_serial 0 -} - -# -# This is essentially a copy of test001, but it only does the put/get -# loop AND it takes an already-opened db handle. -# -proc rep_test { method env repdb {nentries 10000} \ - {start 0} {skip 0} {needpad 0} args } { - source ./include.tcl - - # - # Open the db if one isn't given. Close before exit. - # - if { $repdb == "NULL" } { - set testfile "test.db" - set largs [convert_args $method $args] - set omethod [convert_method $method] - set db [eval {berkdb_open_noerr -env $env -auto_commit -create \ - -mode 0644} $largs $omethod $testfile] - error_check_good reptest_db [is_valid_db $db] TRUE - } else { - set db $repdb - } - - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- # If we are not using an external env, then test setting - # the database cache size and using multiple caches. - puts "\t\tRep_test: $method $nentries key/data pairs starting at $start" - set did [open $dict] - - # The "start" variable determines the record number to start - # with, if we're using record numbers. The "skip" variable - # determines which dictionary entry to start with. In normal - # use, skip is equal to start. - - if { $skip != 0 } { - for { set count 0 } { $count < $skip } { incr count } { - gets $did str - } - } - set pflags "" - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - puts "\t\tRep_test.a: put/get loop" - # Here is the loop where we put and get each key/data pair - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1 + $start] - if { 0xffffffff > 0 && $key > 0xffffffff } { - set key [expr $key - 0x100000000] - } - if { $key == 0 || $key - 0xffffffff == 1 } { - incr key - incr count - } - set kvals($key) [pad_data $method $str] - } else { - set key $str - set str [reverse $str] - } - # - # We want to make sure we send in exactly the same - # length data so that LSNs match up for some tests - # in replication (rep021). - # - if { [is_fixed_length $method] == 1 && $needpad } { - # - # Make it something visible and obvious, 'A'. - # - set p 65 - set str [make_fixed_length $method $str $p] - set kvals($key) $str - } - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - set ret [eval \ - {$db put} $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - error_check_good txn [$t commit] 0 - - # Checkpoint 10 times during the run, but not more - # frequently than every 5 entries. 
- set checkfreq [expr $nentries / 10] - if { $checkfreq < 5 } { - set checkfreq 5 - } - if { $count % $checkfreq == 0 } { - error_check_good txn_checkpoint($count) \ - [$env txn_checkpoint] 0 - } - incr count - } - close $did - if { $repdb == "NULL" } { - error_check_good rep_close [$db close] 0 - } -} - -proc process_msgs { elist {perm_response 0} {dupp NONE} {errp NONE} } { - if { $perm_response == 1 } { - global perm_response_list - set perm_response_list {{}} - } - - if { [string compare $dupp NONE] != 0 } { - upvar $dupp dupmaster - set dupmaster 0 - } else { - set dupmaster NONE - } - - if { [string compare $errp NONE] != 0 } { - upvar $errp errorp - set errorp 0 - } else { - set errorp NONE - } - - while { 1 } { - set nproced 0 - foreach pair $elist { - set envname [lindex $pair 0] - set envid [lindex $pair 1] - # - # If we need to send in all the other args - incr nproced [replprocessqueue $envname $envid \ - 0 NONE NONE dupmaster errorp] - # - # If the user is expecting to handle an error and we get - # one, return the error immediately. - # - if { $dupmaster != 0 && $dupmaster != "NONE" } { - return - } - if { $errorp != 0 && $errorp != "NONE" } { - return - } - } - if { $nproced == 0 } { - break - } - } -} diff --git a/storage/bdb/test/rpc001.tcl b/storage/bdb/test/rpc001.tcl deleted file mode 100644 index e07d5dcb162..00000000000 --- a/storage/bdb/test/rpc001.tcl +++ /dev/null @@ -1,476 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc001.tcl,v 11.41 2004/06/01 19:55:25 carol Exp $ -# -# TEST rpc001 -# TEST Test RPC server timeouts for cursor, txn and env handles. -proc rpc001 { } { - global __debug_on - global __debug_print - global errorInfo - global is_je_test - global rpc_svc - source ./include.tcl - - # - # First test timeouts on server. 
- # - set ttime 5 - set itime 10 - puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec" - set dpid [rpc_server_start 0 30 -t $ttime -I $itime] - puts "\tRpc001.a: Started server, pid $dpid" - - # - # Wrap the whole test in a catch statement so we can still kill - # the rpc server even if the test fails. - # - set status [catch { - tclsleep 2 - remote_cleanup $rpc_server $rpc_testdir $testdir - - puts "\tRpc001.b: Creating environment" - - set testfile "rpc001.db" - set home [file tail $rpc_testdir] - - set env [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000 -txn}] - error_check_good lock_env:open [is_valid_env $env] TRUE - - puts "\tRpc001.c: Opening a database" - # - # NOTE: the type of database doesn't matter, just use btree. - set db [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set curs_list {} - set txn_list {} - puts "\tRpc001.d: Basic timeout test" - puts "\tRpc001.d1: Starting a transaction" - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - lappend txn_list $txn - - puts "\tRpc001.d2: Open a cursor in that transaction" - set dbc [$db cursor -txn $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - lappend curs_list $dbc - - puts "\tRpc001.d3: Duplicate that cursor" - set dbc [$dbc dup] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - lappend curs_list $dbc - - if { !$is_je_test } { - puts "\tRpc001.d4: Starting a nested transaction" - set txn [$env txn -parent $txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - } - - puts "\tRpc001.d5: Create a cursor, no transaction" - set dbc [$db cursor] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - lappend curs_list $dbc - - puts "\tRpc001.d6: Timeout cursor and transactions" - set sleeptime [expr $ttime + 2] - tclsleep $sleeptime - - 
# - # Perform a generic db operations to cause the timeout routine - # to trigger. - # - set stat [catch {$db stat} ret] - error_check_good dbstat $stat 0 - - # - # Check that every handle we opened above is timed out - # - foreach c $curs_list { - set stat [catch {$c close} ret] - error_check_good dbc_close:$c $stat 1 - error_check_good dbc_timeout:$c \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - } - foreach t $txn_list { - set stat [catch {$t commit} ret] - error_check_good txn_commit:$t $stat 1 - error_check_good txn_timeout:$t \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - } - - set txn_list {} - - if { !$is_je_test } { - set ntxns 8 - puts "\tRpc001.e: Nested ($ntxns x $ntxns) txn activity test" - puts "\tRpc001.e1: Starting parent transaction" - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - set last_txn $txn - set parent_txn $txn - - # - # First set a breadth of 'ntxns' - # We need 2 from this set for testing later on. Just - # set them up separately first. - # - puts "\tRpc001.e2: Creating $ntxns child transactions" - set child0 [$env txn -parent $parent_txn] - error_check_good txn_begin \ - [is_valid_txn $child0 $env] TRUE - set child1 [$env txn -parent $parent_txn] - error_check_good txn_begin \ - [is_valid_txn $child1 $env] TRUE - - for {set i 2} {$i < $ntxns} {incr i} { - set txn [$env txn -parent $parent_txn] - error_check_good txn_begin \ - [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - } - - # - # Now make one 'ntxns' deeply nested. - # Add one more for testing later on separately. 
- # - puts "\tRpc001.e3: Creating $ntxns nested child transactions" - for {set i 0} {$i < $ntxns} {incr i} { - set txn [$env txn -parent $last_txn] - error_check_good txn_begin \ - [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - set last_txn $txn - } - set last_parent $last_txn - set last_txn [$env txn -parent $last_parent] - error_check_good txn_begin \ - [is_valid_txn $last_txn $env] TRUE - - puts "\tRpc001.e4: Open a cursor in deepest transaction" - set dbc [$db cursor -txn $last_txn] - error_check_good db_cursor \ - [is_valid_cursor $dbc $db] TRUE - - puts "\tRpc001.e5: Duplicate that cursor" - set dbcdup [$dbc dup] - error_check_good db_cursor \ - [is_valid_cursor $dbcdup $db] TRUE - lappend curs_list $dbcdup - - puts "\tRpc001.f: Timeout then activate duplicate cursor" - tclsleep $sleeptime - set stat [catch {$dbcdup close} ret] - error_check_good dup_close:$dbcdup $stat 0 - error_check_good dup_close:$dbcdup $ret 0 - - # - # Make sure that our parent txn is not timed out. We - # will try to begin another child tnx using the parent. - # We expect that to succeed. Immediately commit that - # txn. - # - set stat [catch {$env txn -parent $parent_txn} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.g: Timeout, then activate cursor" - tclsleep $sleeptime - set stat [catch {$dbc close} ret] - error_check_good dbc_close:$dbc $stat 0 - error_check_good dbc_close:$dbc $ret 0 - - # - # Make sure that our parent txn is not timed out. We - # will try to begin another child tnx using the parent. - # We expect that to succeed. Immediately commit that - # txn. 
- # - set stat [catch {$env txn -parent $parent_txn} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.h: Timeout, then activate child txn" - tclsleep $sleeptime - set stat [catch {$child0 commit} ret] - error_check_good child_commit $stat 0 - error_check_good child_commit:$child0 $ret 0 - - # - # Make sure that our nested txn is not timed out. We - # will try to begin another child tnx using the parent. - # We expect that to succeed. Immediately commit that - # txn. - # - set stat \ - [catch {$env txn -parent $last_parent} newchild] - error_check_good newchildtxn $stat 0 - error_check_good newcommit [$newchild commit] 0 - - puts "\tRpc001.i: Timeout, then activate nested txn" - tclsleep $sleeptime - set stat [catch {$last_txn commit} ret] - error_check_good lasttxn_commit $stat 0 - error_check_good lasttxn_commit:$child0 $ret 0 - - # - # Make sure that our child txn is not timed out. We - # should be able to commit it. - # - set stat [catch {$child1 commit} ret] - error_check_good child_commit:$child1 $stat 0 - error_check_good child_commit:$child1 $ret 0 - - # - # Clean up. They were inserted in LIFO order, so we - # should just be able to commit them all. - # - foreach t $txn_list { - set stat [catch {$t commit} ret] - error_check_good txn_commit:$t $stat 0 - error_check_good txn_commit:$t $ret 0 - } - } - - set stat [catch {$db close} ret] - error_check_good db_close $stat 0 - - rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0 - rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1 - - puts "\tRpc001.l: Timeout idle env handle" - set sleeptime [expr $itime + 2] - tclsleep $sleeptime - - # - # We need to do another operation to time out the environment - # handle. Open another environment, with an invalid home - # directory. 
- # - set stat [catch {eval {berkdb_env_noerr -home "$home.fail" \ - -server $rpc_server}} ret] - error_check_good env_open $stat 1 - - set stat [catch {$env close} ret] - error_check_good env_close $stat 1 - error_check_good env_timeout \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - } res] - if { $status != 0 } { - puts $res - } - tclkill $dpid -} - -proc rpc_timeoutjoin {env msg sleeptime use_txn} { - # - # Check join cursors now. - # - puts -nonewline "\t$msg: Test join cursors and timeouts" - if { $use_txn } { - puts " (using txns)" - set txnflag "-auto_commit" - } else { - puts " (without txns)" - set txnflag "" - } - # - # Set up a simple set of join databases - # - puts "\t${msg}0: Set up join databases" - set fruit { - {blue blueberry} - {red apple} {red cherry} {red raspberry} - {yellow lemon} {yellow pear} - } - set price { - {expen blueberry} {expen cherry} {expen raspberry} - {inexp apple} {inexp lemon} {inexp pear} - } - set dessert { - {blueberry cobbler} {cherry cobbler} {pear cobbler} - {apple pie} {raspberry pie} {lemon pie} - } - set fdb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup -dupsort fruit.db] - error_check_good dbopen [is_valid_db $fdb] TRUE - set pdb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup -dupsort price.db] - error_check_good dbopen [is_valid_db $pdb] TRUE - set ddb [eval {berkdb_open -create -btree -mode 0644} \ - $txnflag -env $env -dup -dupsort dessert.db] - error_check_good dbopen [is_valid_db $ddb] TRUE - foreach kd $fruit { - set k [lindex $kd 0] - set d [lindex $kd 1] - set ret [eval {$fdb put} $txnflag {$k $d}] - error_check_good fruit_put $ret 0 - } - error_check_good sync [$fdb sync] 0 - foreach kd $price { - set k [lindex $kd 0] - set d [lindex $kd 1] - set ret [eval {$pdb put} $txnflag {$k $d}] - error_check_good price_put $ret 0 - } - error_check_good sync [$pdb sync] 0 - foreach kd $dessert { - set k [lindex $kd 0] - set d [lindex $kd 1] - set ret [eval {$ddb 
put} $txnflag {$k $d}] - error_check_good dessert_put $ret 0 - } - error_check_good sync [$ddb sync] 0 - - rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0 - rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1 - - error_check_good ddb:close [$ddb close] 0 - error_check_good pdb:close [$pdb close] 0 - error_check_good fdb:close [$fdb close] 0 - error_check_good ddb:remove [$env dbremove dessert.db] 0 - error_check_good pdb:remove [$env dbremove price.db] 0 - error_check_good fdb:remove [$env dbremove fruit.db] 0 -} - -proc rpc_join {env msg sleep fdb pdb ddb use_txn op} { - global errorInfo - global is_je_test - - # - # Start a parent and child transaction. We'll do our join in - # the child transaction just to make sure everything gets timed - # out correctly. - # - set curs_list {} - set txn_list {} - set msgnum [expr $op * 2 + 1] - if { $use_txn } { - puts "\t$msg$msgnum: Set up txns and join cursor" - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - set txn_list [linsert $txn_list 0 $txn] - if { !$is_je_test } { - set child0 [$env txn -parent $txn] - error_check_good txn_begin \ - [is_valid_txn $child0 $env] TRUE - set txn_list [linsert $txn_list 0 $child0] - set child1 [$env txn -parent $txn] - error_check_good txn_begin \ - [is_valid_txn $child1 $env] TRUE - set txn_list [linsert $txn_list 0 $child1] - } else { - set child0 $txn - set child1 $txn - } - set txncmd "-txn $child0" - } else { - puts "\t$msg$msgnum: Set up join cursor" - set txncmd "" - } - - # - # Start a cursor, (using txn child0 in the fruit and price dbs, if - # needed). # Just pick something simple to join on. - # Then call join on the dessert db. 
- # - set fkey yellow - set pkey inexp - set fdbc [eval $fdb cursor $txncmd] - error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE - set ret [$fdbc get -set $fkey] - error_check_bad fget:set [llength $ret] 0 - set k [lindex [lindex $ret 0] 0] - error_check_good fget:set:key $k $fkey - set curs_list [linsert $curs_list 0 $fdbc] - - set pdbc [eval $pdb cursor $txncmd] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - set ret [$pdbc get -set $pkey] - error_check_bad pget:set [llength $ret] 0 - set k [lindex [lindex $ret 0] 0] - error_check_good pget:set:key $k $pkey - set curs_list [linsert $curs_list 0 $pdbc] - - set jdbc [$ddb join $fdbc $pdbc] - error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE - set ret [$jdbc get] - error_check_bad jget [llength $ret] 0 - - set msgnum [expr $op * 2 + 2] - if { $op == 1 } { - puts -nonewline "\t$msg$msgnum: Timeout all cursors" - if { $use_txn } { - puts " and txns" - } else { - puts "" - } - } else { - puts "\t$msg$msgnum: Timeout, then activate join cursor" - } - - tclsleep $sleep - - if { $op == 1 } { - # - # Perform a generic db operations to cause the timeout routine - # to trigger. - # - set stat [catch {$fdb stat} ret] - error_check_good fdbstat $stat 0 - - # - # Check that join cursor is timed out. - # - set stat [catch {$jdbc close} ret] - error_check_good dbc_close:$jdbc $stat 1 - error_check_good dbc_timeout:$jdbc \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - - # - # Now the server may or may not timeout constituent - # cursors when it times out the join cursor. So, just - # sleep again and then they should timeout. 
- # - tclsleep $sleep - set stat [catch {$fdb stat} ret] - error_check_good fdbstat $stat 0 - - foreach c $curs_list { - set stat [catch {$c close} ret] - error_check_good dbc_close:$c $stat 1 - error_check_good dbc_timeout:$c \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - } - - foreach t $txn_list { - set stat [catch {$t commit} ret] - error_check_good txn_commit:$t $stat 1 - error_check_good txn_timeout:$t \ - [is_substr $errorInfo "DB_NOSERVER_ID"] 1 - } - } else { - set stat [catch {$jdbc get} ret] - error_check_good jget.stat $stat 0 - error_check_bad jget [llength $ret] 0 - set curs_list [linsert $curs_list 0 $jdbc] - foreach c $curs_list { - set stat [catch {$c close} ret] - error_check_good dbc_close:$c $stat 0 - error_check_good dbc_close:$c $ret 0 - } - - foreach t $txn_list { - set stat [catch {$t commit} ret] - error_check_good txn_commit:$t $stat 0 - error_check_good txn_commit:$t $ret 0 - } - } -} diff --git a/storage/bdb/test/rpc002.tcl b/storage/bdb/test/rpc002.tcl deleted file mode 100644 index 505c23e7be7..00000000000 --- a/storage/bdb/test/rpc002.tcl +++ /dev/null @@ -1,161 +0,0 @@ -# Sel the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc002.tcl,v 1.25 2004/07/15 17:19:12 sue Exp $ -# -# TEST rpc002 -# TEST Test invalid RPC functions and make sure we error them correctly -# TEST Test server home directory error cases -proc rpc002 { } { - global __debug_on - global __debug_print - global errorInfo - global rpc_svc - source ./include.tcl - - set testfile "rpc002.db" - set home [file tail $rpc_testdir] - # - # First start the server. - # - puts "Rpc002: Error and Unsupported interface test" - set dpid [rpc_server_start] - puts "\tRpc002.a: Started server, pid $dpid" - - # - # Wrap the whole test in a catch statement so we can still - # kill the rpc server even if the test fails. 
- # - set status [catch { - tclsleep 2 - remote_cleanup $rpc_server $rpc_testdir $testdir - - puts "\tRpc002.b: Unsupported env options" - # - # Test each "pre-open" option for env's. These need to be - # tested on the 'berkdb_env' line. - # - set rlist { - { "-data_dir $rpc_testdir" "Rpc002.b0"} - { "-log_buffer 512" "Rpc002.b1"} - { "-log_dir $rpc_testdir" "Rpc002.b2"} - { "-log_max 100" "Rpc002.b3"} - { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"} - { "-lock_detect default" "Rpc002.b5"} - { "-lock_max 100" "Rpc002.b6"} - { "-mpool_mmap_size 100" "Rpc002.b7"} - { "-shm_key 100" "Rpc002.b9"} - { "-tmp_dir $rpc_testdir" "Rpc002.b10"} - { "-txn_max 100" "Rpc002.b11"} - { "-txn_timestamp 100" "Rpc002.b12"} - { "-verbose {recovery on}" "Rpc002.b13"} - } - - set e "berkdb_env_noerr -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000 -txn" - - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - puts "\t$msg: $cmd" - - set stat [catch {eval $e $cmd} ret] - error_check_good $cmd $stat 1 - error_check_good $cmd.err [is_substr $errorInfo \ - "unsupported in RPC environments"] 1 - } - - # - # Open an env with all the subsystems (-txn implies all - # the rest) - # - puts "\tRpc002.c: Unsupported env related interfaces" - set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000 -txn}] - error_check_good envopen [is_valid_env $env] TRUE - set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \ - -env $env $testfile" - set db [eval $dbcmd] - error_check_good dbopen [is_valid_db $db] TRUE - - # - # Test each "post-open" option relating to envs, txns, locks, - # logs and mpools. 
- # - set rlist { - { " lock_detect default" "Rpc002.c0"} - { " lock_get read 1 $env" "Rpc002.c1"} - { " lock_id" "Rpc002.c2"} - { " lock_stat" "Rpc002.c3"} - { " lock_vec 1 {get $env read}" "Rpc002.c4"} - { " log_archive" "Rpc002.c5"} - { " log_file {0 0}" "Rpc002.c6"} - { " log_flush" "Rpc002.c7"} - { " log_cursor" "Rpc002.c8"} - { " log_stat" "Rpc002.c9"} - { " mpool -create -pagesize 512" "Rpc002.c10"} - { " mpool_stat" "Rpc002.c11"} - { " mpool_sync {0 0}" "Rpc002.c12"} - { " mpool_trickle 50" "Rpc002.c13"} - { " txn_checkpoint -min 1" "Rpc002.c14"} - { " txn_stat" "Rpc002.c15"} - } - - foreach pair $rlist { - set cmd [lindex $pair 0] - set msg [lindex $pair 1] - puts "\t$msg: $cmd" - - set stat [catch {eval $env $cmd} ret] - error_check_good $cmd $stat 1 - error_check_good $cmd.err [is_substr $errorInfo \ - "unsupported in RPC environments"] 1 - } - error_check_good dbclose [$db close] 0 - - # - # The database operations that aren't supported are few - # because mostly they are the ones Tcl doesn't support - # either so we have no way to get at them. Test what we can. - # - puts "\tRpc002.d: Unsupported database related interfaces" - # - # NOTE: the type of database doesn't matter, just use btree. - # - puts "\tRpc002.d0: -cachesize" - set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \ - -env $env -cachesize {0 65536 0} $testfile" - set stat [catch {eval $dbcmd} ret] - error_check_good dbopen_cache $stat 1 - error_check_good dbopen_cache_err \ - [is_substr $errorInfo "unsupported in RPC environments"] 1 - - puts "\tRpc002.d1: Try to upgrade a database" - # - # NOTE: the type of database doesn't matter, just use btree. 
- set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret] - error_check_good dbupgrade $stat 1 - error_check_good dbupgrade_err [is_substr $errorInfo \ - "unsupported in RPC environments"] 1 - error_check_good envclose [$env close] 0 - - puts "\tRpc002.e: Open env with unsupported home dir" - set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \ - -home XXX -server $rpc_server -client_timeout 10000 \ - -txn}} ret] - error_check_good env2open $stat 1 - error_check_good envfail [is_substr $ret "Home unrecognized"] 1 - - puts "\tRpc002.f: Open env with a NULL home dir" - set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \ - -server $rpc_server -client_timeout 10000 -txn}} ret] - error_check_good env2open $stat 1 - error_check_good envfail [is_substr $ret "Home unrecognized"] 1 - } res] - if { $status != 0 } { - puts $res - } - tclkill $dpid -} diff --git a/storage/bdb/test/rpc003.tcl b/storage/bdb/test/rpc003.tcl deleted file mode 100644 index 890cbc30f5e..00000000000 --- a/storage/bdb/test/rpc003.tcl +++ /dev/null @@ -1,184 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc003.tcl,v 11.16 2004/03/02 18:44:41 mjc Exp $ -# -# TEST rpc003 -# TEST Test RPC and secondary indices. -proc rpc003 { } { - source ./include.tcl - global dict nsecondaries - global rpc_svc - - # - # First set up the files. Secondary indices only work readonly - # over RPC. So we need to create the databases first without - # RPC. Then run checking over RPC. - # - puts "Rpc003: Secondary indices over RPC" - if { [string compare $rpc_server "localhost"] != 0 } { - puts "Cannot run to non-local RPC server. Skipping." - return - } - cleanup $testdir NULL - puts "\tRpc003.a: Creating local secondary index databases" - - # Primary method/args. 
- set pmethod btree - set pomethod [convert_method $pmethod] - set pargs "" - set methods {dbtree dbtree} - set argses [convert_argses $methods ""] - set omethods [convert_methods $methods] - - set nentries 500 - - puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs" - set pname "primary003.db" - set snamebase "secondary003" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - # Open and associate the secondaries - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - - set did [open $dict] - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - close $did - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good primary_close [$pdb close] 0 - error_check_good env_close [$env close] 0 - - # - # We have set up our databases, so now start the server and - # read them over RPC. - # - set dpid [rpc_server_start] - puts "\tRpc003.c: Started server, pid $dpid" - - # - # Wrap the remainder of the test in a catch statement so we - # can still kill the rpc server even if the test fails. 
- # - set status [catch { - tclsleep 2 - set home [file tail $rpc_testdir] - set env [eval {berkdb_env_noerr -create -mode 0644 \ - -home $home -server $rpc_server}] - error_check_good lock_env:open [is_valid_env $env] TRUE - - # - # Attempt to send in a NULL callback to associate. It will - # fail if the primary and secondary are not both read-only. - # - set msg "\tRpc003.d" - puts "$msg: Using r/w primary and r/w secondary" - set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname" - set sopen "berkdb_open_noerr -create -env $env \ - [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db" - rpc003_assoc_err $popen $sopen $msg - - set msg "\tRpc003.e" - puts "$msg: Using r/w primary and read-only secondary" - set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname" - set sopen "berkdb_open_noerr -env $env -rdonly \ - [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db" - rpc003_assoc_err $popen $sopen $msg - - set msg "\tRpc003.f" - puts "$msg: Using read-only primary and r/w secondary" - set popen "berkdb_open_noerr -env $env \ - $pomethod -rdonly $pargs $pname" - set sopen "berkdb_open_noerr -create -env $env \ - [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db" - rpc003_assoc_err $popen $sopen $msg - - # Open and associate the secondaries - puts "\tRpc003.g: Checking secondaries, both read-only" - set pdb [eval {berkdb_open_noerr -env} $env \ - -rdonly $pomethod $pargs $pname] - error_check_good primary_open2 [is_valid_db $pdb] TRUE - - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -env} $env -rdonly \ - [lindex $omethods $i] [lindex $argses $i] \ - $snamebase.$i.db] - error_check_good second_open2($i) \ - [is_valid_db $sdb] TRUE - error_check_good db_associate2($i) \ - [eval {$pdb associate} "" $sdb] 0 - lappend sdbs $sdb - } - check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h" - - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - 
error_check_good primary_close [$pdb close] 0 - error_check_good env_close [$env close] 0 - } res] - if { $status != 0 } { - puts $res - } - tclkill $dpid -} - -proc rpc003_assoc_err { popen sopen msg } { - global rpc_svc - - set pdb [eval $popen] - error_check_good assoc_err_popen [is_valid_db $pdb] TRUE - - puts "$msg.0: NULL callback" - set sdb [eval $sopen] - error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE - set stat [catch {eval {$pdb associate} "" $sdb} ret] - error_check_good db_associate:rdonly $stat 1 - error_check_good db_associate:inval [is_substr $ret invalid] 1 - - # The Java and JE RPC servers support callbacks. - if { $rpc_svc == "berkeley_db_svc" || \ - $rpc_svc == "berkeley_db_cxxsvc" } { - puts "$msg.1: non-NULL callback" - set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret] - error_check_good db_associate:callback $stat 1 - error_check_good db_associate:inval [is_substr $ret invalid] 1 - } - - error_check_good assoc_sclose [$sdb close] 0 - error_check_good assoc_pclose [$pdb close] 0 -} diff --git a/storage/bdb/test/rpc004.tcl b/storage/bdb/test/rpc004.tcl deleted file mode 100644 index 468e2727076..00000000000 --- a/storage/bdb/test/rpc004.tcl +++ /dev/null @@ -1,87 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc004.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $ -# -# TEST rpc004 -# TEST Test RPC server and security -proc rpc004 { } { - global __debug_on - global __debug_print - global errorInfo - global passwd - global has_crypto - global rpc_svc - source ./include.tcl - - puts "Rpc004: RPC server + security" - # Skip test if release does not support encryption. - if { $has_crypto == 0 } { - puts "Skipping test rpc004 for non-crypto release." 
- return - } - - cleanup $testdir NULL - set dpid [rpc_server_start 1] - puts "\tRpc004.a: Started server, pid $dpid" - - # - # Wrap the test in a catch statement so we can still kill - # the rpc server even if the test fails. - # - set status [catch { - tclsleep 2 - remote_cleanup $rpc_server $rpc_testdir $testdir - - puts "\tRpc004.b: Creating environment" - - set testfile "rpc004.db" - set testfile1 "rpc004a.db" - set home [file tail $rpc_testdir] - - set env [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -encryptaes $passwd -txn}] - error_check_good lock_env:open [is_valid_env $env] TRUE - - puts "\tRpc004.c: Opening a non-encrypted database" - # - # NOTE: the type of database doesn't matter, just use btree. - set db [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tRpc004.d: Opening an encrypted database" - set db1 [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env -encrypt $testfile1] - error_check_good dbopen [is_valid_db $db1] TRUE - - set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - puts "\tRpc004.e: Put/get on both databases" - set key "key" - set data "data" - - set ret [$db put -txn $txn $key $data] - error_check_good db_put $ret 0 - set ret [$db get -txn $txn $key] - error_check_good db_get $ret [list [list $key $data]] - set ret [$db1 put -txn $txn $key $data] - error_check_good db1_put $ret 0 - set ret [$db1 get -txn $txn $key] - error_check_good db1_get $ret [list [list $key $data]] - - error_check_good txn_commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - error_check_good db1_close [$db1 close] 0 - error_check_good env_close [$env close] 0 - - # Cleanup our environment because it's encrypted - remote_cleanup $rpc_server $rpc_testdir $testdir - } res] - if { $status != 0 } { - puts $res - } - tclkill $dpid -} diff --git a/storage/bdb/test/rpc005.tcl 
b/storage/bdb/test/rpc005.tcl deleted file mode 100644 index 3111f651b16..00000000000 --- a/storage/bdb/test/rpc005.tcl +++ /dev/null @@ -1,158 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rpc005.tcl,v 11.11 2004/09/22 18:01:06 bostic Exp $ -# -# TEST rpc005 -# TEST Test RPC server handle ID sharing -proc rpc005 { } { - global __debug_on - global __debug_print - global errorInfo - global rpc_svc - global is_hp_test - source ./include.tcl - - puts "Rpc005: RPC server handle sharing" - set dpid [rpc_server_start] - puts "\tRpc005.a: Started server, pid $dpid" - - # - # Wrap the test in a catch statement so we can still kill - # the rpc server even if the test fails. - # - set status [catch { - tclsleep 2 - remote_cleanup $rpc_server $rpc_testdir $testdir - puts "\tRpc005.b: Creating environment" - - set testfile "rpc005.db" - set testfile1 "rpc005a.db" - set subdb1 "subdb1" - set subdb2 "subdb2" - set home [file tail $rpc_testdir] - - set env [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -txn}] - error_check_good lock_env:open [is_valid_env $env] TRUE - - # You can't open two handles on the same env in - # HP-UX, so skip this piece. - if { $is_hp_test == 1 } { - puts "\tRpc005.c: Skipping for HP-UX." 
- } else { - puts "\tRpc005.c: Compare identical and different \ - configured envs" - set env_ident [eval {berkdb_env -home $home \ - -server $rpc_server -txn}] - error_check_good \ - lock_env:open [is_valid_env $env_ident] TRUE - - set env_diff [eval {berkdb_env -home $home \ - -server $rpc_server -txn nosync}] - error_check_good \ - lock_env:open [is_valid_env $env_diff] TRUE - - error_check_good \ - ident:id [$env rpcid] [$env_ident rpcid] - error_check_bad \ - diff:id [$env rpcid] [$env_diff rpcid] - - error_check_good envclose [$env_diff close] 0 - error_check_good envclose [$env_ident close] 0 - } - - puts "\tRpc005.d: Opening a database" - set db [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tRpc005.e: Compare identical and different \ - configured dbs" - set db_ident [eval {berkdb_open -btree} -env $env $testfile] - error_check_good dbopen [is_valid_db $db_ident] TRUE - - set db_diff [eval {berkdb_open -btree} -env $env -rdonly \ - $testfile] - error_check_good dbopen [is_valid_db $db_diff] TRUE - - set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \ - $testfile] - error_check_good dbopen [is_valid_db $db_diff2] TRUE - - error_check_good ident:id [$db rpcid] [$db_ident rpcid] - error_check_bad diff:id [$db rpcid] [$db_diff rpcid] - error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid] - - error_check_good db_close [$db_ident close] 0 - error_check_good db_close [$db_diff close] 0 - error_check_good db_close [$db_diff2 close] 0 - error_check_good db_close [$db close] 0 - - puts "\tRpc005.f: Compare with a database and subdatabases" - set db [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env $testfile1 $subdb1] - error_check_good dbopen [is_valid_db $db] TRUE - set dbid [$db rpcid] - - set db2 [eval {berkdb_open -auto_commit -create -btree \ - -mode 0644} -env $env $testfile1 $subdb2] - error_check_good dbopen [is_valid_db 
$db2] TRUE - set db2id [$db2 rpcid] - error_check_bad 2subdb:id $dbid $db2id - - set db_ident [eval {berkdb_open -btree} -env $env \ - $testfile1 $subdb1] - error_check_good dbopen [is_valid_db $db_ident] TRUE - set identid [$db_ident rpcid] - - set db_ident2 [eval {berkdb_open -btree} -env $env \ - $testfile1 $subdb2] - error_check_good dbopen [is_valid_db $db_ident2] TRUE - set ident2id [$db_ident2 rpcid] - - set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \ - $testfile1 $subdb1] - error_check_good dbopen [is_valid_db $db_diff1] TRUE - set diff1id [$db_diff1 rpcid] - - set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \ - $testfile1 $subdb2] - error_check_good dbopen [is_valid_db $db_diff2] TRUE - set diff2id [$db_diff2 rpcid] - - set db_diff [eval {berkdb_open -unknown} -env $env -rdonly \ - $testfile1] - error_check_good dbopen [is_valid_db $db_diff] TRUE - set diffid [$db_diff rpcid] - - set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \ - $testfile1 $subdb2] - error_check_good dbopen [is_valid_db $db_diff2a] TRUE - set diff2aid [$db_diff2a rpcid] - - error_check_good ident:id $dbid $identid - error_check_good ident2:id $db2id $ident2id - error_check_bad diff:id $dbid $diffid - error_check_bad diff2:id $db2id $diffid - error_check_bad diff3:id $diff2id $diffid - error_check_bad diff4:id $diff1id $diffid - error_check_good diff2a:id $diff2id $diff2aid - - error_check_good db_close [$db_ident close] 0 - error_check_good db_close [$db_ident2 close] 0 - error_check_good db_close [$db_diff close] 0 - error_check_good db_close [$db_diff1 close] 0 - error_check_good db_close [$db_diff2 close] 0 - error_check_good db_close [$db_diff2a close] 0 - error_check_good db_close [$db2 close] 0 - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 - } res] - if { $status != 0 } { - puts $res - } - tclkill $dpid -} diff --git a/storage/bdb/test/rsrc001.tcl b/storage/bdb/test/rsrc001.tcl deleted file mode 100644 index 
b44d5ebd642..00000000000 --- a/storage/bdb/test/rsrc001.tcl +++ /dev/null @@ -1,221 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rsrc001.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $ -# -# TEST rsrc001 -# TEST Recno backing file test. Try different patterns of adding -# TEST records and making sure that the corresponding file matches. -proc rsrc001 { } { - source ./include.tcl - - puts "Rsrc001: Basic recno backing file writeback tests" - - # We run this test essentially twice, once with a db file - # and once without (an in-memory database). - set rec1 "This is record 1" - set rec2 "This is record 2 This is record 2" - set rec3 "This is record 3 This is record 3 This is record 3" - set rec4 [replicate "This is record 4 " 512] - - foreach testfile { "$testdir/rsrc001.db" "" } { - - cleanup $testdir NULL - - if { $testfile == "" } { - puts "Rsrc001: Testing with in-memory database." - } else { - puts "Rsrc001: Testing with disk-backed database." - } - - # Create backing file for the empty-file test. - set oid1 [open $testdir/rsrc.txt w] - close $oid1 - - puts "\tRsrc001.a: Put to empty file." - set db [eval {berkdb_open -create -mode 0644\ - -recno -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set txn "" - - set ret [eval {$db put} $txn {1 $rec1}] - error_check_good put_to_empty $ret 0 - error_check_good db_close [$db close] 0 - - # Now fill out the backing file and create the check file. - set oid1 [open $testdir/rsrc.txt a] - set oid2 [open $testdir/check.txt w] - - # This one was already put into rsrc.txt. - puts $oid2 $rec1 - - # These weren't. 
- puts $oid1 $rec2 - puts $oid2 $rec2 - puts $oid1 $rec3 - puts $oid2 $rec3 - puts $oid1 $rec4 - puts $oid2 $rec4 - close $oid1 - close $oid2 - - puts -nonewline "\tRsrc001.b: Read file, rewrite last record;" - puts " write it out and diff" - set db [eval {berkdb_open -create -mode 0644\ - -recno -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Read the last record; replace it (but we won't change it). - # Then close the file and diff the two files. - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set rec [$dbc get -last] - error_check_good get_last [llength [lindex $rec 0]] 2 - set key [lindex [lindex $rec 0] 0] - set data [lindex [lindex $rec 0] 1] - - # Get the last record from the text file - set oid [open $testdir/rsrc.txt] - set laststr "" - while { [gets $oid str] != -1 } { - set laststr $str - } - close $oid - set data [sanitize_record $data] - error_check_good getlast $data $laststr - - set ret [eval {$db put} $txn {$key $data}] - error_check_good replace_last $ret 0 - - error_check_good curs_close [$dbc close] 0 - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - error_check_good \ - Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \ - [filecmp $testdir/rsrc.txt $testdir/check.txt] 0 - - puts -nonewline "\tRsrc001.c: " - puts "Append some records in tree and verify in file." 
- set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [replicate "New Record $i" $i] - puts $oid $rec - incr key - set ret [eval {$db put} $txn {-append $rec}] - error_check_good put_append $ret $key - } - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0 - - puts "\tRsrc001.d: Append by record number" - set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [replicate "New Record (set 2) $i" $i] - puts $oid $rec - incr key - set ret [eval {$db put} $txn {$key $rec}] - error_check_good put_byno $ret 0 - } - - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0 - - puts "\tRsrc001.e: Put beyond end of file." - set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - puts $oid "" - incr key - } - set rec "Last Record" - puts $oid $rec - incr key - - set ret [eval {$db put} $txn {$key $rec}] - error_check_good put_byno $ret 0 - - puts "\tRsrc001.f: Put beyond end of file, after reopen." - - error_check_good db_close [$db close] 0 - set db [eval {berkdb_open -create -mode 0644\ - -recno -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set rec "Last record with reopen" - puts $oid $rec - - incr key - set ret [eval {$db put} $txn {$key $rec}] - error_check_good put_byno_with_reopen $ret 0 - - puts "\tRsrc001.g:\ - Put several beyond end of file, after reopen with snapshot." 
- error_check_good db_close [$db close] 0 - set db [eval {berkdb_open -create -mode 0644\ - -snapshot -recno -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set rec "Really really last record with reopen" - puts $oid "" - puts $oid "" - puts $oid "" - puts $oid $rec - - incr key - incr key - incr key - incr key - - set ret [eval {$db put} $txn {$key $rec}] - error_check_good put_byno_with_reopen $ret 0 - - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0 - - puts "\tRsrc001.h: Verify proper syncing of changes on close." - error_check_good Rsrc001:db_close [$db close] 0 - set db [eval {berkdb_open -create -mode 0644 -recno \ - -source $testdir/rsrc.txt} $testfile] - set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [replicate "New Record $i" $i] - puts $oid $rec - set ret [eval {$db put} $txn {-append $rec}] - # Don't bother checking return; we don't know what - # the key number is, and we'll pick up a failure - # when we compare. - } - error_check_good Rsrc001:db_close [$db close] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0 - } -} - -# Strip CRs from a record. -# Needed on Windows when a file is created as text (with CR/LF) -# but read as binary (where CR is read as a separate character) -proc sanitize_record { rec } { - source ./include.tcl - - if { $is_windows_test != 1 } { - return $rec - } - regsub -all \15 $rec "" data - return $data -} diff --git a/storage/bdb/test/rsrc002.tcl b/storage/bdb/test/rsrc002.tcl deleted file mode 100644 index 808159a01e0..00000000000 --- a/storage/bdb/test/rsrc002.tcl +++ /dev/null @@ -1,66 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rsrc002.tcl,v 11.16 2004/01/28 03:36:29 bostic Exp $ -# -# TEST rsrc002 -# TEST Recno backing file test #2: test of set_re_delim. Specify a backing -# TEST file with colon-delimited records, and make sure they are correctly -# TEST interpreted. -proc rsrc002 { } { - source ./include.tcl - - puts "Rsrc002: Alternate variable-length record delimiters." - - # We run this test essentially twice, once with a db file - # and once without (an in-memory database). - foreach testfile { "$testdir/rsrc002.db" "" } { - - cleanup $testdir NULL - - # Create the starting files - set oid1 [open $testdir/rsrc.txt w] - set oid2 [open $testdir/check.txt w] - puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:" - puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:" - close $oid1 - close $oid2 - - if { $testfile == "" } { - puts "Rsrc002: Testing with in-memory database." - } else { - puts "Rsrc002: Testing with disk-backed database." - } - - puts "\tRsrc002.a: Read file, verify correctness." - set db [eval {berkdb_open -create -mode 0644 -delim 58 \ - -recno -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Read the last record; replace it (but we won't change it). - # Then close the file and diff the two files. - set txn "" - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set rec [$dbc get -first] - error_check_good get_first $rec [list [list 1 "ostrich"]] - set rec [$dbc get -next] - error_check_good get_next $rec [list [list 2 "emu"]] - - puts "\tRsrc002.b: Write record, verify correctness." 
- - eval {$dbc get -set 4} - set ret [$dbc put -current "penguin"] - error_check_good dbc_put $ret 0 - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - - error_check_good \ - Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \ - [filecmp $testdir/rsrc.txt $testdir/check.txt] 0 - } -} diff --git a/storage/bdb/test/rsrc003.tcl b/storage/bdb/test/rsrc003.tcl deleted file mode 100644 index b62a6bcb15e..00000000000 --- a/storage/bdb/test/rsrc003.tcl +++ /dev/null @@ -1,173 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rsrc003.tcl,v 11.7 2004/01/28 03:36:29 bostic Exp $ -# -# TEST rsrc003 -# TEST Recno backing file test. Try different patterns of adding -# TEST records and making sure that the corresponding file matches. -proc rsrc003 { } { - source ./include.tcl - global fixed_len - - puts "Rsrc003: Basic recno backing file writeback tests fixed length" - - # We run this test essentially twice, once with a db file - # and once without (an in-memory database). - # - # Then run with big fixed-length records - set rec1 "This is record 1" - set rec2 "This is record 2" - set rec3 "This is record 3" - set bigrec1 [replicate "This is record 1 " 512] - set bigrec2 [replicate "This is record 2 " 512] - set bigrec3 [replicate "This is record 3 " 512] - - set orig_fixed_len $fixed_len - set rlist { - {{$rec1 $rec2 $rec3} "small records" } - {{$bigrec1 $bigrec2 $bigrec3} "large records" }} - - foreach testfile { "$testdir/rsrc003.db" "" } { - - foreach rec $rlist { - cleanup $testdir NULL - - set recs [lindex $rec 0] - set msg [lindex $rec 1] - # Create the starting files - # Note that for the rest of the test, we are going - # to append a LF when we 'put' via DB to maintain - # file structure and allow us to use 'gets'. 
- set oid1 [open $testdir/rsrc.txt w] - set oid2 [open $testdir/check.txt w] - foreach record $recs { - set r [subst $record] - set fixed_len [string length $r] - puts $oid1 $r - puts $oid2 $r - } - close $oid1 - close $oid2 - - set reclen [expr $fixed_len + 1] - if { $reclen > [string length $rec1] } { - set repl 512 - } else { - set repl 2 - } - if { $testfile == "" } { - puts \ -"Rsrc003: Testing with in-memory database with $msg." - } else { - puts \ -"Rsrc003: Testing with disk-backed database with $msg." - } - - puts -nonewline \ - "\tRsrc003.a: Read file, rewrite last record;" - puts " write it out and diff" - set db [eval {berkdb_open -create -mode 0644 -recno \ - -len $reclen -source $testdir/rsrc.txt} $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Read the last record; replace it (don't change it). - # Then close the file and diff the two files. - set txn "" - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor \ - [is_valid_cursor $dbc $db] TRUE - - set rec [$dbc get -last] - error_check_good get_last [llength [lindex $rec 0]] 2 - set key [lindex [lindex $rec 0] 0] - set data [lindex [lindex $rec 0] 1] - - # Get the last record from the text file - set oid [open $testdir/rsrc.txt] - set laststr "" - while { [gets $oid str] != -1 } { - append str \12 - set laststr $str - } - close $oid - set data [sanitize_record $data] - error_check_good getlast $data $laststr - - set ret [eval {$db put} $txn {$key $data}] - error_check_good replace_last $ret 0 - - error_check_good curs_close [$dbc close] 0 - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - error_check_good \ - diff1($testdir/rsrc.txt,$testdir/check.txt) \ - [filecmp $testdir/rsrc.txt $testdir/check.txt] 0 - - puts -nonewline "\tRsrc003.b: " - puts "Append some records in tree and verify in file." 
- set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [chop_data -frecno [replicate \ - "This is New Record $i" $repl]] - puts $oid $rec - append rec \12 - incr key - set ret [eval {$db put} $txn {-append $rec}] - error_check_good put_append $ret $key - } - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - diff2($testdir/{rsrc.txt,check.txt}) $ret 0 - - puts "\tRsrc003.c: Append by record number" - set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [chop_data -frecno [replicate \ - "New Record (set 2) $i" $repl]] - puts $oid $rec - append rec \12 - incr key - set ret [eval {$db put} $txn {$key $rec}] - error_check_good put_byno $ret 0 - } - - error_check_good db_sync [$db sync] 0 - error_check_good db_sync [$db sync] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - diff3($testdir/{rsrc.txt,check.txt}) $ret 0 - - puts \ -"\tRsrc003.d: Verify proper syncing of changes on close." - error_check_good Rsrc003:db_close [$db close] 0 - set db [eval {berkdb_open -create -mode 0644 -recno \ - -len $reclen -source $testdir/rsrc.txt} $testfile] - set oid [open $testdir/check.txt a] - for {set i 1} {$i < 10} {incr i} { - set rec [chop_data -frecno [replicate \ - "New Record (set 3) $i" $repl]] - puts $oid $rec - append rec \12 - set ret [eval {$db put} $txn {-append $rec}] - # Don't bother checking return; - # we don't know what - # the key number is, and we'll pick up a failure - # when we compare. 
- } - error_check_good Rsrc003:db_close [$db close] 0 - close $oid - set ret [filecmp $testdir/rsrc.txt $testdir/check.txt] - error_check_good \ - diff5($testdir/{rsrc,check}.txt) $ret 0 - } - } - set fixed_len $orig_fixed_len - return -} diff --git a/storage/bdb/test/rsrc004.tcl b/storage/bdb/test/rsrc004.tcl deleted file mode 100644 index 20f11372a17..00000000000 --- a/storage/bdb/test/rsrc004.tcl +++ /dev/null @@ -1,52 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: rsrc004.tcl,v 11.5 2004/01/28 03:36:29 bostic Exp $ -# -# TEST rsrc004 -# TEST Recno backing file test for EOF-terminated records. -proc rsrc004 { } { - source ./include.tcl - - foreach isfixed { 0 1 } { - cleanup $testdir NULL - - # Create the backing text file. - set oid1 [open $testdir/rsrc.txt w] - if { $isfixed == 1 } { - puts -nonewline $oid1 "record 1xxx" - puts -nonewline $oid1 "record 2xxx" - } else { - puts $oid1 "record 1xxx" - puts $oid1 "record 2xxx" - } - puts -nonewline $oid1 "record 3" - close $oid1 - - set args "-create -mode 0644 -recno -source $testdir/rsrc.txt" - if { $isfixed == 1 } { - append args " -len [string length "record 1xxx"]" - set match "record 3 " - puts "Rsrc004: EOF-terminated recs: fixed length" - } else { - puts "Rsrc004: EOF-terminated recs: variable length" - set match "record 3" - } - - puts "\tRsrc004.a: Read file, verify correctness." 
- set db [eval berkdb_open $args "$testdir/rsrc004.db"] - error_check_good dbopen [is_valid_db $db] TRUE - - # Read the last record - set dbc [eval {$db cursor} ""] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set rec [$dbc get -last] - error_check_good get_last $rec [list [list 3 $match]] - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - } -} diff --git a/storage/bdb/test/scr001/chk.code b/storage/bdb/test/scr001/chk.code deleted file mode 100644 index 159524d27e1..00000000000 --- a/storage/bdb/test/scr001/chk.code +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.code,v 1.14 2004/11/08 14:45:07 bostic Exp $ -# -# Check to make sure that the code samples in the documents build. - -d=../../../db_docs - -[ -d $d ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} -[ -f ../libdb.a ] || (cd .. && make libdb.a) || { - echo 'FAIL: unable to find or build libdb.a' - exit 1 -} - -exitv=0 -for i in `find $d -name '*.cs'`; do - echo " compiling $i" - sed -e 's/m4_include(\(.*\))/#include <\1>/g' \ - -e 's/m4_[a-z]*[(\[)]*//g' \ - -e 's/(\[//g' \ - -e '/argv/!s/])//g' \ - -e 's/dnl//g' \ - -e 's/__GT__/>/g' \ - -e 's/__LB__/[/g' \ - -e 's/__LT__/ t.c - if cc -pthread -Wall -Werror -I.. t.c ../libdb.a -o t; then - : - else - echo "FAIL: unable to compile $i" - exitv=1 - fi -done - -exit $exitv diff --git a/storage/bdb/test/scr002/chk.def b/storage/bdb/test/scr002/chk.def deleted file mode 100644 index 7d5e6670f63..00000000000 --- a/storage/bdb/test/scr002/chk.def +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $ -# -# Check to make sure we haven't forgotten to add any interfaces -# to the Win32 libdb.def file. - -d=../.. - -# Test must be run from the top-level directory, not from a test directory. -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -f=$d/build_win32/libdb.def -t1=__1 -t2=__2 - -exitv=0 - -sed '/; /d' $f | - egrep @ | - awk '{print $1}' | - sed -e '/db_xa_switch/d' \ - -e '/^__/d' -e '/^;/d' | - sort > $t1 - -egrep __P $d/dbinc_auto/ext_prot.in | - sed '/^[a-z]/!d' | - awk '{print $2}' | - sed 's/^\*//' | - sed '/^__/d' | sort > $t2 - -if cmp -s $t1 $t2 ; then - : -else - echo "<<< libdb.def >>> DB include files" - diff $t1 $t2 - echo "FAIL: missing items in libdb.def file." - exitv=1 -fi - -# Check to make sure we don't have any extras in the libdb.def file. -sed '/; /d' $f | - egrep @ | - awk '{print $1}' | - sed -e '/__db_global_values/d' > $t1 - -for i in `cat $t1`; do - if egrep $i $d/*/*.c > /dev/null; then - : - else - echo "$f: $i not found in DB sources" - fi -done > $t2 - -test -s $t2 && { - cat $t2 - echo "FAIL: found unnecessary items in libdb.def file." - exitv=1 -} - -exit $exitv diff --git a/storage/bdb/test/scr003/chk.define b/storage/bdb/test/scr003/chk.define deleted file mode 100644 index 96c45c2b7d7..00000000000 --- a/storage/bdb/test/scr003/chk.define +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.define,v 1.25 2004/10/07 18:54:37 bostic Exp $ -# -# Check to make sure that all #defines are actually used. -# Check to make sure that all #defines start in column 1. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -exitv=0 -t1=__1 -t2=__2 -t3=__3 - -find $d -name '*.c' -o -name '*.cpp' | - sed -e '/\/php_db4\//d' | - xargs egrep '^[ ][ ]*#' > $t1 -test -s $t1 && { - echo "FAIL: found #defines with leading white space:" - cat $t1 - exit 1 -} - -egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in | - sed -e '/db_185.in/d' -e '/queue.h/d' -e '/xa.h/d' | - awk '{print $2}' | - sed -e '/^B_DELETE/d' \ - -e '/^B_MAX/d' \ - -e '/^DB_BTREEOLDVER/d' \ - -e '/^DB_HASHOLDVER/d' \ - -e '/^DB_LOCKVERSION/d' \ - -e '/^DB_MAX_PAGES/d' \ - -e '/^DB_QAMOLDVER/d' \ - -e '/^DB_TXNVERSION/d' \ - -e '/^DB_UNUSED/d' \ - -e '/^DEFINE_DB_CLASS/d' \ - -e '/^HASH_UNUSED/d' \ - -e '/^HPUX_MUTEX_PAD/d' \ - -e '/^LOG_OP/d' \ - -e '/^MINFILL/d' \ - -e '/^MUTEX_FIELDS/d' \ - -e '/^NAME_TO_SEQUENCE/d' \ - -e '/^NCACHED2X/d' \ - -e '/^NCACHED30/d' \ - -e '/^PAIR_MASK/d' \ - -e '/^P_16_COPY/d' \ - -e '/^P_32_COPY/d' \ - -e '/^P_32_SWAP/d' \ - -e '/^P_TO_UINT16/d' \ - -e '/^QPAGE_CHKSUM/d' \ - -e '/^QPAGE_NORMAL/d' \ - -e '/^QPAGE_SEC/d' \ - -e '/^SIZEOF_PAGE/d' \ - -e '/^TAILQ_/d' \ - -e '/^UINT64_FMT/d' \ - -e '/^UINT64_MAX/d' \ - -e '/^WRAPPED_CLASS/d' \ - -e '/^__BIT_TYPES_DEFINED__/d' \ - -e '/^__DBC_INTERNAL/d' \ - -e '/^i_/d' \ - -e '/_H_/d' \ - -e 's/(.*//' | sort > $t1 - -find $d -name '*.c' -o -name '*.cpp' > $t2 -for i in `cat $t1`; do - if egrep -w $i `cat $t2` > /dev/null; then - :; - else - f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in | - sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "` - echo "FAIL: $i: $f" - fi -done | sort -k 2 > $t3 - -test -s $t3 && { - cat $t3 - echo "FAIL: found unused #defines" - exit 1 -} - -exit $exitv diff --git a/storage/bdb/test/scr004/chk.javafiles b/storage/bdb/test/scr004/chk.javafiles deleted file mode 100644 index 98fb1aff23f..00000000000 --- a/storage/bdb/test/scr004/chk.javafiles +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.javafiles,v 1.7 2003/08/01 18:14:45 gburd Exp $ -# -# Check to make sure we haven't forgotten to add any 
Java files to the list -# of source files in the Makefile. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} - -f=$d/dist/Makefile.in - -t1=__1 -t2=__2 - -find $d/java $d/examples_java $d/rpc_server/java -name \*.java -print | - sed -e 's/^.*\///' | sort -u > $t1 -tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort -u > $t2 - -cmp $t1 $t2 > /dev/null || { - echo "<<< java source files >>> Makefile" - diff $t1 $t2 - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr005/chk.nl b/storage/bdb/test/scr005/chk.nl deleted file mode 100644 index c6ccaab6920..00000000000 --- a/storage/bdb/test/scr005/chk.nl +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.nl,v 1.7 2003/09/30 19:16:42 bostic Exp $ -# -# Check to make sure that there are no trailing newlines in __db_err calls. - -d=../.. - -[ -f $d/README ] || { - echo "FAIL: chk.nl can't find the source directory." - exit 1 -} - -cat << END_OF_CODE > t.c -#include - -#include -#include - -int chk(FILE *, char *); - -int -main(argc, argv) - int argc; - char *argv[]; -{ - FILE *fp; - int exitv; - - for (exitv = 0; *++argv != NULL;) { - if ((fp = fopen(*argv, "r")) == NULL) { - fprintf(stderr, "%s: %s\n", *argv, strerror(errno)); - return (1); - } - if (chk(fp, *argv)) - exitv = 1; - (void)fclose(fp); - } - return (exitv); -} - -int -chk(fp, name) - FILE *fp; - char *name; -{ - int ch, exitv, line, q; - - exitv = 0; - for (ch = 'a', line = 1;;) { - if ((ch = getc(fp)) == EOF) - return (exitv); - if (ch == '\n') { - ++line; - continue; - } - if (!isspace(ch)) continue; - if ((ch = getc(fp)) != '_') continue; - if ((ch = getc(fp)) != '_') continue; - if ((ch = getc(fp)) != 'd') continue; - if ((ch = getc(fp)) != 'b') continue; - if ((ch = getc(fp)) != '_') continue; - if ((ch = getc(fp)) != 'e') continue; - if ((ch = getc(fp)) != 'r') continue; - if ((ch = getc(fp)) != 'r') continue; - if ((ch = getc(fp)) != '(') continue; - while ((ch = getc(fp)) 
!= '"') { - if (ch == EOF) - return (exitv); - if (ch == '\n') - ++line; - } - while ((ch = getc(fp)) != '"') - switch (ch) { - case EOF: - return (exitv); - case '\\n': - ++line; - break; - case '.': - if ((ch = getc(fp)) != '"') - ungetc(ch, fp); - else { - fprintf(stderr, - "%s: at line %d\n", name, line); - exitv = 1; - } - break; - case '\\\\': - if ((ch = getc(fp)) != 'n') - ungetc(ch, fp); - else if ((ch = getc(fp)) != '"') - ungetc(ch, fp); - else { - fprintf(stderr, - "%s: at line %d\n", name, line); - exitv = 1; - } - break; - } - } - return (exitv); -} -END_OF_CODE - -cc t.c -o t -if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then - : -else - echo "FAIL: found __db_err calls ending with periods/newlines." - exit 1 -fi - -exit 0 diff --git a/storage/bdb/test/scr006/chk.offt b/storage/bdb/test/scr006/chk.offt deleted file mode 100644 index 69bf7cc9219..00000000000 --- a/storage/bdb/test/scr006/chk.offt +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.offt,v 1.13 2004/10/07 18:57:53 bostic Exp $ -# -# Make sure that no off_t's have snuck into the release. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -t=__1 - -egrep -w off_t $d/*/*.[ch] $d/*/*.in | -sed -e "/#undef off_t/d" \ - -e "/db_env_set_func_ftruncate/d" \ - -e "/db_env_set_func_pread/d" \ - -e "/db_env_set_func_pwrite/d" \ - -e "/db_env_set_func_seek/d" \ - -e "/j_ftruncate/d" \ - -e "/j_pread/d" \ - -e "/j_pwrite/d" \ - -e "/j_seek/d" \ - -e "/mp_fopen.c:.*can't use off_t's here/d" \ - -e "/mp_fopen.c:.*size or type off_t's or/d" \ - -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \ - -e "/mutex\/tm.c:/d" \ - -e "/os_map.c:.*(off_t)0))/d" \ - -e "/os_method.c.*func_ftruncate/d" \ - -e "/os_method.c.*func_pread/d" \ - -e "/os_method.c.*func_pwrite/d" \ - -e "/os_method.c.*func_seek/d" \ - -e "/os_method.c.*__P.*off_t/d" \ - -e "/os_rw.c:/d" \ - -e "/os_seek.c:.*off_t offset;/d" \ - -e "/os_seek.c:.*offset = /d" \ - -e "/os_truncate.c:.*off_t offset;/d" \ - -e "/os_truncate.c:.*off_t stat_offset;/d" \ - -e "/os_truncate.c:.*offset = /d" \ - -e "/test_perf\/perf_misc.c:/d" \ - -e "/test_server\/dbs.c:/d" \ - -e "/test_vxworks\/vx_mutex.c:/d" > $t - -test -s $t && { - cat $t - echo "FAIL: found questionable off_t usage" - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr007/chk.proto b/storage/bdb/test/scr007/chk.proto deleted file mode 100644 index 05e980ce6fc..00000000000 --- a/storage/bdb/test/scr007/chk.proto +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.proto,v 1.10 2003/12/08 19:28:26 bostic Exp $ -# -# Check to make sure that prototypes are actually needed. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -t1=__1 -t2=__2 -t3=__3 - -egrep '__P' $d/dbinc_auto/*.h | - sed -e 's/[ ][ ]*__P.*//' \ - -e 's/^.*[ *]//' \ - -e '/__db_cprint/d' \ - -e '/__db_lprint/d' \ - -e '/__db_noop_log/d' \ - -e '/__db_prnpage/d' \ - -e '/__db_txnlist_print/d' \ - -e '/__db_util_arg/d' \ - -e '/__ham_func2/d' \ - -e '/__ham_func3/d' \ - -e '/_print$/d' \ - -e '/_read$/d' > $t1 - -find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2 -for i in `cat $t1`; do - c=$(egrep -Hlw $i $(cat $t2) | wc -l) - echo "$i: $c" -done | egrep ' 1$' > $t3 - -test -s $t3 && { - cat $t3 - echo "FAIL: found unnecessary prototypes." - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr008/chk.pubdef b/storage/bdb/test/scr008/chk.pubdef deleted file mode 100644 index f5e66ccbc69..00000000000 --- a/storage/bdb/test/scr008/chk.pubdef +++ /dev/null @@ -1,185 +0,0 @@ -#!/bin/sh - -# -# Reconcile the list of public defines with the man pages and the Java files. - -d=../.. -docs=$d/../db_docs - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -p=$d/dist/pubdef.in - -exitv=0 - -cat < /dev/null`; then - : - else - echo "$f: $name is missing from $p" - exitv=1 - fi -done - -cat < /dev/null`; then - [ "X$isdoc" != "XD" ] && { - echo "$name should not appear in $f" - exitv=1 - } - else - [ "X$isdoc" = "XD" ] && { - echo "$name does not appear in $f" - exitv=1; - } - fi -done - -cat < /dev/null`; then - : - else - echo "$f: $name is missing from $p" - exitv=1 - fi -done - -cat < /dev/null`; then - [ "X$isinc" != "XI" ] && { - echo "$name should not appear in $f" - exitv=1 - } - else - [ "X$isinc" = "XI" ] && { - echo "$name does not appear in $f" - exitv=1 - } - fi -done - -cat < /dev/null`; then - : - else - echo "$f: $name is missing from $p" - exitv=1 - fi -done - -cat < /dev/null`; then - [ "X$isjava" != "XJ" ] && { - echo "$name should not appear in the Java API" - exitv=1 - } - else - [ "X$isjava" = "XJ" ] && { - echo "$name does not appear in the Java API" - exitv=1 - } - fi -done - -cat < /dev/null`; then - [ "X$isjava" != "XN" ] && [ "X$isjava" != "XJ" ] && { - echo "$name should not appear in the Java native layer" - exitv=1 - } - else - [ "X$isjava" = "XN" ] && { - echo "$name does not appear in the Java native layer" - exitv=1 - } - fi -done - -exit $exitv diff --git a/storage/bdb/test/scr009/chk.srcfiles b/storage/bdb/test/scr009/chk.srcfiles deleted file mode 100644 index 18fc6369288..00000000000 --- a/storage/bdb/test/scr009/chk.srcfiles +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.srcfiles,v 1.13 2004/10/07 19:16:43 bostic Exp $ -# -# Check to make sure we haven't forgotten to add any files to the list -# of source files Win32 uses to build its dsp files. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -f=$d/dist/srcfiles.in -t1=__1 -t2=__2 - -sed -e '/^[ #]/d' \ - -e '/^db_server_clnt.c/d' \ - -e '/^db_server_svc.c/d' \ - -e '/^db_server_xdr.c/d' \ - -e '/^gen_db_server.c/d' \ - -e '/^$/d' < $f | - awk '{print $1}' > $t1 -find $d -type f | - sed -e 's/^\.\.\/\.\.\///' \ - -e '/^build[^_]/d' \ - -e '/^libdb_java\/java_stat_auto.c/d' \ - -e '/^mod_db4\//d' \ - -e '/^perl\//d' \ - -e '/^php_db4\//d' \ - -e '/^rpc_server\/c\/gen_db_server.c/d' \ - -e '/^test\//d' \ - -e '/^test_server/d' \ - -e '/^test_thread/d' \ - -e '/^test_vxworks/d' | - egrep '\.c$|\.cpp$|\.def$|\.rc$' | - sort > $t2 - -cmp $t1 $t2 > /dev/null || { - echo "<<< srcfiles.in >>> existing files" - diff $t1 $t2 - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr010/chk.str b/storage/bdb/test/scr010/chk.str deleted file mode 100644 index 873fdb0124a..00000000000 --- a/storage/bdb/test/scr010/chk.str +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.str,v 1.9 2004/09/22 18:01:06 bostic Exp $ -# -# Check spelling in quoted strings. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} - -t1=__t1 - -sed -e '/^#include/d' \ - -e '/"/!d' \ - -e 's/^[^"]*//' \ - -e 's/%s/ /g' \ - -e 's/[^"]*$//' \ - -e 's/\\[nt]/ /g' \ - `find $d -name '*.[ch]' -o -name '*.cpp' -o -name '*.java' | - sed '/\/perl\//d'` | -spell | sort | comm -23 /dev/stdin spell.ok > $t1 - -test -s $t1 && { - cat $t1 - echo "FAIL: found questionable spelling in strings." - exit 1 -} - -egrep -h '/\* | \* ' \ - `find $d -name '*.[ch]' -o -name '*.cpp' | sed '/\/perl\//d'` | -spell | sort | comm -23 /dev/stdin spell.ok | tee /tmp/f/1 > $t1 - -test -s $t1 && { - cat $t1 - echo "FAIL: found questionable spelling in comments." 
- exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr010/spell.ok b/storage/bdb/test/scr010/spell.ok deleted file mode 100644 index 6a557279b67..00000000000 --- a/storage/bdb/test/scr010/spell.ok +++ /dev/null @@ -1,3697 +0,0 @@ -AAAA -ABS -ADDR -AES -AIX's -AJVX -ALG -ALLBACK -ALLDB -ALLOC -ALLZEROES -API -APIs -APP -APPMALLOC -APPNAME -APPREC -ASYNC -ATOI -AUTOCOMMIT -AUTOREMOVE -AccessExample -AccesssExample -Acflmo -Acknowledgements -Aclmop -Aclop -Adata -Addr -Ahlm -Ahm -Antoon -Applock -ArithmeticException -Arntzen -ArrayList -AssociateCallbacks -AttachCurrentThread -Aug -BBBB -BC -BCFILprRsvVxX -BCc -BDBXXXXX -BDBXXXXXX -BEGID -BH -BH's -BI -BII -BINTERNAL -BITSPERBLOCK -BKEYDATA -BLKSIZE -BNF -BOTHC -BOVERFLOW -BR -BSIZE -BTCOMPARE -BTMETA -BTREE -BTREEMAGIC -BTREEMETA -BTREEOLDVER -BTREEVERSION -BUF -Backoff -Barreto -Bc -Bdata -BerkeleyDB -BigInteger -BindingSpeedTest -Bosselaers -BtRecExample -Btree -BtreeStat -BtreeStats -Btrees -BulkAccessExample -ByteArray -ByteArrayBinding -ByteArrayFormat -ByteArrayInputStream -ByteArrayOutputStream -C'est -CALLBACK -CALLPGIN -CBC -CC -CCCC -CCYYMMDDhhmm -CD -CDB -CDS -CDdFILTVvX -CFB -CFILpRsv -CFLprsvVxX -CFh -CHARKEY -CHGPG -CHILDCOMMIT -CHILDINFO -CHILDINFOs -CHK -CHKPNT -CHKPOINT -CHKSUM -CKP -CKPLSN -CL -CLASSPATH -CLOSEFP -CLR -CLRDBC -CLpsvxX -CMP -CNT -COMPQUIET -CONCAT -CONCATDATAKEY -CONCATKEYDATA -CONF -CONFIG -CONST -CRTL -CRYPTO -CT -CTX -CXX -CacheFilePriority -CacheFileStats -CacheStats -Callback -CdFILTvX -Ch -ClassCastException -ClassCatalog -ClassCatalogDB -ClassInfo -ClassNotFoundException -ClientData -CloseHandle -Cmd -Cmp -CollectionTest -Config -CopyObjBytes -CreateFile -CreateFileMapping -CreateHashEntry -Crypto -CurrentTransaction -Cygwin -DATAHOME -DBC -DBCursor -DBENV -DBHOME -DBINFO -DBLOCAL -DBLOG -DBM -DBMETA -DBMETASIZE -DBNAME -DBP -DBP's -DBREG -DBREP -DBS -DBSDIR -DBT -DBT's -DBTCL -DBTList -DBTYPE -DBTs -DBa -DBaa -DBaz -DBba -DBcursor -DBz -DEADFILE -DECLS -DEF -DEFMINKEYPAGE 
-DELNO -DGREE -DIR -DIRECTIO -DIRENT -DIST -DJ -DLFCN -DLL -DOALL -DONOTINDEX -DS -DST -DSYNC -DUP -DUPFIRST -DUPID -DUPLAST -DUPMASTER -DUPOK -DUPONLY -DUPS -DUPSET -DUPSORT -DataBinding -DataBuffer -DataCursor -DataDb -DataEnvironment -DataFormat -DataIndex -DataInput -DataInputStream -DataOutput -DataOutputStream -DataStore -DataThang -DataType -DataView -DatabaseEntry -DatabaseException -DatabaseType -Db -DbAppDispatch -DbAppendRecno -DbAssociate -DbAttachImpl -DbBtreeCompare -DbBtreePrefix -DbBtreeStat -DbClient -DbCount -DbDeadlockException -DbDelete -DbDispatcher -DbDupCompare -DbEnv -DbEnvFeedback -DbEnvFeedbackHandler -DbErrcall -DbErrorHandler -DbException -DbFeedback -DbFeedbackHandler -DbGet -DbGetFlags -DbGetOpenFlags -DbGetjoin -DbHash -DbHashStat -DbInfoDelete -DbKeyRange -DbLock -DbLockNotGrantedException -DbLockRequest -DbLockStat -DbLogStat -DbLogc -DbLsn -DbMemoryException -DbMpoolFStat -DbMpoolFile -DbMpoolFileStat -DbMpoolStat -DbMultiple -DbMultipleDataIterator -DbMultipleIterator -DbMultipleKeyDataIterator -DbMultipleRecnoDataIterator -DbOpen -DbOutputStreamErrcall -DbPanicHandler -DbPreplist -DbPut -DbQueueStat -DbRecoveryInit -DbRemove -DbRename -DbRepStat -DbRepTransport -DbRunRecoveryException -DbSecondaryKeyCreate -DbSequence -DbServer -DbStat -DbTestUtil -DbTruncate -DbTxn -DbTxnRecover -DbTxnStat -DbUpgrade -DbUtil -DbVerify -DbXA -DbXAResource -DbXid -Dbc -DbcDup -DbcGet -DbcPut -Dbm -DbmCommand -Dbp -Dbs -Dbt -Dbt's -Dbts -Dde -Deadfile -DeadlockException -Debian -DeleteInfo -Deref'ing -Dir -Dups -EAGAIN -EBUSY -ECB -EEXIST -EEXISTS -EFAULT -EGENCHG -EID -EINTR -EINVAL -EIO -EIRT -EIi -ELECTINIT -ELECTVOTE -EMSG -ENOENT -ENOMEM -ENT -ENV -ENV's -EOFException -EPG -EPGNO -EPHASE -EPRINT -EPRINTed -ETIME -ETIMEDOUT -EXCL -EXT -Eefh -Egen -Elp -Endian -EntityBinding -EnvAttr -EnvExample -EnvGetEncryptFlags -EnvInfoDelete -EnvOpen -EnvRemove -EnvSetErrfile -EnvSetErrpfx -EnvSetFlags -EnvTest -EnvVerbose -Equidistributed -Errcall -Errfile 
-ErrorFunc -ErrorSetup -Errpfx -EvalObjv -ExampleDatabaseLoad -ExampleDatabaseRead -ExceptionUnwrapper -ExceptionWrapper -ExceptionWrapperTest -Exp -Externalizable -FALLTHROUGH -FCNTL -FCONTROL -FD -FDATASYNC -FF -FH -FILEDONE -FILEID -FILELIST -FILENO -FILEOPEN -FILEWRITTEN -FIXEDLEN -FIXLEN -FIXME -FMAP -FMT -FN -FNAME -FOREACH -FP -FST -FSTAT -FSTATI -FTRUNCATE -FTYPE -FastInputStream -FastOutputStream -FatalRecover -Fd -Ff -Fh -FileIndexHigh -FileIndexLow -FileNotFoundException -Fileinfo -FindHashEntry -FooImp -Foreach -ForeignKeyIndex -ForeignKeyTest -FreeBSD -FreeBSD's -FreeFunc -FreeList -Friedl -GCC -GETALL -GETCWD -GETDYNAMIC -GETNAME -GETOPT -GETRUSAGE -GETTIME -GETTIMEOFDAY -GETUID -GETZIP -Gb -Gcc -Gentles -Get's -GetByteArray -GetByteArrayFromObj -GetDiskFreeSpace -GetFileInformationByHandle -GetFlags -GetFlagsList -GetGlobPrefix -GetHashValue -GetIndexFromObj -GetIntFromObj -GetJavaVM -GetJoin -GetLockDetect -GetLongFromObj -GetLsn -GetOpenFlag -GetTimeout -GetUInt -GetUnsignedIntFromObj -GetVerbose -GetVersion -Gh -GlobalRefs -GotRange -HANDSOFF -HASHC -HASHC's -HASHHDR -HASHINSERT -HASHLOOKUP -HASHMAGIC -HASHMETA -HASHOLDVER -HASHREMOVE -HASHTAB -HASHVERSION -HCommand -HDR -HDRs -HEURCOM -HEURHAZ -HEURMIX -HEURRB -HKEYDATA -HMAC -HMETA -HOFFDUP -HOFFPAGE -HOFFSET -HOLDELECTION -HPPA -HPUX -HSEARCH -Harbison -HashStats -Hashtable -HelloDatabaseWorld -Holder's -Hsearch -IA -IAFTER -IBEFORE -IBTREE -ICURRENT -IDLETIMEOUT -IDs -IIL -IL -ILOCK -ILo -ILprR -INDX -INFOTYPE -INI -INIT -INITED -INITENV -INITSPIN -INMEM -INMEMORY -INORDER -INTTYPES -INVAL -INVALIDID -IOException -IOExceptionWrapper -IOSIZE -IPC -IR -IREAD -IRECNO -IRGRP -IRIX -IROTH -IRUSR -ISDUP -ISPERM -ISSET -IV's -IW -IWGRP -IWOTH -IWR -IWRITE -IWUSR -Ick -Ids -Ik -IllegalArgumentException -IllegalStateException -IncrRefCount -IndexOutOfBoundsException -Init -Initialise -IntegrityConstraintException -Interp -InventoryDB -Istmp -ItemNameIndexDB -Itemname -IterDeadlockTest -JDB -JE -JHB 
-JKL -JNI -JNIEnv -JNIs -JOINCUR -JOINENV -JVM -JZ -JavaIO -JavaRPCServer -JoinTest -KEYDATA -KEYEMPTY -KEYEXIST -KEYFIRST -KEYGROUP -KEYGRP -KEYLAST -KEYLEN -KL -Kerberos -KeyExtractor -KeyRange -KeyRangeException -KeyRangeTest -Krinsky -LANGLVL -LASTCKP -LBTREE -LCK -LDF -LDUP -LEAFCHAIN -LEAFLEVEL -LEAFSEEN -LFNAME -LFPREFIX -LG -LGPL -LIBNSL -LL -LOCKDOWN -LOCKOBJ -LOCKREGION -LOCKREQ -LOCKTAB -LOCKTIMEOUT -LOCKVERSION -LOGC -LOGFILEID -LOGMAGIC -LOGOLDVER -LOGP -LOGREADY -LOGSONLY -LOGVERSION -LORDER -LRECNO -LRECNODUP -LRU -LRUness -LSN -LSN's -LSNfile -LSNoffset -LSNs -LSTAT -LV -LWARX -LWP -LWZ -Landon -Lastp -Lcom -ListIterator -ListObjAppendElement -Ljava -Ll -LocalIterator -LockDetect -LockDetectMode -LockExample -LockGet -LockMode -LockNotGrantedException -LockOperation -LockRequest -LockRequestMode -LockStat -LockStats -LockTimeout -LockVec -Lockfhp -LogArchive -LogCompare -LogFile -LogFlush -LogGet -LogPut -LogRegister -LogSequenceNumber -LogStat -LogStats -Logc -LogcGet -LpRsS -LprRsS -Lsn -LtoR -MALLOC -MAMAMIA -MARGO -MASTERELECT -MAXARGS -MAXBQUALSIZE -MAXBTREELEVEL -MAXFIELD -MAXGTRIDSIZE -MAXID -MAXINFOSIZE -MAXLOCKS -MAXMMAPSIZE -MAXNR -MAXPATHLEN -MAXSIZE -MAXSIZEONPAGE -MAXTIMEOUT -MAXWRITE -MC -MEM -MEMCMP -MEMCPY -MEMMAPPED -MEMMOVE -MEMP -METADIRTY -MFT -MINCACHE -MINFO -MINIT -MINLOCKS -MINPAGECACHE -MINWRITE -MKS -MLOCK -MMAP -MMDDhhmm -MNO -MP -MPE -MPFARRAY -MPOOL -MPOOLFILE -MPOOLFILE's -MPOOLFILEs -MPREG -MPREGs -MSB -MSC -MSEM -MSG -MSGBUF -MSHUTDOWN -MSTR -MSVC -MT -MUNLOCK -MUNMAP -MUTEXes -MYDIRECTORY -Makoto -Malloc -MapEntry -MapViewOfFile -Margo -MarshalledEnt -MarshalledEntityBinding -MarshalledKey -MarshalledKeyBinding -MarshalledObject -MarshalledTupleData -MarshalledTupleEntry -MarshalledTupleKeyEntity -Matsumoto -Maxid -Maxkey -Mb -Mbytes -MemoryException -Mempool -Mersenne -Metadata -Metapage -MinGW -Minkey -Misc -MixColumn -MoveFile -MoveFileEx -Mp -MpGet -MpInfoDelete -MpStat -MpSync -MpTrickle -Mpool -MpoolExample 
-Mpoolfile -Msg -MsgType -Mutex -Mv -MyDbs -NB -NBUCKET -NCACHE -NCACHED -NDBM -NDIR -NEEDSPLIT -NEEDSWAP -NEWCLIENT -NEWFILE -NEWMASTER -NEWSITE -NG -NOARCHIVE -NOAUTO -NOBUFFER -NOCOPY -NODUP -NODUPDATA -NODUPS -NOFILE -NOHEADER -NOKEY -NOLOCK -NOLOCKING -NOMIGRATE -NOMMAP -NOMORE -NOORDERCHK -NOOVERWRITE -NOPANIC -NOPROMOTE -NORUN -NOSERVER -NOSORT -NOSYNC -NOTA -NOTEXIST -NOTFOUND -NOTGRANTED -NOTPERM -NOTREACHED -NOTSET -NOTUSED -NOTYPE -NOTZERO -NOWAIT -NP -NRECS -NT -NTFS -NULL'ing -NULLXID -NULLing -NULLs -NULs -NUM -NUMWRITES -NameToInfo -NameToPtr -Ndbm -NdbmOpen -NewInfo -NewStringObj -Nishimura -NoP -NoqV -NqV -Nr -NrV -NsV -Nuff -NullClassCatalog -NullPointerException -NullTransactionRunner -Num -Nxt -OBJ -ODDFILESIZE -OFFDUP -OFFPAGE -OLDVERSION -ONC -OOB -OP -OPD -OPENFILES -OPFLAGS -OR'd -ORDERCHKONLY -OSF -OSO -OUTFILE -OVFL -Obj -ObjectInputStream -ObjectOutputStream -ObjectStreamClass -Objs -Offpage -Ol -OpenFileMapping -OpenServer -OperationStatus -Ops -Optimised -OutOfMemoryError -OutputStream -PAGEDONE -PAGEINFO -PAGEINFOs -PAGELIST -PAGEs -PANIC'd -PARAMS -PARENT's -PBNYC -PG -PGDEF -PGINFO -PGNO -PGSIZE -PHP -PID -PKG -PLIST -POPENFILES -POSTDESTROY -POSTLOG -POSTLOGMETA -POSTOPEN -POSTSYNC -PPC -PR -PREAD -PREPLIST -PREV -PRI -PRINTFOOTER -PRINTHEADER -PROT -PSIZE -PSTAT -PTHREAD -PWRITE -PaRisc -Pagesize -Pagesizes -Params -Part's -PartBinding -PartData -PartKey -PartKeyBinding -PartValue -Paulo -Perl -Pg -PgInit -PgIsset -Pgin -Pgno -Phong -PlatformSDK -Posix -PowerPC -PreparedTransaction -Prev -PrimaryKeyAssigner -Proc -Pthread -PtrToInfo -QAM -QAMDATA -QAMMAGIC -QAMMETA -QAMOLDVER -QAMVERSION -QMETA -QNX -QPAGE -QUOTESERVER -QueueStats -RB -RBBASE -RBCOMMFAIL -RBDEADLOCK -RBEND -RBINTEGRITY -RBOTHER -RBPROTO -RBROLLBACK -RBTIMEOUT -RBTRANSIENT -RCLOSE -RDONLY -RDWRMASTER -READONLY -REALLOC -REALLOC'ed -REC -RECLEN -RECNO -RECNOSYNC -RECNUM -RECORDCOUNT -RECOVERYTEST -REGENV -REGINFO -REGIONs -REGMAINT -RELEN -RELIANTUNIX -RENAMEMAGIC 
-REPLOCKED -REPQUOTE -REPVERSION -REQ -REQs -REVERSECONCAT -REVERSEDATA -REVSPLITOFF -RIJNDAEL -RINTERNAL -RIW -RLOCK -RM -RMERR -RMFAIL -RMID -RMNAMESZ -RMW -RMs -ROP -RPC -RPCCLIENT -RPCExample -RPCGEN -RPRINT -RT -RTTarget -RUNLOG -RUNRECOVERY -RUSAGE -RandCommand -RangeExceeded -RangeKeyNotEqual -RangeNotFound -Realloc -Rec -Recno -Recnos -RecordNumberBinding -RecordNumberFormat -RecoveryOperation -Reinit -RepElect -RepElectResult -RepFlush -RepLimit -RepProcessMessage -RepRequest -RepStart -RepStat -ReplicationHandleDeadException -ReplicationStats -ReplicationStatus -ResetResult -ReturnSetup -Rieffel -Rijmen -Rijndael -Roeber -Rp -RpcDb -RpcDbEnv -RpcDbTxn -RpcDbc -RtoL -RunRecoveryException -RuntimeException -RuntimeExceptionWrapper -Rusage -SCHED -SCO -SCO's -SEGDATA -SEGID -SEM -SEMA -SEP -SEQ -SERVERPROG -SERVERVERS -SETALL -SETCURSOR -SETFD -SETVAL -SGI -SHA -SHALLOC -SHASH -SHMEM -SHMGET -SHQUEUE -SIGALRM -SIGPIPE -SIZEOF -SKIPFIRSTKEY -SKU -SNPRINTF -SPL -SPLITOLD -SPRINTF -SS -SSLeay -SSZ -STAILQ -STARTUPDONE -STAT -STATS -STCWX -STD -STDC -STDERR -STDINT -STK -STR -STRCASECMP -STRDUP -STRLIST -STROFFSET -STRTOUL -STRUCT -STWCX -SUBDB -SWAPBYTES -SWIG's -SWITCHes -SWR -SYSCONF -SampleDatabase -SampleViews -Schlossnagle -SecondaryDeadlockTest -Sedgewick -Seq -SeqGet -SeqGetFlags -SeqOpen -SequenceExample -SequenceStats -SerialBinding -SerialBindingTest -SerialFormat -SerialInput -SerialOutput -SerialSerialBinding -SerialSerialKeyExtractor -SetEndOfFile -SetInfoData -SetListElem -SetListElemInt -SetListElemWideInt -SetListRecnoElem -SetMultiList -SetObjResult -ShipmentBinding -ShipmentByPart -ShipmentBySupplier -ShipmentData -ShipmentKey -ShipmentKeyBinding -ShipmentValue -Shm -Signalling -SimpleBuffer -Skiplist -Skodon -Sleepycat -Something's -SortedMap -SortedSet -Sparc -Splitp -Stat -Stats -Std -Stdout -Steele -StoredClassCatalog -StoredClassCatalogTest -StoredClassCatalogTestInit -StoredCollection -StoredCollections -StoredContainer -StoredEntrySet 
-StoredIterator -StoredKeySet -StoredList -StoredMap -StoredMapEntry -StoredSortedEntrySet -StoredSortedKeySet -StoredSortedMap -StoredSortedValueSet -StoredValueSet -StringBuffer -StringDbt -Subdatabase -Subdb -Subname -SunOS -SupplierBinding -SupplierByCity -SupplierData -SupplierKey -SupplierKeyBinding -SupplierValue -SystemInfo -TAILQ -TCHAR -TCL -TDS -TESTDIR -TESTTESTEST -TESTXADIR -THR -TID -TLPUT -TM -TMASYNC -TMENDRSCAN -TMER -TMERR -TMFAIL -TMJOIN -TMMIGRATE -TMMULTIPLE -TMNOFLAGGS -TMNOFLAGS -TMNOMIGRATE -TMNOWAIT -TMONEPHASE -TMP -TMPDIR -TMREGISTER -TMRESUME -TMSTARTRSCAN -TMSUCCESS -TMSUSPEND -TMUSEASYNC -TMs -TODO -TOPLEVEL -TPC -TPCB -TPS -TRU -TRUNC -TRUNCDATA -TSTRING -TXN -TXNAPP -TXNHEAD -TXNID -TXNLIST -TXNLOGREC -TXNMGR -TXNREGION -TXNS -TXNVERSION -TXNs -Takuji -Tcl -Tcl's -TempFolder -TestAppendRecno -TestAssociate -TestCallback -TestClassCatalog -TestClosedDb -TestConstruct -TestDataBinding -TestDbtFlags -TestEntity -TestEntityBinding -TestEnv -TestGetSetMethods -TestKeyAssigner -TestKeyExtractor -TestKeyRange -TestLockVec -TestLogc -TestOpenEmpty -TestReplication -TestRpcServer -TestSameDbt -TestSerial -TestSimpleAccess -TestStat -TestStore -TestTruncate -TestUtil -TestXAServlet -Thang -Thies -Threshhold -Throwable -TimeUnits -Tmp -Topher -TpcbExample -TransactionRunner -TransactionStats -TransactionTest -TransactionTests -TransactionWorker -Tru -Tt -TupleBinding -TupleBindingTest -TupleFormat -TupleFormatTest -TupleInput -TupleInputBinding -TupleMarshalledBinding -TupleOrderingTest -TupleOutput -TupleSerialBinding -TupleSerialDbFactory -TupleSerialDbFactoryTest -TupleSerialEntityBinding -TupleSerialKeyExtractor -TupleSerialMarshalledBinding -TupleSerialMarshalledKeyExtractor -TupleTupleBinding -TupleTupleKeyExtractor -TupleTupleMarshalledBinding -TupleTupleMarshalledKeyExtractor -Txn -TxnCheckpoint -TxnInfoDelete -TxnRecover -TxnStat -TxnTimeout -Txnid -Txns -UI -UID -UINT -ULONG -UMRW -UNAVAIL -UNDEF -UNDOC -UNICODE -UNISTD -UNREF -UOC 
-UPDATEROOT -UPREFIX -USEC -USERMEM -UTF -UTFDataFormatException -UTS -UX -Unencrypted -Unicode -UnixLib -UnixWare -Unixware -UnknownError -UnmapViewOfFile -UnsupportedOperationException -UtfOps -UtfTest -Util -VC -VER -VM -VMPAGESIZE -VRFY -VSNPRINTF -VTALLY -VX -Var -Vc -VendorDB -Vo -Voter's -Vv -VvW -VvXxZ -Vvw -Vx -VxWorks -WAITSFOR -WAKEME -WATCOM -WLInitialContextFactory -WORDLIST -WRITECURSOR -WRITELOCK -WRITEOPEN -WRNOSYNC -WRONLY -WT -WW -WWRITE -Waitsfor -WebLogic -WinNT -WriteFile -X's -XA -XAER -XAException -XAResource -XID -XIDDATASIZE -XOR'd -XP -XPG -XXX -Xid -XxZ -YIELDCPU -YY -YYMMDDhhmm -ZED -ZF -Zero'd -aa -aaA -aaB -aaC -aaD -aaa -aaaaaa -aaaaab -aaaaac -aab -aac -aad -ab -abc -abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq -abcdef -abs -absname -abspath -ac -aca -accessor -ack -acplt -activekids -activep -activepips -actualData -actualKey -acurs -ada -addAll -addfamilylocker -addpage -addr -addrem -addrp -adh -adj -adjindx -adlsVv -admin -afterop -ahr -alfred -alg -algsetup -alignp -alldb -alloc -alloc'ed -alloc'ing -alloced -allocs -alsVv -alsn -amx -antoon -anum -aparts -apologise -app's -appexit -appl -appname -appnotes -apprec -apps -aq -archivedir -areDuplicatesAllowed -areDuplicatesOrdered -areKeysRenumbered -arg -argp -args -argv -arr -arraycopy -arraysz -asites -assertEquals -astubble -ata -atoi -atol -att -autoCommit -autocommit -autoconf -autoconfig -autoremove -avg -awk -baaaaa -backoff -badend -badgen -badkey -badnum -bak -barreto -baseClass -bb -bba -bcopy -bcurs -bd -bdb -bdbcmds -beginInclusive -beginKey -beq -berkdb -berkdbcmds -berkeley -bfname -bfree -bh -bharray -bhfree -bhp -bhwrite -bi -bigint -bigpages -bigpair -binding's -bitmasks -bk -blbs -blk -blksize -blockDecrypt -blockEncrypt -blocknum -blocksize -bmeta -bmp -bndx -bne -bnum -bosselaers -bostic -bp -bqual -br -broot -bs -bshift -bsize -bsizep -bt -btcompare -btrec -btree -btreemeta -btrees -buf -bufp -bufs -bufsize -buildpartial -builtin -bumpSize -bval 
-bylsn -bypage -byteLen -byteOffset -byteorder -bytesToChars -bytesize -bytesp -byteswap -byteswapped -bytevalue -cEelmNrtVZ -caaaaa -cachep -cachesize -cadjust -callback -callback's -callbk -calloc -callocs -callpgin -carray -catalogtest -cbts -cc -ccclose -ccnext -ccset -ccurs -cd -cdata -cdb -cdel -ceVv -ceh -celmNrtVZ -celmNtV -celmNtVZ -cget -cgetchk -ch -char's -charLen -charLength -charOffset -charkey -charset -checkgen -checklocker -chgpg -childcursor -childinc -childproof -childput -chk -chkpoint -chkpt -chkspace -chksum -chmod -chongo -ci -cip -cipherInit -cipherInstance -cipherUpdateRounds -ckp -ckplsn -cksum -cl -classCatalog -classID -className -classpath -cleandir -clearIndexKey -clearerr -clib -clientData -clientrun -clist -clnt -closeevent -closehandle -closeme -cls -cmap -cmd -cmdargs -cmds -cmp -cmpwi -cnt -com -comm -compareDuplicates -compareproc -comparitors -compat -concatdatakey -concatkeydata -conf -config -const -containsAll -containsKey -containsValue -conv -convprintable -copyFrom -copyin -copyleft -copymap -copyout -copypage -countElements -countp -cp -cpage -cpp -cputchk -cq -cr -crdel -creat -createflag -crypto -cs -csp -ct -ctp -ctp's -ctps -ctx -ctxn -curadj -curfile -curinfo -curinval -curlist -curlsn -curslen -curslist -cursp -cursq -cutlim -cxx -cxxproc -cxxthis -cxxutil -dat -dataBinding -dataInput -dataOutput -dataToObject -databuf -datalen -datap -datapage -datastr -db -dbFileName -dbOpenFlags -dbc -dbc's -dbca -dbcb -dbcl -dbclear -dbclient -dbclose -dbcmds -dbcp -dbcursor -dbdata -dbdel -dbdemo -dbdp -dbe -dbentry -dbenv -dbh -dbinc -dbinfo -dbinit -dbip -dbj -dbjoin -dbkill -dblist -dblistlinks -dblp -dbm -dbmclose -dbmeta -dbmfp -dbminit -dbmp -dbname -dbnamep -dbobj -dbopen -dbp -dbp's -dbpp -dbprep -dbps -dbq -dbrdonly -dbreg -dbremove -dbrename -dbs -dbsizes -dbsrv -dbt -dbta -dbtb -dbtp -dbtruncate -dbts -dbtxn -dbtype -dbuf -dbverify -dbx -dcursor -dd -deadfile -deadlocker -deadmap -dec -decrpyting -def -defcmp -defg 
-defpfx -defto -del -deletable -deleteAction -delext -delim -delimp -delpg -denom -dereffing -des -deserialize -deserialized -deserializing -dest -detectp -dev -df -dh -diff -difflen -dir -directio -dirent -dirf -dirfno -dirfree -dirlist -dirname -dirp -dirs -dirtyRead -dist -dists -ditem -dl -dlbytes -dlen -dlfcn -dll -dlopen -dm -dname -dndx -doGet -doTimeouts -doWork -dobj -docRoot -doevents -donext -doreq -doubleToLongBits -dp -dpagep -dpages -dpair -dpgno -dr -dremoved -ds -dsearch -dsize -dst -dsync -dtab -dtabsize -dumptree -dup -dup'ed -dupcompare -duperr -dupmaster -dupmasters -dupped -dups -dupset -dupsort -duptree -duptype -dwNumberOfProcessors -dx -eax -ebuf -edu -efg -efh -egen -eid -eid's -eidp -ek -ele -electinit -electsend -electtally -electvote -electwait -elem -elp -emap -emt -encrpyting -encryptaes -encryptany -endInclusive -endKey -endian -endianness -endif -endname -endodata -endofile -endpath -enqueuing -ent -entityBinding -entrySet -entryp -enum -enums -env -env's -envFlags -envcl -envdata -envdp -envid -envip -envlock -envp -envpanic -envparent -envremove -envrpcserver -envs -eof -eor -erlangen -errbuf -errcall -errfile -errlock -errno -errnum -erroring -errorret -errpfx -errunlock -errx -esat -esp -eval -exactp -excl -exe -exid -exnum -expandtab -expr -ext -extentsize -extentsizep -externalizable -extid -extractIndexKey -fN -faq -faststat -faultmem -fc -fcchk -fchk -fclose -fcn -fcntl -fcreate -fd -fd's -fdatasync -fdm -fdp -fdupcurs -feedback's -ferr -ff -ffactor -ffactorp -fget -fh -fhp -fid -fids -fileID -fileIDs -filedone -filehandle -fileid -fileids -fileinfo -fileinit -filelist -filenamep -filenum -fileopen -fileops -fillf -finalcount -findFirst -finddatum -findlastckp -finfo -firstKey -firstkey -fiv -fixup -fixups -flagN -flagsp -floatToIntBits -flushcommit -fmax -fmethod -fn -fname -fnl -fnp -fns -fnum -foo -fopen -forName -foreignStore -form's -format's -formatID -fp -fprobe -fptr -fput -fq -freakin -free'd -free'ing -freeable 
-freedata -freefamilylocker -freelist -freelock -freelocker -freep -fremove -freq -friedl -fromInclusive -fromIndex -fromKey -fromMapEntry -fromValue -frotzed -fs -fset -fst -fstat -fstati -fsync -ftruncate -ftype -fullhome -fullname -func -funcs -fv -fwd -gbytes -gbytesp -gc -gc'ed -gcc -gdb -gen -genrand -george -getBranchQualifier -getBytes -getCity -getClassFormat -getCollection -getCurrentKey -getData -getDbEnv -getDbt -getDbtString -getDetail -getEnvironment -getErrno -getFlags -getFormatId -getGlobalTransactionId -getIndex -getInstance -getLock -getMode -getNext -getObj -getObject -getOffset -getOp -getPartialLength -getPartialOffset -getPrimaryKeyFormat -getPrimitiveBinding -getRecordNumber -getSize -getString -getTimeout -getUserBufferLength -getValue -getValueFormat -getactive -getboth -getbothc -getckp -getcwd -getdata -getdynamic -getenv -getinfo -getjoin -getlocker -getlong -getname -getnext -getno -getobj -getopt -getpageinfo -getpid -getrusage -getstack -getsubopt -gettime -gettimeofday -gettingstarted -gettype -getuid -getulong -getval -getzip -ghi -gid -gotkey -gotta -groupalloc -gsf -gsp -gtrid -guesspgsize -hEvent -handle's -handleException -happend -hardcode -hardcoding -hasNext -hasPrevious -hashCode -hashhdr -hashinit -hashmeta -hashp -hashproc -hc -hcp -hcreate -hdestroy -hdr -hdrbuf -hdrchk -hdrpages -hdrs -headMap -headSet -header's -headp -heldby -helloworld -hf -hijkl -himark -histdbt -hlock -hmac -hmeta -holdl -homep -homeroot -hostaddr -hostname -hotcopy -hp -hq -href -hs -hsearch -htab -htonl -httpd -iX -ia -icursor -idbase -idletimeout -idleto -idmap -idnum -idp -ids -idspace -idup -idup'ed -iface -ifdef -iff -ifndef -ihold -iitem -ik -ilock -ilocks -inc -incfirst -incomp -incr -incursor -indexCursor -indexKey -indexKeyData -indexKeyFormat -indexKeyOutput -indexKeys -indexOf -indexViews -indexlist -indx -info's -infop -informatik -ini -init -init'ing -inited -initialSize -inits -initspin -inlen -inline -inmem -inmemdbflags -inmemory 
-ino -inode -inorder -inp -inpitem -inputOctets -inregion -insdel -int -intBitsToFloat -intValue -intel -interp -intial -ints -inttypes -inuse -inventoryDB -inventorydb -io -ioinfo -iopsize -iosDevFind -ip -ipcs -iread -isAutoCommit -isByteLen -isDirtyReadAllowed -isDirtyReadEnabled -isEmpty -isIndexed -isOrdered -isTransactional -isWriteAllowed -isbad -isbigendian -isdeleted -isdone -isdst -isdup -isolder -isopd -ispget -isroot -isspace -istmp -isvalid -isync -itemname -itemnameDB -itemorder -iter -ith -iwr -iwrite -java -java's -javax -jbyte -jc -jenv -jhi -jl -jlong -jmsjdbc -jndi -journaling -jp -jq -jrpcgen -jta -kb -kbyte -kbytes -keio -key's -keyAssigner -keyBinding -keyClass -keyExtractor -keyFormat -keyInput -keyInstance -keyLen -keyMaterial -keyName -keyOutput -keySet -keybuf -keyfirst -keyflag -keygroup -keygroups -keygrp -keylast -keynum -keyp -keyrange -keystr -kgnum -ki -killid -killinterval -killiteration -killtest -klNpP -klNprRV -klNprRs -klinks -kow -kp -kpv -krinsky -ks -kuleuven -lM -lP -lSN -lang -last's -lastError -lastIndexOf -lastKey -lastfile -lastid -lastpgno -later's -lbtree -lbucket -lc -ld -ldata -ldbp -ldbt -ldbtsize -ldcws -ldl -ldstub -le -len -lenp -les -lf -lfhp -lfname -lg -lget -lh -lhash -lhi -libdb -libfile -libname -libpthread -libthread -lineno -listIterator -listobj -listp -lk -lkrs -ll -lld -llsn -llu -lm -ln -lnP -lnsl -loadme -localhost -localtime -lockForWrite -lockGet -lockVector -lockcount -lockdown -locker's -lockerid -lockevent -lockfhp -lockid -lockinfo -lockmgr -lockmode -lockobj -lockop -lockreq -lockstep -locktimeout -logbuf -logc -logclean -logdir -logfile -logfiles -logflush -loggap -logmaxset -logmsg -logrec -logset -logsonly -longBitsToDouble -lorder -lorderp -lowlsn -lp -lpgno -lprint -lput -lrand -lrp -lru -lsVv -lsn -lsnadd -lsninit -lsnoff -lsnp -lsynch -lt -lu -luB -luGB -luKB -luKb -luM -luMB -luMb -lvalue -lwarx -lwp -lx -mNP -mNs -machid -machtab -maddr -magicno -maintinit -maj -majver -makeKey 
-makedup -malloc -malloc'd -malloc'ed -malloc's -mallocing -mallocs -mapEntry -mapfile -margo -markdone -markneeded -markus -marshalIndexKey -marshalled -marshalling -matumoto -maxRetries -maxb -maxcommitperflush -maxid -maxkey -maxkeypage -maxlockers -maxlocks -maxlsn -maxn -maxnactive -maxnlockers -maxnlocks -maxnobjects -maxobjects -maxopenfd -maxops -maxp -maxperm -maxpg -maxpgno -maxrec -maxsites -maxsize -maxtimeout -maxto -maxtxn -maxtxns -maxwrite -maxwrites -mb -mbp -mbucket -mbytes -mbytesp -md -mem -membar -memcmp -memcmps -memcpy -memmove -memp -memset -metachk -metadata -metaflags -metagroup -metalsn -metapage -metasub -metaswap -methodID -mf -mfp -mgrp -midpage -millitm -mincommitperflush -minkey -minkeyp -minkeypage -minlocks -minp -minval -minver -minwrite -minwrites -mip -mis -misc -mjc -mkdir -mkdir's -mlock -mmap -mmap'd -mmap'ing -mmapped -mmapsize -mmapsizep -mmetalsn -mmpgno -modeFlag -moremiddle -mortem -movl -mp -mpf -mpfarray -mpfq -mpgno -mpip -mpool -mpoolfile -mpools -mpreg -mps -msem -msemaphore -msg -msg's -msgadd -msgbuf -msgcall -msgdbt -msgfile -msgfp -msgs -msgtype -msize -mswap -mt -mti -munlock -munmap -mut -mutex -mutexes -mutexlocks -mutexp -muxfile -mv -mvptr -mydrive -mydrivexxx -myfree -mylock -myobjc -myval -n'th -nO -nP -nTV -nTt -naborts -nactive -nalloc -namelistp -nameop -namep -namesp -nargc -nargv -nbegins -nbytes -ncache -ncachep -ncaches -ncommit -ncommits -nconflicts -ncurs -ndary -ndata -ndbm -ndeadalloc -ndeadlocks -ndir -ndx -needswap -neg -nelem -nelemp -nentries -nevict -newalloc -newclient -newdata -newdatabase -newfh -newfile -newitem -newlist -newmaster -newname -newopd -newpage -newpgno -newsite -newsites -newsize -next's -nextIndex -nextdup -nextents -nextinfo -nextkey -nextlsn -nextnodup -nextpgno -nfid -nfiles -ng -nitems -nkeys -nlist -nlockers -nlocks -nlocktimeouts -nlsn -nmodes -nnext -nnextlsn -nnowaits -noWait -noarchive -nobjects -nobuffer -nodup -nodupdata -noet -nogrant -nohasham -nolock 
-nolocking -nolonger -nomem -nommap -noop -nooverwrite -nop -nopanic -nopenp -norep -nosort -nosync -nosystemmem -notdurable -notfound -notgranted -notused -notzero -novrfy -nowait -nowaits -np -npages -npgno -nprocs -nptr -nr -nread -nreaders -nrec -nrecords -nrecs -nreg -nreleases -nrepeat -nrequests -nrestores -nsites -nsize -nsl -nsleep -nsleepp -ntasks -nthreads -nthrottles -ntohl -ntxns -ntxntimeouts -nuls -num -numberOfKeysRead -numdup -numdups -numext -numlocks -nval -nvotes -nwrite -nwritep -nwriters -nwrites -nwrotep -nxt -obj -objc -objectArrayToString -objectToData -objectToKey -objectToValue -objp -objs -objv -octets -offdup -offp -offpage -offsetp -oflags -ohash -ok -oldValue -oldValues -olddata -olditem -oldname -oldrec -oldsize -oli -omniti -omode -ondisk -onefile -onint -onoff -onoffp -onpage -op -opd -openCursors -openFlags -openfd -openfiles -openhandle -opensub -opflags -opmods -ops -optarg -opterr -optind -optopt -optreset -orderchkonly -org -orig -originfo -origline -origmap -origp -os -osynch -outBuffer -outbuf -outdatedp -outfd -outfile -outfp -outlen -outstr -ovfl -ovflok -ovflpage -ovflpoint -ovflsize -ovput -ovref -padDecrypt -padEncrypt -padp -pageimage -pageinfo -pagelayout -pagelist -pagelsn -pageno -pagep -pagereq -pagesize -pagesizep -pagesizes -pagespace -pagetype -pagezero -pagf -pagfno -panic'd -panic'ing -paniccall -panicstate -params -parentid -parseLong -partsize -passwd -passwds -paulo -pct -pdbp -pdf -penv -perf -perfdb -perftool -perms -pflag -pfx -pg -pgaddr -pgcookie -pgdbp -pgdbt -pgerr -pget -pgfmt -pgfree -pggap -pgin -pginfo -pgip -pgmax -pgno -pgnoadd -pgnoaddr -pgnop -pgnos -pgnum -pgout -pgread -pgs -pgset -pgsize -pgwrite -ph -php -physdel -physwrite -pid -pids -pinref -pinsert -pitem -pk -pkey -pkey's -pkeys -pkg -placeholder -plist -pmap -pn -poff -portmapper -pos -postdestroy -postlog -postlogmeta -postopen -postsync -pp -ppc -pr -prR -prdb -prdbt -pre -pread -prec -predestroy -preopen -preparse -preplist 
-preprocess -preprocessed -preread -prereq -presorted -prev -prev's -prevfile -previousIndex -prevlsn -prevnodup -prflags -prfooter -prheader -pri -primaryKey -primaryKeyData -primaryKeyFormat -primaryKeyInput -primaryKeyThang -primget -printf -printlock -printlog -priorityp -prnpage -proc -procs -proff -progname -progpath -protos -prpage -prqueue -prtree -pseudorandom -psize -psplit -pstat -ptail -pthread -pthreads -ptr -ptrdiff -ptype -putAll -putall -putchar -putitem -putobj -putop -putpageinfo -putr -pv -pwrite -qV -qam -qammeta -qmeta -qmpf -qnx -qp -qs -qtest -quV -queuestart -quicksort -quotedStr -rRV -rRs -rV -rand -randtbl -rbtree -rcon -rcuradj -rcursor -rcvd -rdata -rdbc -rdbenv -rdonly -rdump -reacquired -reacquires -readBoolean -readByte -readBytes -readChar -readChars -readDouble -readFloat -readInt -readLong -readShort -readString -readUnsignedByte -readUnsignedInt -readUnsignedShort -readd -readdir -readn -readonly -readratio -realloc -realloc'd -realloc'ed -reallocing -rec -recfill -reclen -reclength -recno -recnop -recnos -recnum -recnums -recops -record's -recordNumber -recordlen -recs -rectype -recvd -refcnt -refcount -refcounting -reffed -regids -reginfo -regionmax -reglocks -regmutex -regmutexes -regop -regsize -relen -relink -rem -remevent -remfile -remfirst -remlock -removeAll -remrem -renum -renv -rep's -repdb -repl -replication's -replpair -replyp -reppg -repquote -repsite -reput -reputpair -req -resizep -resync -retPrimaryKey -retValue -retbuf -retcopy -retcount -rethrown -reties -retp -retsp -retval -reverseconcat -reversedata -revsplitoff -rf -rfd -rfp -rget -rheader -ri -rijmen -rijndael -rijndaelDecrypt -rijndaelDecryptRound -rijndaelEncrypt -rijndaelEncryptRound -rijndaelKeySetupDec -rijndaelKeySetupEnc -ritem -riw -rk -rkey -rlen -rlsn -rlsnp -rm -rmdir -rmdir's -rmid -rmw -ro -roff -rollforward -rootent -rootlsn -rp -rp's -rpath -rpc -rpcgen -rpcid -rpcserver -rprint -rptr -rq -rr -rrecno -rs -rsearch -rskey -rsplit -rtree -rtxn 
-rundb -runlog -rusage -rw -rwrw -rwrwrw -sS -sV -sVv -salloc -salvager's -savetime -sched -scount -sdb -sdbp -seckey -secon -secondary's -secondaryKeyCreate -secs -secsp -sectorsize -segdata -segid -sema -semid -seminfo -semun -sendpages -sendproc -sep -seq -seqnum -seqp -serialobj -servlet -setAppDispatch -setAppendRecno -setBtreeCompare -setBtreePrefix -setCacheSize -setData -setDuplicatelicateCompare -setEncrypted -setErrorHandler -setErrorPrefix -setFeedback -setFeedbackHandler -setFlags -setHash -setLock -setMode -setObj -setObject -setOffset -setOp -setPanicHandler -setPartialLength -setPartialOffset -setRecno -setRecordNumber -setReplicationLimit -setReplicationTransport -setSize -setTimeout -setUserBufferLength -setValue -setflags -setid -setlsn -settimeout -setto -setval -sexing -sgenrand -sh -shalloc -shalloc'ed -shalloced -sharedb -shareenv -shash -shm -shmat -shmctl -shmdt -shmem -shmget -shmname -shortread -shownull -shqueue -shr -shreg -siginit -signo -sigresend -singleKey -sizeAdded -sizeNeeded -sizefix -sizeof -sj -skiplist -skiplists -skodonj -sl -sle -sleepycat -slh -slumber'd -smap -smax -snapshotting -sniglet -snprintf -sortdups -sourcep -sp -sparc -spawnl -spinlock -spinlocks -spinsp -splitdata -splitmeta -splitp -sprintf -srand -srandom -src -sread -ss -sscanf -sse -sshift -ssize -sslll -sss -stat -stati -stats -stbar -std -stddev -stderr -stdfd -stdin -stdint -stdlib -stdmode -stdout -stkgrow -stkrel -stl -storedCollection -storedIterator -storedList -storedMap -storedSet -storedSortedMap -storedSortedSet -stqe -stqh -str -strcasecmp -strcmp -strdup -strdup'ed -strerror -stringToBytes -stringp -strlen -strncasecmp -strncmp -strsep -strtol -strtoul -struct -structs -structure's -sts -stwcx -subList -subMap -subSet -subcases -subclassed -subdatabase -subdatabase's -subdb -subdbname -subdbpg -subdbs -subdistribution -subdistributions -submap -subname -subpackages -subtransaction -subtransactions -sullivan -sv -svc -sw -swigCPtr -swpb -sync'd 
-sync'ed -synced -syncs -sysattach -sysconf -sysdetach -sz -t's -tV -tVZ -tableent -tablesize -tailMap -tailSet -tailq -tas -taskLock -tc -tcl -tcp -td -tdata -tearDown -terra -testName -testcopy -testdata -testdestdir -testdigits -testdocopy -tffsp -tfsp -thang -theVendor -thies -thr -thread's -threadID -threadedness -tid -tids -tiebreaker -tiebreaking -timeoutp -timestamp -timeval -timout -timouts -tlen -tm -tmap -tmax -tmp -tmpdir -tmpmap -tmpname -tmutex -tnum -toArray -toBuf -toHexString -toInclusive -toIndex -toKey -toList -toMapEntry -toString -toValue -toched -todo -toobig -tp -tpcb -tput -tqe -tqh -tr -transport's -treeorder -tregion -trinomials -trunc -truncdata -ts -tsl -tstart -ttpcbddlk -ttpcbi -ttpcbr -ttype -tv -tx -txn -txnal -txnapp -txnarray -txnid -txnidcl -txnids -txnip -txnlist -txnp -txns -txntimeout -txt -ua -ubell -ud -udbt -ufid -ufree -uid -uintmax -uintptr -ul -ulen -ulens -ulinks -umalloc -uncorrect -undef -undeleting -undo'ing -undodup -undosplit -uni -unicode -unindexed -uniq -unistd -unix -unmap -unmapfile -unmark -unmarshalData -unmarshalled -unpinned -unpinning -unref -unregistry -upd -updateDatabaseEntry -updateDbt -updateckp -upg -upi -urealloc -useCurrentKey -usePrimaryKey -useValue -usec -usecs -usecsp -usermem -usr -usrAppInit -util -vVxXZ -vVxXyZ -vZ -va -val -value's -valueBinding -valueData -valueEntityBinding -valueFormat -valueInput -valueInputOutput -valueOf -valueOutput -var -variadic -vars -vdp -vdp's -vec -vendorDB -vendordb -ver -verbage -vflag -vica -view's -vrfy -vrfyutil -vsnprintf -vsprintf -vtruncate -vw -vx -vxmutex -vxtmp -vxtpcb -vxworks -wDay -wHour -wMinute -wMonth -wSecond -wYear -waitl -waitlist -waitsfor -walkdupint -walkpages -walkqueue -wb -wc -wcount -weblogic -weblogic's -webquill -windsh -winnt -wmask -wnt -wordlist -workcurs -writeAllowed -writeBoolean -writeByte -writeBytes -writeChar -writeChars -writeCursor -writeDouble -writeFloat -writeInt -writeLong -writeShort -writeString -writeUnsignedByte 
-writeUnsignedInt -writeUnsignedShort -writeable -writeback -writelock -writelocks -wrlock -wrnosync -wsize -wt -wthread -xa -xact -xalinks -xchg -xchgb -xdr -xid -xids -xml -xor -xorl -xxx -xyz -yieldcpu -zend -zero'd -zeroeth -zerofill -zipcode -zl diff --git a/storage/bdb/test/scr011/chk.tags b/storage/bdb/test/scr011/chk.tags deleted file mode 100644 index f1d680ac7d7..00000000000 --- a/storage/bdb/test/scr011/chk.tags +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.tags,v 1.14 2004/10/07 20:30:32 bostic Exp $ -# -# Check to make sure we don't need any more symbolic links to tags files. - -d=../.. - -# Test must be run from the top-level directory, not from a test directory. -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} - -t1=__1 -t2=__2 - -(cd $d && ls -F | egrep / | sort | - sed -e 's/\///' \ - -e '/^CVS$/d' \ - -e '/^build_vxworks$/d' \ - -e '/^build_win32$/d' \ - -e '/^build_win64$/d' \ - -e '/^docs$/d' \ - -e '/^docs_book$/d' \ - -e '/^docs_src$/d' \ - -e '/^examples_java$/d' \ - -e '/^java$/d' \ - -e '/^mod_db4$/d' \ - -e '/^perl$/d' \ - -e '/^php_db4$/d' \ - -e '/^test$/d' \ - -e '/^test_cxx$/d' \ - -e '/^test_purify$/d' \ - -e '/^test_server$/d' \ - -e '/^test_thread$/d' \ - -e '/^test_vxworks$/d') > $t1 - -(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2 -if diff $t1 $t2 > /dev/null; then - exit 0 -else - echo "<<< source tree >>> tags files" - diff $t1 $t2 - exit 1 -fi diff --git a/storage/bdb/test/scr012/chk.vx_code b/storage/bdb/test/scr012/chk.vx_code deleted file mode 100644 index 8b7916053ae..00000000000 --- a/storage/bdb/test/scr012/chk.vx_code +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.vx_code,v 1.7 2004/09/16 17:21:11 bostic Exp $ -# -# Check to make sure the auto-generated utility code in the VxWorks build -# directory compiles. - -d=../.. - -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} -[ -f ../libdb.a ] || (cd .. 
&& make libdb.a) || { - echo 'FAIL: unable to find or build libdb.a' - exit 1 -} - -rm -f t.c t1.c t2.c - -F="$d/clib/getopt.c $d/common/util_arg.c $d/common/util_cache.c - $d/common/util_log.c $d/common/util_sig.c $d/*/*_autop.c" - -header() -{ - echo "int" - echo "main(int argc, char *argv[])" - echo "{return ($1(argv[1]));}" -} - -(echo "int" - echo "main(int argc, char *argv[])" - echo "{" - echo "int i;") > t1.c - -for i in db_archive db_checkpoint db_deadlock db_dump db_load \ - db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do - echo " compiling build_vxworks/$i" - (cat $d/build_vxworks/$i/$i.c; header $i) > t.c - if cc -Wall -I.. -I$d t.c $F ../libdb.a -o t; then - : - else - echo "FAIL: unable to compile $i" - exit 1 - fi - - cat $d/build_vxworks/$i/$i.c >> t2.c - echo "i = $i(argv[1]);" >> t1.c -done - -(cat t2.c t1.c; echo "return (0); }") > t.c - -echo " compiling build_vxworks utility composite" -if cc -Dlint -Wall -I.. -I$d t.c $F ../libdb.a -o t; then - : -else - echo "FAIL: unable to compile utility composite" - exit 1 -fi - -exit 0 diff --git a/storage/bdb/test/scr013/chk.stats b/storage/bdb/test/scr013/chk.stats deleted file mode 100644 index 7ee71392e1c..00000000000 --- a/storage/bdb/test/scr013/chk.stats +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.stats,v 1.9 2004/11/08 14:49:42 bostic Exp $ -# -# Check to make sure all of the stat structure members are included in -# all of the possible formats. - -# Top-level directory. -d=../.. -docs=../../../db_docs - -# Path names are from a top-level directory. -[ -f $d/README ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} - -exitv=0 -t=__tmp - -# Extract the field names for a structure from the db.h file. 
-inc_fields() -{ - sed -e "/struct $1 {/,/^};$/p" \ - -e d < $d/dbinc/db.in | - sed -e 1d \ - -e '$d' \ - -e '/;/!d' \ - -e 's/;.*//' \ - -e 's/^[ ].*[ \*]//' -} - -cat << END_OF_IGNORE > IGNORE -bt_maxkey -bt_metaflags -hash_metaflags -qs_metaflags -qs_ndata -END_OF_IGNORE - -# Check to make sure the elements of a structure from db.h appear in -# the other files. -inc() -{ - for i in `inc_fields $1`; do - if egrep -w $i IGNORE > /dev/null; then - echo " $1: ignoring $i" - continue - fi - for j in $2; do - if egrep -w $i $j > /dev/null; then - :; - else - echo " $1: $i not found in $j." - exitv=1 - fi - done - done -} - -inc "__db_bt_stat" "$d/tcl/tcl_db.c $d/btree/bt_stat.c $docs/db/db_stat.so" -inc "__db_h_stat" "$d/tcl/tcl_db.c $d/hash/hash_stat.c $docs/db/db_stat.so" -inc __db_lock_stat \ - "$d/tcl/tcl_lock.c $d/lock/lock_stat.c $docs/lock/lock_stat.so" -inc __db_log_stat "$d/tcl/tcl_log.c $d/log/log_stat.c $docs/log/log_stat.so" -inc __db_mpool_fstat \ - "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so" -inc __db_mpool_stat \ - "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/memp/memp_stat.so" -inc "__db_qam_stat" \ - "$d/tcl/tcl_db.c $d/qam/qam_stat.c $docs/db/db_stat.so" -inc __db_rep_stat \ - "$d/tcl/tcl_rep.c $d/rep/rep_stat.c $docs/rep/rep_stat.so" -inc __db_seq_stat \ - "$d/tcl/tcl_seq.c $d/sequence/seq_stat.c $docs/seq/seq_stat.so" -inc __db_txn_stat \ - "$d/tcl/tcl_txn.c $d/txn/txn_stat.c $docs/txn/txn_stat.so" - -# Check to make sure the elements from a man page appears in db.in. -man() -{ - for i in `cat $t`; do - if egrep -w $i IGNORE > /dev/null; then - echo " $1: ignoring $i" - continue - fi - if egrep -w $i $d/dbinc/db.in > /dev/null; then - :; - else - echo " $1: $i not found in db.h." 
- exitv=1 - fi - done -} - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/db/db_stat.so > $t -man "checking db_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/lock/lock_stat.so > $t -man "checking lock_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/log/log_stat.so > $t -man "checking log_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/memp/memp_stat.so > $t -man "checking memp_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/rep/rep_stat.so > $t -man "checking rep_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/seq/seq_stat.so > $t -man "checking seq_stat.so against db.h" - -sed -e '/m4_field(/!d' \ - -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' \ - -e 's/__LB__.*//' < $docs/txn/txn_stat.so > $t -man "checking txn_stat.so against db.h" - -exit $exitv diff --git a/storage/bdb/test/scr014/chk.err b/storage/bdb/test/scr014/chk.err deleted file mode 100644 index 72b4a62719f..00000000000 --- a/storage/bdb/test/scr014/chk.err +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $ -# -# Check to make sure all of the error values have corresponding error -# message strings in db_strerror(). - -# Top-level directory. -d=../.. - -# Path names are from a top-level directory. -[ -f $d/README ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -t1=__tmp1 -t2=__tmp2 - -egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1 -sed -e '/^db_strerror/,/^}/{' \ - -e '/ case DB_/{' \ - -e 's/:.*//' \ - -e 's/.* //' \ - -e p \ - -e '}' \ - -e '}' \ - -e d \ - < $d/common/db_err.c > $t2 - -cmp $t1 $t2 > /dev/null || -(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1) - -exit 0 diff --git a/storage/bdb/test/scr015/README b/storage/bdb/test/scr015/README deleted file mode 100644 index 75a356eea06..00000000000 --- a/storage/bdb/test/scr015/README +++ /dev/null @@ -1,36 +0,0 @@ -# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $ - -Use the scripts testall or testone to run all, or just one of the C++ -tests. You must be in this directory to run them. For example, - - $ export LIBS="-L/usr/include/BerkeleyDB/lib" - $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include" - $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib" - $ ./testone TestAppendRecno - $ ./testall - -The scripts will use c++ in your path. Set environment variables $CXX -to override this. It will also honor any $CXXFLAGS and $LIBS -variables that are set, except that -c are silently removed from -$CXXFLAGS (since we do the compilation in one step). - -To run successfully, you will probably need to set $LD_LIBRARY_PATH -to be the directory containing libdb_cxx-X.Y.so - -As an alternative, use the --prefix=

option, a la configure -to set the top of the BerkeleyDB install directory. This forces -the proper options to be added to $LIBS, $CXXFLAGS $LD_LIBRARY_PATH. -For example, - - $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno - $ ./testall --prefix=/usr/include/BerkeleyDB - -The test framework is pretty simple. Any .cpp file in this -directory that is not mentioned in the 'ignore' file represents a -test. If the test is not compiled successfully, the compiler output -is left in .compileout . Otherwise, the java program is run in -a clean subdirectory using as input .testin, or if that doesn't -exist, /dev/null. Output and error from the test run are put into -.out, .err . If .testout, .testerr exist, -they are used as reference files and any differences are reported. -If either of the reference files does not exist, /dev/null is used. diff --git a/storage/bdb/test/scr015/TestConstruct01.cpp b/storage/bdb/test/scr015/TestConstruct01.cpp deleted file mode 100644 index 0b0495ce9ce..00000000000 --- a/storage/bdb/test/scr015/TestConstruct01.cpp +++ /dev/null @@ -1,323 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestConstruct01.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $ - */ - -/* - * Do some regression tests for constructors. - * Run normally (without arguments) it is a simple regression test. - * Run with a numeric argument, it repeats the regression a number - * of times, to try to determine if there are memory leaks. 
- */ - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#ifndef _MSC_VER -#include -#endif -#endif - -#include -#include - -#define ERR(a) \ - do { \ - cout << "FAIL: " << (a) << "\n"; sysexit(1); \ - } while (0) - -#define ERR2(a1,a2) \ - do { \ - cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \ - } while (0) - -#define ERR3(a1,a2,a3) \ - do { \ - cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \ - } while (0) - -#define CHK(a) \ - do { \ - int _ret; \ - if ((_ret = (a)) != 0) { \ - ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \ - } \ - } while (0) - -#ifdef VERBOSE -#define DEBUGOUT(a) cout << a << "\n" -#else -#define DEBUGOUT(a) -#endif - -#define CONSTRUCT01_DBNAME "construct01.db" -#define CONSTRUCT01_DBDIR "." -#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME) - -int itemcount; // count the number of items in the database - -// A good place to put a breakpoint... -// -void sysexit(int status) -{ - exit(status); -} - -void check_file_removed(const char *name, int fatal) -{ - unlink(name); -#if 0 - if (access(name, 0) == 0) { - if (fatal) - cout << "FAIL: "; - cout << "File \"" << name << "\" still exists after run\n"; - if (fatal) - sysexit(1); - } -#endif -} - -// Check that key/data for 0 - count-1 are already present, -// and write a key/data for count. The key and data are -// both "0123...N" where N == count-1. -// -// For some reason on Windows, we need to open using the full pathname -// of the file when there is no environment, thus the 'has_env' -// variable. -// -void rundb(Db *db, int count, int has_env) -{ - const char *name; - - if (has_env) - name = CONSTRUCT01_DBNAME; - else - name = CONSTRUCT01_DBFULLPATH; - - db->set_error_stream(&cerr); - - // We don't really care about the pagesize, but we do want - // to make sure adjusting Db specific variables works before - // opening the db. 
- // - CHK(db->set_pagesize(1024)); - CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664)); - - // The bit map of keys we've seen - long bitmap = 0; - - // The bit map of keys we expect to see - long expected = (1 << (count+1)) - 1; - - char outbuf[10]; - int i; - for (i=0; iput(0, &key, &data, DB_NOOVERWRITE)); - - // Acquire a cursor for the table. - Dbc *dbcp; - CHK(db->cursor(NULL, &dbcp, 0)); - - // Walk through the table, checking - Dbt readkey; - Dbt readdata; - while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) { - char *key_string = (char *)readkey.get_data(); - char *data_string = (char *)readdata.get_data(); - DEBUGOUT("Got: " << key_string << ": " << data_string); - int len = strlen(key_string); - long bit = (1 << len); - if (len > count) { - ERR("reread length is bad"); - } - else if (strcmp(data_string, key_string) != 0) { - ERR("key/data don't match"); - } - else if ((bitmap & bit) != 0) { - ERR("key already seen"); - } - else if ((expected & bit) == 0) { - ERR("key was not expected"); - } - else { - bitmap |= bit; - expected &= ~(bit); - for (i=0; iclose()); - CHK(db->close(0)); -} - -void t1(int except_flag) -{ - cout << " Running test 1:\n"; - Db db(0, except_flag); - rundb(&db, itemcount++, 0); - cout << " finished.\n"; -} - -void t2(int except_flag) -{ - cout << " Running test 2:\n"; - Db db(0, except_flag); - rundb(&db, itemcount++, 0); - cout << " finished.\n"; -} - -void t3(int except_flag) -{ - cout << " Running test 3:\n"; - Db db(0, except_flag); - rundb(&db, itemcount++, 0); - cout << " finished.\n"; -} - -void t4(int except_flag) -{ - cout << " Running test 4:\n"; - DbEnv env(except_flag); - CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0)); - Db db(&env, 0); - CHK(db.close(0)); - CHK(env.close(0)); - cout << " finished.\n"; -} - -void t5(int except_flag) -{ - cout << " Running test 5:\n"; - DbEnv env(except_flag); - CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0)); - Db db(&env, 0); - 
rundb(&db, itemcount++, 1); - // Note we cannot reuse the old Db! - Db anotherdb(&env, 0); - - anotherdb.set_errpfx("test5"); - rundb(&anotherdb, itemcount++, 1); - CHK(env.close(0)); - cout << " finished.\n"; -} - -void t6(int except_flag) -{ - cout << " Running test 6:\n"; - - /* From user [#2939] */ - int err; - - DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS); - penv->set_cachesize(0, 32 * 1024, 0); - penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0); - - //LEAK: remove this block and leak disappears - Db* pdb = new Db(penv,0); - if ((err = pdb->close(0)) != 0) { - fprintf(stderr, "Error closing Db: %s\n", db_strerror(err)); - } - delete pdb; - //LEAK: remove this block and leak disappears - - if ((err = penv->close(0)) != 0) { - fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err)); - } - delete penv; - - cout << " finished.\n"; -} - -// remove any existing environment or database -void removeall() -{ - { - DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS); - (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE); - } - - check_file_removed(CONSTRUCT01_DBFULLPATH, 1); - for (int i=0; i<8; i++) { - char buf[20]; - sprintf(buf, "__db.00%d", i); - check_file_removed(buf, 1); - } -} - -int doall(int except_flag) -{ - itemcount = 0; - try { - // before and after the run, removing any - // old environment/database. 
- // - removeall(); - t1(except_flag); - t2(except_flag); - t3(except_flag); - t4(except_flag); - t5(except_flag); - t6(except_flag); - - removeall(); - return 0; - } - catch (DbException &dbe) { - ERR2("EXCEPTION RECEIVED", dbe.what()); - } - return 1; -} - -int main(int argc, char *argv[]) -{ - int iterations = 1; - if (argc > 1) { - iterations = atoi(argv[1]); - if (iterations < 0) { - ERR("Usage: construct01 count"); - } - } - for (int i=0; i -#include - -int main(int argc, char *argv[]) -{ - DbException *dbe = new DbException("something"); - DbMemoryException *dbme = new DbMemoryException("anything"); - - dbe = dbme; -} - diff --git a/storage/bdb/test/scr015/TestGetSetMethods.cpp b/storage/bdb/test/scr015/TestGetSetMethods.cpp deleted file mode 100644 index 1d896766db3..00000000000 --- a/storage/bdb/test/scr015/TestGetSetMethods.cpp +++ /dev/null @@ -1,91 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestGetSetMethods.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $ - */ - -/* - * Do some regression tests for simple get/set access methods - * on DbEnv, DbTxn, Db. We don't currently test that they have - * the desired effect, only that they operate and return correctly. 
- */ - -#include -#include - -int main(int argc, char *argv[]) -{ - try { - DbEnv *dbenv = new DbEnv(0); - DbTxn *dbtxn; - u_int8_t conflicts[10]; - - dbenv->set_error_stream(&cerr); - dbenv->set_timeout(0x90000000, - DB_SET_LOCK_TIMEOUT); - dbenv->set_lg_bsize(0x1000); - dbenv->set_lg_dir("."); - dbenv->set_lg_max(0x10000000); - dbenv->set_lg_regionmax(0x100000); - dbenv->set_lk_conflicts(conflicts, sizeof(conflicts)); - dbenv->set_lk_detect(DB_LOCK_DEFAULT); - // exists, but is deprecated: - // dbenv->set_lk_max(0); - dbenv->set_lk_max_lockers(100); - dbenv->set_lk_max_locks(10); - dbenv->set_lk_max_objects(1000); - dbenv->set_mp_mmapsize(0x10000); - dbenv->set_tas_spins(1000); - - // Need to open the environment so we - // can get a transaction. - // - dbenv->open(".", DB_CREATE | DB_INIT_TXN | - DB_INIT_LOCK | DB_INIT_LOG | - DB_INIT_MPOOL, - 0644); - - dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT); - dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT); - dbtxn->abort(); - - dbenv->close(0); - - // We get a db, one for each type. - // That's because once we call (for instance) - // set_bt_maxkey, DB 'knows' that this is a - // Btree Db, and it cannot be used to try Hash - // or Recno functions. 
- // - Db *db_bt = new Db(NULL, 0); - db_bt->set_bt_maxkey(10000); - db_bt->set_bt_minkey(100); - db_bt->set_cachesize(0, 0x100000, 0); - db_bt->close(0); - - Db *db_h = new Db(NULL, 0); - db_h->set_h_ffactor(0x10); - db_h->set_h_nelem(100); - db_h->set_lorder(0); - db_h->set_pagesize(0x10000); - db_h->close(0); - - Db *db_re = new Db(NULL, 0); - db_re->set_re_delim('@'); - db_re->set_re_pad(10); - db_re->set_re_source("re.in"); - db_re->close(0); - - Db *db_q = new Db(NULL, 0); - db_q->set_q_extentsize(200); - db_q->close(0); - - } - catch (DbException &dbe) { - cerr << "Db Exception: " << dbe.what() << "\n"; - } - return 0; -} diff --git a/storage/bdb/test/scr015/TestKeyRange.cpp b/storage/bdb/test/scr015/TestKeyRange.cpp deleted file mode 100644 index d875cb20b81..00000000000 --- a/storage/bdb/test/scr015/TestKeyRange.cpp +++ /dev/null @@ -1,172 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestKeyRange.cpp,v 1.6 2004/01/28 03:36:33 bostic Exp $ - */ - -/* - * NOTE: AccessExample changed to test Db.key_range. We made a global - * change of /AccessExample/TestKeyRange/, the only other changes are - * marked with comments that are notated as 'ADDED'. - */ - -#ifndef NO_SYSTEM_INCLUDES -#include - -#include -#include -#include -#include -#ifndef _MSC_VER -#include -#endif -#endif - -#include -#include - -class TestKeyRange -{ -public: - TestKeyRange(); - void run(); - -private: - static const char FileName[]; - - // no need for copy and assignment - TestKeyRange(const TestKeyRange &); - void operator = (const TestKeyRange &); -}; - -static void usage(); // forward - -int main(int argc, char *argv[]) -{ - if (argc > 1) { - usage(); - } - - // Use a try block just to report any errors. - // An alternate approach to using exceptions is to - // use error models (see DbEnv::set_error_model()) so - // that error codes are returned for all Berkeley DB methods. 
- // - try { - TestKeyRange app; - app.run(); - return 0; - } - catch (DbException &dbe) { - cerr << "TestKeyRange: " << dbe.what() << "\n"; - return 1; - } -} - -static void usage() -{ - cerr << "usage: TestKeyRange\n"; - exit(1); -} - -const char TestKeyRange::FileName[] = "access.db"; - -TestKeyRange::TestKeyRange() -{ -} - -void TestKeyRange::run() -{ - // Remove the previous database. - (void)unlink(FileName); - - // Create the database object. - // There is no environment for this simple example. - Db db(0, 0); - - db.set_error_stream(&cerr); - db.set_errpfx("TestKeyRange"); - db.set_pagesize(1024); /* Page size: 1K. */ - db.set_cachesize(0, 32 * 1024, 0); - db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664); - - // - // Insert records into the database, where the key is the user - // input and the data is the user input in reverse order. - // - char buf[1024]; - char rbuf[1024]; - char *t; - char *p; - int ret; - int len; - Dbt *firstkey = NULL; - char firstbuf[1024]; - - for (;;) { - cout << "input>"; - cout.flush(); - - cin.getline(buf, sizeof(buf)); - if (cin.eof()) - break; - - if ((len = strlen(buf)) <= 0) - continue; - for (t = rbuf, p = buf + (len - 1); p >= buf;) - *t++ = *p--; - *t++ = '\0'; - - Dbt key(buf, len + 1); - Dbt data(rbuf, len + 1); - if (firstkey == NULL) { - strcpy(firstbuf, buf); - firstkey = new Dbt(firstbuf, len + 1); - } - - ret = db.put(0, &key, &data, DB_NOOVERWRITE); - if (ret == DB_KEYEXIST) { - cout << "Key " << buf << " already exists.\n"; - } - cout << "\n"; - } - - // We put a try block around this section of code - // to ensure that our database is properly closed - // in the event of an error. - // - try { - // Acquire a cursor for the table. 
- Dbc *dbcp; - db.cursor(NULL, &dbcp, 0); - - /*ADDED...*/ - DB_KEY_RANGE range; - memset(&range, 0, sizeof(range)); - - db.key_range(NULL, firstkey, &range, 0); - printf("less: %f\n", range.less); - printf("equal: %f\n", range.equal); - printf("greater: %f\n", range.greater); - /*end ADDED*/ - - Dbt key; - Dbt data; - - // Walk through the table, printing the key/data pairs. - while (dbcp->get(&key, &data, DB_NEXT) == 0) { - char *key_string = (char *)key.get_data(); - char *data_string = (char *)data.get_data(); - cout << key_string << " : " << data_string << "\n"; - } - dbcp->close(); - } - catch (DbException &dbe) { - cerr << "TestKeyRange: " << dbe.what() << "\n"; - } - - db.close(0); -} diff --git a/storage/bdb/test/scr015/TestKeyRange.testin b/storage/bdb/test/scr015/TestKeyRange.testin deleted file mode 100644 index a2b6bd74e7b..00000000000 --- a/storage/bdb/test/scr015/TestKeyRange.testin +++ /dev/null @@ -1,8 +0,0 @@ -first line is alphabetically somewhere in the middle. -Blah blah -let's have exactly eight lines of input. -stuff -more stuff -and even more stuff -lastly -but not leastly. diff --git a/storage/bdb/test/scr015/TestKeyRange.testout b/storage/bdb/test/scr015/TestKeyRange.testout deleted file mode 100644 index 25b2e1a835c..00000000000 --- a/storage/bdb/test/scr015/TestKeyRange.testout +++ /dev/null @@ -1,19 +0,0 @@ -input> -input> -input> -input> -input> -input> -input> -input> -input>less: 0.375000 -equal: 0.125000 -greater: 0.500000 -Blah blah : halb halB -and even more stuff : ffuts erom neve dna -but not leastly. : .yltsael ton tub -first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif -lastly : yltsal -let's have exactly eight lines of input. 
: .tupni fo senil thgie yltcaxe evah s'tel -more stuff : ffuts erom -stuff : ffuts diff --git a/storage/bdb/test/scr015/TestLogc.cpp b/storage/bdb/test/scr015/TestLogc.cpp deleted file mode 100644 index 636db4530c0..00000000000 --- a/storage/bdb/test/scr015/TestLogc.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestLogc.cpp,v 1.8 2004/01/28 03:36:33 bostic Exp $ - */ - -/* - * A basic regression test for the Logc class. - */ - -#include -#include - -static void show_dbt(ostream &os, Dbt *dbt) -{ - int i; - int size = dbt->get_size(); - unsigned char *data = (unsigned char *)dbt->get_data(); - - os << "size: " << size << " data: "; - for (i=0; iopen(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0); - - // Do some database activity to get something into the log. - Db *db1 = new Db(env, 0); - db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0); - Dbt *key = new Dbt((char *)"a", 1); - Dbt *data = new Dbt((char *)"b", 1); - db1->put(NULL, key, data, 0); - key->set_data((char *)"c"); - data->set_data((char *)"d"); - db1->put(NULL, key, data, 0); - db1->close(0); - - Db *db2 = new Db(env, 0); - db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0); - key->set_data((char *)"w"); - data->set_data((char *)"x"); - db2->put(NULL, key, data, 0); - key->set_data((char *)"y"); - data->set_data((char *)"z"); - db2->put(NULL, key, data, 0); - db2->close(0); - - // Now get a log cursor and walk through. - DbLogc *logc; - - env->log_cursor(&logc, 0); - int ret = 0; - DbLsn lsn; - Dbt *dbt = new Dbt(); - u_int32_t flags = DB_FIRST; - - int count = 0; - while ((ret = logc->get(&lsn, dbt, flags)) == 0) { - - // We ignore the contents of the log record, - // it's not portable. Even the exact count - // is may change when the underlying implementation - // changes, we'll just make sure at the end we saw - // 'enough'. 
- // - // cout << "logc.get: " << count; - // show_dbt(cout, dbt); - // cout << "\n"; - // - count++; - flags = DB_NEXT; - } - if (ret != DB_NOTFOUND) { - cerr << "*** FAIL: logc.get returned: " - << DbEnv::strerror(ret) << "\n"; - } - logc->close(0); - - // There has to be at *least* four log records, - // since we did four separate database operations. - // - if (count < 4) - cerr << "*** FAIL: not enough log records\n"; - - cout << "TestLogc done.\n"; - } - catch (DbException &dbe) { - cerr << "*** FAIL: " << dbe.what() <<"\n"; - } - return 0; -} diff --git a/storage/bdb/test/scr015/TestLogc.testout b/storage/bdb/test/scr015/TestLogc.testout deleted file mode 100644 index afac3af7eda..00000000000 --- a/storage/bdb/test/scr015/TestLogc.testout +++ /dev/null @@ -1 +0,0 @@ -TestLogc done. diff --git a/storage/bdb/test/scr015/TestSimpleAccess.cpp b/storage/bdb/test/scr015/TestSimpleAccess.cpp deleted file mode 100644 index 8415cda78d9..00000000000 --- a/storage/bdb/test/scr015/TestSimpleAccess.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestSimpleAccess.cpp,v 1.7 2004/01/28 03:36:33 bostic Exp $ - */ - -/* - * Do some regression tests for constructors. - * Run normally (without arguments) it is a simple regression test. - * Run with a numeric argument, it repeats the regression a number - * of times, to try to determine if there are memory leaks. - */ - -#include -#include - -int main(int argc, char *argv[]) -{ - try { - Db *db = new Db(NULL, 0); - db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644); - - // populate our massive database. - // all our strings include null for convenience. - // Note we have to cast for idiomatic - // usage, since newer gcc requires it. - Dbt *keydbt = new Dbt((char *)"key", 4); - Dbt *datadbt = new Dbt((char *)"data", 5); - db->put(NULL, keydbt, datadbt, 0); - - // Now, retrieve. 
We could use keydbt over again, - // but that wouldn't be typical in an application. - Dbt *goodkeydbt = new Dbt((char *)"key", 4); - Dbt *badkeydbt = new Dbt((char *)"badkey", 7); - Dbt *resultdbt = new Dbt(); - resultdbt->set_flags(DB_DBT_MALLOC); - - int ret; - - if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) { - cout << "get: " << DbEnv::strerror(ret) << "\n"; - } - else { - char *result = (char *)resultdbt->get_data(); - cout << "got data: " << result << "\n"; - } - - if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) { - // We expect this... - cout << "get using bad key: " - << DbEnv::strerror(ret) << "\n"; - } - else { - char *result = (char *)resultdbt->get_data(); - cout << "*** got data using bad key!!: " - << result << "\n"; - } - cout << "finished test\n"; - } - catch (DbException &dbe) { - cerr << "Db Exception: " << dbe.what(); - } - return 0; -} diff --git a/storage/bdb/test/scr015/TestSimpleAccess.testout b/storage/bdb/test/scr015/TestSimpleAccess.testout deleted file mode 100644 index dc88d4788e4..00000000000 --- a/storage/bdb/test/scr015/TestSimpleAccess.testout +++ /dev/null @@ -1,3 +0,0 @@ -got data: data -get using bad key: DB_NOTFOUND: No matching key/data pair found -finished test diff --git a/storage/bdb/test/scr015/TestTruncate.cpp b/storage/bdb/test/scr015/TestTruncate.cpp deleted file mode 100644 index 54ecf81c8ef..00000000000 --- a/storage/bdb/test/scr015/TestTruncate.cpp +++ /dev/null @@ -1,84 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestTruncate.cpp,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Do some regression tests for constructors. - * Run normally (without arguments) it is a simple regression test. - * Run with a numeric argument, it repeats the regression a number - * of times, to try to determine if there are memory leaks. 
- */ - -#include -#include - -int main(int argc, char *argv[]) -{ - try { - Db *db = new Db(NULL, 0); - db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644); - - // populate our massive database. - // all our strings include null for convenience. - // Note we have to cast for idiomatic - // usage, since newer gcc requires it. - Dbt *keydbt = new Dbt((char*)"key", 4); - Dbt *datadbt = new Dbt((char*)"data", 5); - db->put(NULL, keydbt, datadbt, 0); - - // Now, retrieve. We could use keydbt over again, - // but that wouldn't be typical in an application. - Dbt *goodkeydbt = new Dbt((char*)"key", 4); - Dbt *badkeydbt = new Dbt((char*)"badkey", 7); - Dbt *resultdbt = new Dbt(); - resultdbt->set_flags(DB_DBT_MALLOC); - - int ret; - - if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) { - cout << "get: " << DbEnv::strerror(ret) << "\n"; - } - else { - char *result = (char *)resultdbt->get_data(); - cout << "got data: " << result << "\n"; - } - - if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) { - // We expect this... - cout << "get using bad key: " - << DbEnv::strerror(ret) << "\n"; - } - else { - char *result = (char *)resultdbt->get_data(); - cout << "*** got data using bad key!!: " - << result << "\n"; - } - - // Now, truncate and make sure that it's really gone. - cout << "truncating data...\n"; - u_int32_t nrecords; - db->truncate(NULL, &nrecords, 0); - cout << "truncate returns " << nrecords << "\n"; - if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) { - // We expect this... 
- cout << "after truncate get: " - << DbEnv::strerror(ret) << "\n"; - } - else { - char *result = (char *)resultdbt->get_data(); - cout << "got data: " << result << "\n"; - } - - db->close(0); - cout << "finished test\n"; - } - catch (DbException &dbe) { - cerr << "Db Exception: " << dbe.what(); - } - return 0; -} diff --git a/storage/bdb/test/scr015/TestTruncate.testout b/storage/bdb/test/scr015/TestTruncate.testout deleted file mode 100644 index 0a4bc98165d..00000000000 --- a/storage/bdb/test/scr015/TestTruncate.testout +++ /dev/null @@ -1,6 +0,0 @@ -got data: data -get using bad key: DB_NOTFOUND: No matching key/data pair found -truncating data... -truncate returns 1 -after truncate get: DB_NOTFOUND: No matching key/data pair found -finished test diff --git a/storage/bdb/test/scr015/chk.cxxtests b/storage/bdb/test/scr015/chk.cxxtests deleted file mode 100644 index 3d1e3947c4c..00000000000 --- a/storage/bdb/test/scr015/chk.cxxtests +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.cxxtests,v 1.8 2004/09/28 19:58:42 mjc Exp $ -# -# Check to make sure that regression tests for C++ run. - -TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory - -# All paths must be relative to a subdirectory of the build directory -LIBS="-L.. -ldb_cxx" -CXXFLAGS="-I.. -I../../dbinc" - -[ `uname` = "Linux" ] && LIBS="$LIBS -lpthread" - -# Test must be run from a local build directory, not from a test -# directory. -cd .. -[ -f db_config.h ] || { - echo 'FAIL: chk.cxxtests must be run from a local build directory.' - exit 1 -} -[ -d ../env ] || { - echo 'FAIL: chk.cxxtests must be run from a local build directory.' 
- exit 1 -} -[ -f libdb.a ] || make libdb.a || { - echo 'FAIL: unable to build libdb.a' - exit 1 -} -[ -f libdb_cxx.a ] || make libdb_cxx.a || { - echo 'FAIL: unable to build libdb_cxx.a' - exit 1 -} -CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile` -echo " ====== cxx tests using $CXX" -testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'` - -for testname in $testnames; do - if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then - echo " **** cxx test $testname ignored" - continue - fi - - echo " ==== cxx test $testname" - rm -rf TESTCXX; mkdir TESTCXX - cd ./TESTCXX - testprefix=../$TEST_CXX_SRCDIR/$testname - - ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || { - echo "FAIL: compilation of $testname failed, see ../$testname.compileout" - exit 1 - } - rm -f ../$testname.compileout - infile=$testprefix.testin - [ -f $infile ] || infile=/dev/null - goodoutfile=$testprefix.testout - [ -f $goodoutfile ] || goodoutfile=/dev/null - gooderrfile=$testprefix.testerr - [ -f $gooderrfile ] || gooderrfile=/dev/null - ./$testname <$infile >../$testname.out 2>../$testname.err - cmp ../$testname.out $goodoutfile > /dev/null || { - echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile" - exit 1 - } - cmp ../$testname.err $gooderrfile > /dev/null || { - echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile" - exit 1 - } - cd .. 
- rm -f $testname.err $testname.out -done -rm -rf TESTCXX -exit 0 diff --git a/storage/bdb/test/scr015/ignore b/storage/bdb/test/scr015/ignore deleted file mode 100644 index 55ce82ae372..00000000000 --- a/storage/bdb/test/scr015/ignore +++ /dev/null @@ -1,4 +0,0 @@ -# -# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $ -# -# A list of tests to ignore diff --git a/storage/bdb/test/scr015/testall b/storage/bdb/test/scr015/testall deleted file mode 100644 index a2d493a8b22..00000000000 --- a/storage/bdb/test/scr015/testall +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh - -# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $ -# -# Run all the C++ regression tests - -ecode=0 -prefixarg="" -stdinarg="" -while : -do - case "$1" in - --prefix=* ) - prefixarg="$1"; shift;; - --stdin ) - stdinarg="$1"; shift;; - * ) - break - esac -done -files="`find . -name \*.cpp -print`" -for file in $files; do - name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'` - if grep $name ignore > /dev/null; then - echo " **** cxx test $name ignored" - else - echo " ==== cxx test $name" - if ! sh ./testone $prefixarg $stdinarg $name; then - ecode=1 - fi - fi -done -exit $ecode diff --git a/storage/bdb/test/scr015/testone b/storage/bdb/test/scr015/testone deleted file mode 100644 index 3bbba3f90f0..00000000000 --- a/storage/bdb/test/scr015/testone +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/sh - -# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $ -# -# Run just one C++ regression test, the single argument -# is the basename of the test, e.g. TestRpcServer - -error() -{ - echo '' >&2 - echo "C++ regression error: $@" >&2 - echo '' >&2 - ecode=1 -} - -# compares the result against the good version, -# reports differences, and removes the result file -# if there are no differences. -# -compare_result() -{ - good="$1" - latest="$2" - if [ ! 
-e "$good" ]; then - echo "Note: $good does not exist" - return - fi - tmpout=/tmp/blddb$$.tmp - diff "$good" "$latest" > $tmpout - if [ -s $tmpout ]; then - nbad=`grep '^[0-9]' $tmpout | wc -l` - error "$good and $latest differ in $nbad places." - else - rm $latest - fi - rm -f $tmpout -} - -ecode=0 -stdinflag=n -gdbflag=n -CXX=${CXX:-c++} -LIBS=${LIBS:-} - -# remove any -c option in the CXXFLAGS -CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`" - -# determine the prefix of the install tree -prefix="" -while : -do - case "$1" in - --prefix=* ) - prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift - LIBS="-L$prefix/lib -ldb_cxx $LIBS" - CXXFLAGS="-I$prefix/include $CXXFLAGS" - export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH" - ;; - --stdin ) - stdinflag=y; shift - ;; - --gdb ) - CXXFLAGS="-g $CXXFLAGS" - gdbflag=y; shift - ;; - * ) - break - ;; - esac -done - -if [ "$#" = 0 ]; then - echo 'Usage: testone [ --prefix= | --stdin ] TestName' - exit 1 -fi -name="$1" - -# compile -rm -rf TESTDIR; mkdir TESTDIR -cd ./TESTDIR - -${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1 -if [ $? != 0 -o -s ../$name.compileout ]; then - error "compilation of $name failed, see $name.compileout" - exit 1 -fi -rm -f ../$name.compileout - -# find input and error file -infile=../$name.testin -if [ ! -f $infile ]; then - infile=/dev/null -fi - -# run and diff results -rm -rf TESTDIR -if [ "$gdbflag" = y ]; then - if [ -s $infile ]; then - echo "Input file is $infile" - fi - gdb ./$name - exit 0 -elif [ "$stdinflag" = y ]; then - ./$name >../$name.out 2>../$name.err -else - ./$name <$infile >../$name.out 2>../$name.err -fi -cd .. - -testerr=$name.testerr -if [ ! -f $testerr ]; then - testerr=/dev/null -fi - -testout=$name.testout -if [ ! 
-f $testout ]; then - testout=/dev/null -fi - -compare_result $testout $name.out -compare_result $testerr $name.err -rm -rf TESTDIR -exit $ecode diff --git a/storage/bdb/test/scr016/CallbackTest.java b/storage/bdb/test/scr016/CallbackTest.java deleted file mode 100644 index 14abcf44f18..00000000000 --- a/storage/bdb/test/scr016/CallbackTest.java +++ /dev/null @@ -1,83 +0,0 @@ -package com.sleepycat.test; -import com.sleepycat.db.*; - -public class CallbackTest -{ - public static void main(String args[]) - { - try { - Db db = new Db(null, 0); - db.setBtreeCompare(new BtreeCompare()); - db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666); - StringDbt[] keys = new StringDbt[10]; - StringDbt[] datas = new StringDbt[10]; - for (int i = 0; i<10; i++) { - int val = (i * 3) % 10; - keys[i] = new StringDbt("key" + val); - datas[i] = new StringDbt("data" + val); - System.out.println("put " + val); - db.put(null, keys[i], datas[i], 0); - } - } - catch (DbException dbe) { - System.err.println("FAIL: " + dbe); - } - catch (java.io.FileNotFoundException fnfe) { - System.err.println("FAIL: " + fnfe); - } - - } - - -} - -class BtreeCompare - implements DbBtreeCompare -{ - /* A weird comparator, for example. - * In fact, it may not be legal, since it's not monotonically increasing. - */ - public int compare(Db db, Dbt dbt1, Dbt dbt2) - { - System.out.println("compare function called"); - byte b1[] = dbt1.getData(); - byte b2[] = dbt2.getData(); - System.out.println(" " + (new String(b1)) + ", " + (new String(b2))); - int len1 = b1.length; - int len2 = b2.length; - if (len1 != len2) - return (len1 < len2) ? 1 : -1; - int value = 1; - for (int i=0; i option, a la configure -to set the top of the BerkeleyDB install directory. This forces -the proper options to be added to $LD_LIBRARY_PATH. -For example, - - $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno - $ ./testall --prefix=/usr/include/BerkeleyDB - -The test framework is pretty simple. 
Any .java file in this -directory that is not mentioned in the 'ignore' file represents a -test. If the test is not compiled successfully, the compiler output -is left in .compileout . Otherwise, the java program is run in -a clean subdirectory using as input .testin, or if that doesn't -exist, /dev/null. Output and error from the test run are put into -.out, .err . If .testout, .testerr exist, -they are used as reference files and any differences are reported. -If either of the reference files does not exist, /dev/null is used. diff --git a/storage/bdb/test/scr016/TestAppendRecno.java b/storage/bdb/test/scr016/TestAppendRecno.java deleted file mode 100644 index 4237b99db7d..00000000000 --- a/storage/bdb/test/scr016/TestAppendRecno.java +++ /dev/null @@ -1,258 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestAppendRecno.java,v 1.6 2004/01/28 03:36:34 bostic Exp $ - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.InputStreamReader; -import java.io.IOException; -import java.io.PrintStream; - -public class TestAppendRecno - implements DbAppendRecno -{ - private static final String FileName = "access.db"; - int callback_count = 0; - Db table = null; - - public TestAppendRecno() - { - } - - private static void usage() - { - System.err.println("usage: TestAppendRecno\n"); - System.exit(1); - } - - public static void main(String argv[]) - { - try - { - TestAppendRecno app = new TestAppendRecno(); - app.run(); - } - catch (DbException dbe) - { - System.err.println("TestAppendRecno: " + dbe.toString()); - System.exit(1); - } - catch (FileNotFoundException fnfe) - { - System.err.println("TestAppendRecno: " + fnfe.toString()); - System.exit(1); - } - System.exit(0); - } - - public void run() - throws DbException, FileNotFoundException - { - // Remove the previous 
database. - new File(FileName).delete(); - - // Create the database object. - // There is no environment for this simple example. - table = new Db(null, 0); - table.set_error_stream(System.err); - table.set_errpfx("TestAppendRecno"); - table.set_append_recno(this); - - table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644); - for (int i=0; i<10; i++) { - System.out.println("\n*** Iteration " + i ); - try { - RecnoDbt key = new RecnoDbt(77+i); - StringDbt data = new StringDbt("data" + i + "_xyz"); - table.put(null, key, data, Db.DB_APPEND); - } - catch (DbException dbe) { - System.out.println("dbe: " + dbe); - } - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - RecnoDbt key = new RecnoDbt(); - StringDbt data = new StringDbt(); - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { - System.out.println(key.getRecno() + " : " + data.getString()); - } - iterator.close(); - table.close(0); - System.out.println("Test finished."); - } - - public void db_append_recno(Db db, Dbt dbt, int recno) - throws DbException - { - int count = callback_count++; - - System.out.println("====\ncallback #" + count); - System.out.println("db is table: " + (db == table)); - System.out.println("recno = " + recno); - - // This gives variable output. - //System.out.println("dbt = " + dbt); - if (dbt instanceof RecnoDbt) { - System.out.println("dbt = " + - ((RecnoDbt)dbt).getRecno()); - } - else if (dbt instanceof StringDbt) { - System.out.println("dbt = " + - ((StringDbt)dbt).getString()); - } - else { - // Note: the dbts are created out of whole - // cloth by Berkeley DB, not us! 
- System.out.println("internally created dbt: " + - new StringDbt(dbt) + ", size " + - dbt.get_size()); - } - - switch (count) { - case 0: - // nothing - break; - - case 1: - dbt.set_size(dbt.get_size() - 1); - break; - - case 2: - System.out.println("throwing..."); - throw new DbException("append_recno thrown"); - //not reached - - case 3: - // Should result in an error (size unchanged). - dbt.set_offset(1); - break; - - case 4: - dbt.set_offset(1); - dbt.set_size(dbt.get_size() - 1); - break; - - case 5: - dbt.set_offset(1); - dbt.set_size(dbt.get_size() - 2); - break; - - case 6: - dbt.set_data(new String("abc").getBytes()); - dbt.set_size(3); - break; - - case 7: - // Should result in an error. - dbt.set_data(null); - break; - - case 8: - // Should result in an error. - dbt.set_data(new String("abc").getBytes()); - dbt.set_size(4); - break; - - default: - break; - } - } - - - // Here's an example of how you can extend a Dbt to store recno's. - // - static /*inner*/ - class RecnoDbt extends Dbt - { - RecnoDbt() - { - this(0); // let other constructor do most of the work - } - - RecnoDbt(int value) - { - set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval - arr = new byte[4]; - set_data(arr); // use our local array for data - set_ulen(4); // size of return storage - setRecno(value); - } - - public String toString() /*override*/ - { - return String.valueOf(getRecno()); - } - - void setRecno(int value) - { - set_recno_key_data(value); - set_size(arr.length); - } - - int getRecno() - { - return get_recno_key_data(); - } - - byte arr[]; - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. 
- // - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt(Dbt dbt) - { - set_data(dbt.get_data()); - set_size(dbt.get_size()); - } - - StringDbt() - { - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - StringDbt(String value) - { - setString(value); - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - void setString(String value) - { - set_data(value.getBytes()); - set_size(value.length()); - } - - String getString() - { - return new String(get_data(), 0, get_size()); - } - - public String toString() /*override*/ - { - return getString(); - } - } -} - diff --git a/storage/bdb/test/scr016/TestAppendRecno.testout b/storage/bdb/test/scr016/TestAppendRecno.testout deleted file mode 100644 index 970174e7a96..00000000000 --- a/storage/bdb/test/scr016/TestAppendRecno.testout +++ /dev/null @@ -1,82 +0,0 @@ - -*** Iteration 0 -==== -callback #0 -db is table: true -recno = 1 -internally created dbt: data0_xyz, size 9 - -*** Iteration 1 -==== -callback #1 -db is table: true -recno = 2 -internally created dbt: data1_xyz, size 9 - -*** Iteration 2 -==== -callback #2 -db is table: true -recno = 3 -internally created dbt: data2_xyz, size 9 -throwing... 
-dbe: com.sleepycat.db.DbException: append_recno thrown - -*** Iteration 3 -==== -callback #3 -db is table: true -recno = 3 -internally created dbt: data3_xyz, size 9 -dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length - -*** Iteration 4 -==== -callback #4 -db is table: true -recno = 3 -internally created dbt: data4_xyz, size 9 - -*** Iteration 5 -==== -callback #5 -db is table: true -recno = 4 -internally created dbt: data5_xyz, size 9 - -*** Iteration 6 -==== -callback #6 -db is table: true -recno = 5 -internally created dbt: data6_xyz, size 9 - -*** Iteration 7 -==== -callback #7 -db is table: true -recno = 6 -internally created dbt: data7_xyz, size 9 -dbe: com.sleepycat.db.DbException: Dbt.data is null - -*** Iteration 8 -==== -callback #8 -db is table: true -recno = 6 -internally created dbt: data8_xyz, size 9 -dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length - -*** Iteration 9 -==== -callback #9 -db is table: true -recno = 6 -internally created dbt: data9_xyz, size 9 -1 : data0_xyz -2 : data1_xy -3 : ata4_xyz -4 : ata5_xy -5 : abc -6 : data9_xyz -Test finished. diff --git a/storage/bdb/test/scr016/TestAssociate.java b/storage/bdb/test/scr016/TestAssociate.java deleted file mode 100644 index 80451d1e66b..00000000000 --- a/storage/bdb/test/scr016/TestAssociate.java +++ /dev/null @@ -1,333 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: TestAssociate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.Reader; -import java.io.StringReader; -import java.io.IOException; -import java.io.PrintStream; -import java.util.Hashtable; - -public class TestAssociate - implements DbDupCompare -{ - private static final String FileName = "access.db"; - public static Db saveddb1 = null; - public static Db saveddb2 = null; - - public TestAssociate() - { - } - - private static void usage() - { - System.err.println("usage: TestAssociate\n"); - System.exit(1); - } - - public static void main(String argv[]) - { - try - { - TestAssociate app = new TestAssociate(); - app.run(); - } - catch (DbException dbe) - { - System.err.println("TestAssociate: " + dbe.toString()); - System.exit(1); - } - catch (FileNotFoundException fnfe) - { - System.err.println("TestAssociate: " + fnfe.toString()); - System.exit(1); - } - System.exit(0); - } - - public static int counter = 0; - public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null }; - - // Prompts for a line, and keeps prompting until a non blank - // line is returned. Returns null on error. - // - static public String askForLine(Reader reader, - PrintStream out, String prompt) - { - /* - String result = ""; - while (result != null && result.length() == 0) { - out.print(prompt); - out.flush(); - result = getLine(reader); - } - return result; - */ - return results[counter++]; - } - - // Not terribly efficient, but does the job. - // Works for reading a line from stdin or a file. - // Returns null on EOF. If EOF appears in the middle - // of a line, returns that line, then null on next call. 
- // - static public String getLine(Reader reader) - { - StringBuffer b = new StringBuffer(); - int c; - try { - while ((c = reader.read()) != -1 && c != '\n') { - if (c != '\r') - b.append((char)c); - } - } - catch (IOException ioe) { - c = -1; - } - - if (c == -1 && b.length() == 0) - return null; - else - return b.toString(); - } - - static public String shownull(Object o) - { - if (o == null) - return "null"; - else - return "not null"; - } - - public void run() - throws DbException, FileNotFoundException - { - // Remove the previous database. - new File(FileName).delete(); - - // Create the database object. - // There is no environment for this simple example. - DbEnv dbenv = new DbEnv(0); - dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644); - (new java.io.File(FileName)).delete(); - Db table = new Db(dbenv, 0); - Db table2 = new Db(dbenv, 0); - table2.set_dup_compare(this); - table2.set_flags(Db.DB_DUPSORT); - table.set_error_stream(System.err); - table2.set_error_stream(System.err); - table.set_errpfx("TestAssociate"); - table2.set_errpfx("TestAssociate(table2)"); - System.out.println("Primary database is " + shownull(table)); - System.out.println("Secondary database is " + shownull(table2)); - saveddb1 = table; - saveddb2 = table2; - table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); - table2.open(null, FileName + "2", null, - Db.DB_BTREE, Db.DB_CREATE, 0644); - table.associate(null, table2, new Capitalize(), 0); - - // - // Insert records into the database, where the key is the user - // input and the data is the user input in reverse order. 
- // - Reader reader = new StringReader("abc\ndef\njhi"); - - for (;;) { - String line = askForLine(reader, System.out, "input> "); - if (line == null) - break; - - String reversed = (new StringBuffer(line)).reverse().toString(); - - // See definition of StringDbt below - // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { - System.out.println(dbe.toString()); - } - System.out.println(""); - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table2.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - StringDbt key = new StringDbt(); - StringDbt data = new StringDbt(); - StringDbt pkey = new StringDbt(); - - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { - System.out.println(key.getString() + " : " + data.getString()); - } - - key.setString("BC"); - System.out.println("get BC returns " + table2.get(null, key, data, 0)); - System.out.println(" values: " + key.getString() + " : " + data.getString()); - System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0)); - System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString()); - key.setString("KL"); - System.out.println("get KL returns " + table2.get(null, key, data, 0)); - System.out.println(" values: " + key.getString() + " : " + data.getString()); - System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0)); - System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString()); - - iterator.close(); - table.close(0); - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of 
data you wish. We've declared it as a static inner - // class, but it need not be. - // - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - StringDbt(String value) - { - setString(value); - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - void setString(String value) - { - set_data(value.getBytes()); - set_size(value.length()); - } - - String getString() - { - return new String(get_data(), 0, get_size()); - } - - public String toString() - { - return "StringDbt=" + getString(); - } - } - - /* creates a stupid secondary index as follows: - For an N letter key, we use N-1 letters starting at - position 1. If the new letters are already capitalized, - we return the old array, but with offset set to 1. - If the letters are not capitalized, we create a new, - capitalized array. This is pretty stupid for - an application, but it tests all the paths in the runtime. - */ - public static class Capitalize implements DbSecondaryKeyCreate - { - public int secondaryKeyCreate(Db secondary, Dbt key, Dbt value, - Dbt result) - throws DbException - { - String which = "unknown db"; - if (saveddb1.equals(secondary)) { - which = "primary"; - } - else if (saveddb2.equals(secondary)) { - which = "secondary"; - } - System.out.println("secondaryKeyCreate, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value)); - int len = key.get_size(); - byte[] arr = key.get_data(); - boolean capped = true; - - if (len < 1) - throw new DbException("bad key"); - - if (len < 2) - return Db.DB_DONOTINDEX; - - result.set_size(len - 1); - for (int i=1; capped && i sz2) - return 1; - byte[] data1 = dbt1.get_data(); - byte[] data2 = dbt2.get_data(); - for (int i=0; i count) { - ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" ); - } - else if (!data_string.equals(key_string)) { - ERR("key/data don't match"); - } 
- else if ((bitmap & bit) != 0) { - ERR("key already seen"); - } - else if ((expected & bit) == 0) { - ERR("key was not expected"); - } - else { - bitmap |= bit; - expected &= ~(bit); - for (i=0; i= '0' && ch <= '9') { - mask |= (1 << (ch - '0')); - } - else if (ch == 'v') { - verbose_flag = true; - } - else { - ERR("Usage: construct01 [-testdigits] count"); - } - } - VERBOSEOUT("mask = " + mask); - - } - else { - try { - iterations = Integer.parseInt(arg); - if (iterations < 0) { - ERR("Usage: construct01 [-testdigits] count"); - } - } - catch (NumberFormatException nfe) { - ERR("EXCEPTION RECEIVED: " + nfe); - } - } - } - - // Run GC before and after the test to give - // a baseline for any Java memory used. - // - System.gc(); - System.runFinalization(); - VERBOSEOUT("gc complete"); - long starttotal = Runtime.getRuntime().totalMemory(); - long startfree = Runtime.getRuntime().freeMemory(); - - TestConstruct01 con = new TestConstruct01(); - int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC }; - String[] dbt_flags_name = { "default", "malloc", "realloc" }; - - TestOptions options = new TestOptions(); - options.testmask = mask; - - for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) { - options.dbt_alloc_flags = dbt_flags[flagiter]; - - VERBOSEOUT("Running with DBT alloc flags: " + - dbt_flags_name[flagiter]); - for (int i=0; i -scale) - return "<" + scale; - } - return ">" + max; - } - -} - -class TestOptions -{ - int testmask = 0; // which tests to run - int dbt_alloc_flags = 0; // DB_DBT_* flags to use - int successcounter =0; -} - diff --git a/storage/bdb/test/scr016/TestConstruct01.testerr b/storage/bdb/test/scr016/TestConstruct01.testerr deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/storage/bdb/test/scr016/TestConstruct01.testout b/storage/bdb/test/scr016/TestConstruct01.testout deleted file mode 100644 index 2de13da0036..00000000000 --- a/storage/bdb/test/scr016/TestConstruct01.testout +++ /dev/null @@ -1,3 +0,0 
@@ -ALL TESTS SUCCESSFUL -delta for total mem: <10 -delta for free mem: <1000 diff --git a/storage/bdb/test/scr016/TestConstruct02.java b/storage/bdb/test/scr016/TestConstruct02.java deleted file mode 100644 index 2e55cfc6f32..00000000000 --- a/storage/bdb/test/scr016/TestConstruct02.java +++ /dev/null @@ -1,326 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestConstruct02.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Do some regression tests for constructors. - * Run normally (without arguments) it is a simple regression test. - * Run with a numeric argument, it repeats the regression a number - * of times, to try to determine if there are memory leaks. - */ - -package com.sleepycat.test; -import com.sleepycat.db.*; -import java.io.File; -import java.io.IOException; -import java.io.FileNotFoundException; - -public class TestConstruct02 -{ - public static final String CONSTRUCT02_DBNAME = "construct02.db"; - public static final String CONSTRUCT02_DBDIR = "./"; - public static final String CONSTRUCT02_DBFULLPATH = - CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME; - - private int itemcount; // count the number of items in the database - public static boolean verbose_flag = false; - - private DbEnv dbenv = new DbEnv(0); - - public TestConstruct02() - throws DbException, FileNotFoundException - { - dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666); - } - - public void close() - { - try { - dbenv.close(0); - removeall(true, true); - } - catch (DbException dbe) { - ERR("DbException: " + dbe); - } - } - - public static void ERR(String a) - { - System.out.println("FAIL: " + a); - sysexit(1); - } - - public static void DEBUGOUT(String s) - { - System.out.println(s); - } - - public static void VERBOSEOUT(String s) - { - if (verbose_flag) - System.out.println(s); - } - - public static void sysexit(int code) - { - System.exit(code); - 
} - - private static void check_file_removed(String name, boolean fatal, - boolean force_remove_first) - { - File f = new File(name); - if (force_remove_first) { - f.delete(); - } - if (f.exists()) { - if (fatal) - System.out.print("FAIL: "); - System.out.print("File \"" + name + "\" still exists after run\n"); - if (fatal) - sysexit(1); - } - } - - - // Check that key/data for 0 - count-1 are already present, - // and write a key/data for count. The key and data are - // both "0123...N" where N == count-1. - // - void rundb(Db db, int count) - throws DbException, FileNotFoundException - { - if (count >= 64) - throw new IllegalArgumentException("rundb count arg >= 64"); - - // The bit map of keys we've seen - long bitmap = 0; - - // The bit map of keys we expect to see - long expected = (1 << (count+1)) - 1; - - byte outbuf[] = new byte[count+1]; - int i; - for (i=0; i count) { - ERR("reread length is bad: expect " + count + " got "+ len); - } - else if ((bitmap & bit) != 0) { - ERR("key already seen"); - } - else if ((expected & bit) == 0) { - ERR("key was not expected"); - } - bitmap |= bit; - expected &= ~(bit); - } - if (expected != 0) { - System.out.print(" expected more keys, bitmap is: " + - expected + "\n"); - ERR("missing keys in database"); - } - dbcp.close(); - } - - void t1() - throws DbException, FileNotFoundException - { - Db db = new Db(dbenv, 0); - db.set_error_stream(System.err); - db.set_pagesize(1024); - db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE, - Db.DB_CREATE, 0664); - - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - db.close(0); - - // Reopen no longer allowed, so we create a new db. 
- db = new Db(dbenv, 0); - db.set_error_stream(System.err); - db.set_pagesize(1024); - db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE, - Db.DB_CREATE, 0664); - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - rundb(db, itemcount++); - db.close(0); - } - - // remove any existing environment or database - void removeall(boolean use_db, boolean remove_env) - { - { - try { - if (remove_env) { - DbEnv tmpenv = new DbEnv(0); - tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE); - } - else if (use_db) { - /**/ - //memory leak for this: - Db tmpdb = new Db(null, 0); - tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0); - /**/ - } - } - catch (DbException dbe) { - System.err.println("error during remove: " + dbe); - } - catch (FileNotFoundException dbe) { - System.err.println("error during remove: " + dbe); - } - } - check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db); - if (remove_env) { - for (int i=0; i<8; i++) { - String fname = "__db.00" + i; - check_file_removed(fname, true, !use_db); - } - } - } - - boolean doall() - { - itemcount = 0; - try { - VERBOSEOUT(" Running test 1:\n"); - t1(); - VERBOSEOUT(" finished.\n"); - removeall(true, false); - return true; - } - catch (DbException dbe) { - ERR("EXCEPTION RECEIVED: " + dbe); - } - catch (FileNotFoundException fnfe) { - ERR("EXCEPTION RECEIVED: " + fnfe); - } - return false; - } - - public static void main(String args[]) - { - int iterations = 200; - - for (int argcnt=0; argcnt -scale) - return "<" + scale; - } - return ">" + max; - } -} diff --git a/storage/bdb/test/scr016/TestConstruct02.testout b/storage/bdb/test/scr016/TestConstruct02.testout deleted file mode 100644 index 5d2041cd197..00000000000 --- a/storage/bdb/test/scr016/TestConstruct02.testout +++ /dev/null @@ -1,3 +0,0 @@ -ALL TESTS SUCCESSFUL -delta for total mem: <10 -delta for free mem: <10000 diff --git a/storage/bdb/test/scr016/TestDbtFlags.java b/storage/bdb/test/scr016/TestDbtFlags.java deleted file mode 100644 index 
0e2110ad137..00000000000 --- a/storage/bdb/test/scr016/TestDbtFlags.java +++ /dev/null @@ -1,241 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestDbtFlags.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.InputStreamReader; -import java.io.IOException; -import java.io.PrintStream; - -public class TestDbtFlags -{ - private static final String FileName = "access.db"; - private int flag_value; - private int buf_size; - private int cur_input_line = 0; - - /*zippy quotes for test input*/ - static final String[] input_lines = { - "If we shadows have offended", - "Think but this, and all is mended", - "That you have but slumber'd here", - "While these visions did appear", - "And this weak and idle theme", - "No more yielding but a dream", - "Gentles, do not reprehend", - "if you pardon, we will mend", - "And, as I am an honest Puck, if we have unearned luck", - "Now to 'scape the serpent's tongue, we will make amends ere long;", - "Else the Puck a liar call; so, good night unto you all.", - "Give me your hands, if we be friends, and Robin shall restore amends." 
- }; - - public TestDbtFlags(int flag_value, int buf_size) - { - this.flag_value = flag_value; - this.buf_size = buf_size; - } - - public static void runWithFlags(int flag_value, int size) - { - String msg = "=-=-=-= Test with DBT flags " + flag_value + - " bufsize " + size; - System.out.println(msg); - System.err.println(msg); - - try - { - TestDbtFlags app = new TestDbtFlags(flag_value, size); - app.run(); - } - catch (DbException dbe) - { - System.err.println("TestDbtFlags: " + dbe.toString()); - System.exit(1); - } - catch (FileNotFoundException fnfe) - { - System.err.println("TestDbtFlags: " + fnfe.toString()); - System.exit(1); - } - } - - public static void main(String argv[]) - { - runWithFlags(Db.DB_DBT_MALLOC, -1); - runWithFlags(Db.DB_DBT_REALLOC, -1); - runWithFlags(Db.DB_DBT_USERMEM, 20); - runWithFlags(Db.DB_DBT_USERMEM, 50); - runWithFlags(Db.DB_DBT_USERMEM, 200); - runWithFlags(0, -1); - - System.exit(0); - } - - String get_input_line() - { - if (cur_input_line >= input_lines.length) - return null; - return input_lines[cur_input_line++]; - } - - public void run() - throws DbException, FileNotFoundException - { - // Remove the previous database. - new File(FileName).delete(); - - // Create the database object. - // There is no environment for this simple example. - Db table = new Db(null, 0); - table.setErrorStream(System.err); - table.setErrorPrefix("TestDbtFlags"); - table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - // - // Insert records into the database, where the key is the user - // input and the data is the user input in reverse order. 
- // - for (;;) { - //System.err.println("input line " + cur_input_line); - String line = get_input_line(); - if (line == null) - break; - - String reversed = (new StringBuffer(line)).reverse().toString(); - - // See definition of StringDbt below - // - StringDbt key = new StringDbt(line, flag_value); - StringDbt data = new StringDbt(reversed, flag_value); - - try - { - int err; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - key.check_flags(); - data.check_flags(); - } - catch (DbException dbe) - { - System.out.println(dbe.toString()); - } - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - StringDbt key = new StringDbt(flag_value, buf_size); - StringDbt data = new StringDbt(flag_value, buf_size); - - int iteration_count = 0; - int dbreturn = 0; - - while (dbreturn == 0) { - //System.err.println("iteration " + iteration_count); - try { - if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) { - System.out.println(key.get_string() + " : " + data.get_string()); - } - } - catch (DbMemoryException dme) { - /* In a real application, we'd normally increase - * the size of the buffer. Since we've created - * this error condition for testing, we'll just report it. - * We still need to skip over this record, and we don't - * want to mess with our original Dbt's, since we want - * to see more errors. So create some temporary - * mallocing Dbts to get this record. 
- */ - System.err.println("exception, iteration " + iteration_count + - ": " + dme); - System.err.println(" key size: " + key.getSize() + - " ulen: " + key.getUserBufferLength()); - System.err.println(" data size: " + key.getSize() + - " ulen: " + key.getUserBufferLength()); - - dme.getDbt().setSize(buf_size); - StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1); - StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1); - if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) { - System.err.println("cannot get expected next record"); - return; - } - System.out.println(tempkey.get_string() + " : " + - tempdata.get_string()); - } - iteration_count++; - } - key.check_flags(); - data.check_flags(); - - iterator.close(); - table.close(0); - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. 
- // - static /*inner*/ - class StringDbt extends Dbt - { - int saved_flags; - - StringDbt(int flags, int buf_size) - { - this.saved_flags = flags; - setFlags(saved_flags); - if (buf_size != -1) { - setData(new byte[buf_size]); - setUserBufferLength(buf_size); - } - } - - StringDbt(String value, int flags) - { - this.saved_flags = flags; - setFlags(saved_flags); - set_string(value); - } - - void set_string(String value) - { - setData(value.getBytes()); - setSize(value.length()); - check_flags(); - } - - String get_string() - { - check_flags(); - return new String(getData(), 0, getSize()); - } - - void check_flags() - { - int actual_flags = getFlags(); - if (actual_flags != saved_flags) { - System.err.println("flags botch: expected " + saved_flags + - ", got " + actual_flags); - } - } - } -} diff --git a/storage/bdb/test/scr016/TestDbtFlags.testerr b/storage/bdb/test/scr016/TestDbtFlags.testerr deleted file mode 100644 index 7666868ebd4..00000000000 --- a/storage/bdb/test/scr016/TestDbtFlags.testerr +++ /dev/null @@ -1,54 +0,0 @@ -=-=-=-= Test with DBT flags 4 bufsize -1 -=-=-=-= Test with DBT flags 16 bufsize -1 -=-=-=-= Test with DBT flags 32 bufsize 20 -exception, iteration 0: Dbt not large enough for available data - key size: 28 ulen: 20 - data size: 28 ulen: 20 -exception, iteration 1: Dbt not large enough for available data - key size: 53 ulen: 20 - data size: 53 ulen: 20 -exception, iteration 2: Dbt not large enough for available data - key size: 55 ulen: 20 - data size: 55 ulen: 20 -exception, iteration 3: Dbt not large enough for available data - key size: 25 ulen: 20 - data size: 25 ulen: 20 -exception, iteration 4: Dbt not large enough for available data - key size: 69 ulen: 20 - data size: 69 ulen: 20 -exception, iteration 5: Dbt not large enough for available data - key size: 27 ulen: 20 - data size: 27 ulen: 20 -exception, iteration 6: Dbt not large enough for available data - key size: 28 ulen: 20 - data size: 28 ulen: 20 -exception, iteration 7: Dbt 
not large enough for available data - key size: 65 ulen: 20 - data size: 65 ulen: 20 -exception, iteration 8: Dbt not large enough for available data - key size: 32 ulen: 20 - data size: 32 ulen: 20 -exception, iteration 9: Dbt not large enough for available data - key size: 33 ulen: 20 - data size: 33 ulen: 20 -exception, iteration 10: Dbt not large enough for available data - key size: 30 ulen: 20 - data size: 30 ulen: 20 -exception, iteration 11: Dbt not large enough for available data - key size: 27 ulen: 20 - data size: 27 ulen: 20 -=-=-=-= Test with DBT flags 32 bufsize 50 -exception, iteration 1: Dbt not large enough for available data - key size: 53 ulen: 50 - data size: 53 ulen: 50 -exception, iteration 2: Dbt not large enough for available data - key size: 55 ulen: 50 - data size: 55 ulen: 50 -exception, iteration 4: Dbt not large enough for available data - key size: 69 ulen: 50 - data size: 69 ulen: 50 -exception, iteration 7: Dbt not large enough for available data - key size: 65 ulen: 50 - data size: 65 ulen: 50 -=-=-=-= Test with DBT flags 32 bufsize 200 -=-=-=-= Test with DBT flags 0 bufsize -1 diff --git a/storage/bdb/test/scr016/TestDbtFlags.testout b/storage/bdb/test/scr016/TestDbtFlags.testout deleted file mode 100644 index b8deb1bcc16..00000000000 --- a/storage/bdb/test/scr016/TestDbtFlags.testout +++ /dev/null @@ -1,78 +0,0 @@ -=-=-=-= Test with DBT flags 4 bufsize -1 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. 
: .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi -=-=-=-= Test with DBT flags 16 bufsize -1 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi -=-=-=-= Test with DBT flags 32 bufsize 20 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. 
: .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi -=-=-=-= Test with DBT flags 32 bufsize 50 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. 
: .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi -=-=-=-= Test with DBT flags 32 bufsize 200 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi -=-=-=-= Test with DBT flags 0 bufsize -1 -And this weak and idle theme : emeht eldi dna kaew siht dnA -And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA -Else the Puck a liar call; so, good night unto you all. 
: .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE -Gentles, do not reprehend : dneherper ton od ,seltneG -Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG -If we shadows have offended : dedneffo evah swodahs ew fI -No more yielding but a dream : maerd a tub gnidleiy erom oN -Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN -That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT -Think but this, and all is mended : dednem si lla dna ,siht tub knihT -While these visions did appear : raeppa did snoisiv eseht elihW -if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi diff --git a/storage/bdb/test/scr016/TestGetSetMethods.java b/storage/bdb/test/scr016/TestGetSetMethods.java deleted file mode 100644 index 06a52c5ad0e..00000000000 --- a/storage/bdb/test/scr016/TestGetSetMethods.java +++ /dev/null @@ -1,99 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 2000-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestGetSetMethods.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Do some regression tests for simple get/set access methods - * on DbEnv, DbTxn, Db. We don't currently test that they have - * the desired effect, only that they operate and return correctly. 
- */ -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestGetSetMethods -{ - public void testMethods() - throws DbException, FileNotFoundException - { - DbEnv dbenv = new DbEnv(0); - DbTxn dbtxn; - byte[][] conflicts = new byte[10][10]; - - dbenv.setTimeout(0x90000000, - Db.DB_SET_LOCK_TIMEOUT); - dbenv.setLogBufferSize(0x1000); - dbenv.setLogDir("."); - dbenv.setLogMax(0x10000000); - dbenv.setLogRegionMax(0x100000); - dbenv.setLockConflicts(conflicts); - dbenv.setLockDetect(Db.DB_LOCK_DEFAULT); - // exists, but is deprecated: - // dbenv.set_lk_max(0); - dbenv.setLockMaxLockers(100); - dbenv.setLockMaxLocks(10); - dbenv.setLockMaxObjects(1000); - dbenv.setMemoryPoolMapSize(0x10000); - dbenv.setTestAndSetSpins(1000); - - // Need to open the environment so we - // can get a transaction. - // - dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN | - Db.DB_INIT_LOCK | Db.DB_INIT_LOG | - Db.DB_INIT_MPOOL, - 0644); - - dbtxn = dbenv.txnBegin(null, Db.DB_TXN_NOWAIT); - dbtxn.setTimeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT); - dbtxn.abort(); - - dbenv.close(0); - - // We get a db, one for each type. - // That's because once we call (for instance) - // setBtreeMinKey, DB 'knows' that this is a - // Btree Db, and it cannot be used to try Hash - // or Recno functions. 
- // - Db db_bt = new Db(null, 0); - db_bt.setBtreeMinKey(100); - db_bt.setCacheSize(0x100000, 0); - db_bt.close(0); - - Db db_h = new Db(null, 0); - db_h.setHashFillFactor(0x10); - db_h.setHashNumElements(100); - db_h.setByteOrder(0); - db_h.setPageSize(0x10000); - db_h.close(0); - - Db db_re = new Db(null, 0); - db_re.setRecordDelimiter('@'); - db_re.setRecordPad(10); - db_re.setRecordSource("re.in"); - db_re.setRecordLength(1000); - db_re.close(0); - - Db db_q = new Db(null, 0); - db_q.setQueueExtentSize(200); - db_q.close(0); - } - - public static void main(String[] args) - { - try { - TestGetSetMethods tester = new TestGetSetMethods(); - tester.testMethods(); - } - catch (Exception e) { - System.err.println("TestGetSetMethods: Exception: " + e); - } - } -} diff --git a/storage/bdb/test/scr016/TestKeyRange.java b/storage/bdb/test/scr016/TestKeyRange.java deleted file mode 100644 index 57a776b014a..00000000000 --- a/storage/bdb/test/scr016/TestKeyRange.java +++ /dev/null @@ -1,205 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestKeyRange.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * NOTE: TestKeyRange is AccessExample changed to test Db.key_range. - * See comments with ADDED for specific areas of change. 
- */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.StringReader; -import java.io.Reader; -import java.io.IOException; -import java.io.PrintStream; - -public class TestKeyRange -{ - private static final String FileName = "access.db"; - - public TestKeyRange() - { - } - - private static void usage() - { - System.err.println("usage: TestKeyRange\n"); - System.exit(1); - } - - public static void main(String argv[]) - { - try - { - TestKeyRange app = new TestKeyRange(); - app.run(); - } - catch (DbException dbe) - { - System.err.println("TestKeyRange: " + dbe.toString()); - System.exit(1); - } - catch (FileNotFoundException fnfe) - { - System.err.println("TestKeyRange: " + fnfe.toString()); - System.exit(1); - } - System.exit(0); - } - - // Prompts for a line, and keeps prompting until a non blank - // line is returned. Returns null on error. - // - static public String askForLine(Reader reader, - PrintStream out, String prompt) - { - String result = ""; - while (result != null && result.length() == 0) { - out.print(prompt); - out.flush(); - result = getLine(reader); - } - return result; - } - - // Not terribly efficient, but does the job. - // Works for reading a line from stdin or a file. - // Returns null on EOF. If EOF appears in the middle - // of a line, returns that line, then null on next call. - // - static public String getLine(Reader reader) - { - StringBuffer b = new StringBuffer(); - int c; - try { - while ((c = reader.read()) != -1 && c != '\n') { - if (c != '\r') - b.append((char)c); - } - } - catch (IOException ioe) { - c = -1; - } - - if (c == -1 && b.length() == 0) - return null; - else - return b.toString(); - } - - public void run() - throws DbException, FileNotFoundException - { - // Remove the previous database. - new File(FileName).delete(); - - // Create the database object. - // There is no environment for this simple example. 
- Db table = new Db(null, 0); - table.setErrorStream(System.err); - table.setErrorPrefix("TestKeyRange"); - table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - // - // Insert records into the database, where the key is the user - // input and the data is the user input in reverse order. - // - Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA"); - - int count= 0;/*ADDED*/ - for (;;) { - String line = askForLine(reader, System.out, "input>"); - if (line == null) - break; - - String reversed = (new StringBuffer(line)).reverse().toString(); - - // See definition of StringDbt below - // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err; - if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { - System.out.println(dbe.toString()); - } - System.out.println(""); - - /*START ADDED*/ - { - if (count++ > 0) { - DbKeyRange range = new DbKeyRange(); - table.keyRange(null, key, range, 0); - System.out.println("less: " + range.less); - System.out.println("equal: " + range.equal); - System.out.println("greater: " + range.greater); - } - } - /*END ADDED*/ - - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - StringDbt key = new StringDbt(); - StringDbt data = new StringDbt(); - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { - System.out.println(key.getString() + " : " + data.getString()); - } - iterator.close(); - table.close(0); - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. 
- // - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - StringDbt(String value) - { - setString(value); - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - void setString(String value) - { - setData(value.getBytes()); - setSize(value.length()); - } - - String getString() - { - return new String(getData(), 0, getSize()); - } - } -} diff --git a/storage/bdb/test/scr016/TestKeyRange.testout b/storage/bdb/test/scr016/TestKeyRange.testout deleted file mode 100644 index c265f3289fb..00000000000 --- a/storage/bdb/test/scr016/TestKeyRange.testout +++ /dev/null @@ -1,27 +0,0 @@ -input> -input> -less: 0.5 -equal: 0.5 -greater: 0.0 -input> -less: 0.6666666666666666 -equal: 0.3333333333333333 -greater: 0.0 -input> -less: 0.5 -equal: 0.25 -greater: 0.25 -input> -less: 0.0 -equal: 0.2 -greater: 0.8 -input> -less: 0.0 -equal: 0.16666666666666666 -greater: 0.8333333333333334 -input>MAMAMIA : AIMAMAM -ZED : DEZ -abc : cba -middle : elddim -moremiddle : elddimerom -zend : dnez diff --git a/storage/bdb/test/scr016/TestLockVec.java b/storage/bdb/test/scr016/TestLockVec.java deleted file mode 100644 index b14a20c6848..00000000000 --- a/storage/bdb/test/scr016/TestLockVec.java +++ /dev/null @@ -1,249 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: TestLockVec.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * test of DbEnv.lock_vec() - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestLockVec -{ - public static int locker1; - public static int locker2; - - public static void gdb_pause() - { - try { - System.err.println("attach gdb and type return..."); - System.in.read(new byte[10]); - } - catch (java.io.IOException ie) { - } - } - - public static void main(String[] args) - { - try { - DbEnv dbenv1 = new DbEnv(0); - DbEnv dbenv2 = new DbEnv(0); - dbenv1.open(".", - Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0); - dbenv2.open(".", - Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0); - locker1 = dbenv1.lockId(); - locker2 = dbenv1.lockId(); - Db db1 = new Db(dbenv1, 0); - db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0); - Db db2 = new Db(dbenv2, 0); - db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0); - - // populate our database, just two elements. - Dbt Akey = new Dbt("A".getBytes()); - Dbt Adata = new Dbt("Adata".getBytes()); - Dbt Bkey = new Dbt("B".getBytes()); - Dbt Bdata = new Dbt("Bdata".getBytes()); - - // We don't allow Dbts to be reused within the - // same method call, so we need some duplicates. 
- Dbt Akeyagain = new Dbt("A".getBytes()); - Dbt Bkeyagain = new Dbt("B".getBytes()); - - db1.put(null, Akey, Adata, 0); - db1.put(null, Bkey, Bdata, 0); - - Dbt notInDatabase = new Dbt("C".getBytes()); - - /* make sure our check mechanisms work */ - int expectedErrs = 0; - - lock_check_free(dbenv2, Akey); - try { - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - } - catch (DbException dbe1) { - expectedErrs += 1; - } - DbLock tmplock = dbenv1.lockGet(locker1, Db.DB_LOCK_NOWAIT, - Akey, Db.DB_LOCK_READ); - lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ); - try { - lock_check_free(dbenv2, Akey); - } - catch (DbException dbe2) { - expectedErrs += 2; - } - if (expectedErrs != 1+2) { - System.err.println("lock check mechanism is broken"); - System.exit(1); - } - dbenv1.lockPut(tmplock); - - /* Now on with the test, a series of lock_vec requests, - * with checks between each call. - */ - - System.out.println("get a few"); - /* Request: get A(W), B(R), B(R) */ - DbLockRequest[] reqs = new DbLockRequest[3]; - - reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE, - Akey, null); - reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Bkey, null); - reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Bkeyagain, null); - - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3); - - /* Locks held: A(W), B(R), B(R) */ - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE); - - System.out.println("put a couple"); - /* Request: put A, B(first) */ - reqs[0].setOp(Db.DB_LOCK_PUT); - reqs[1].setOp(Db.DB_LOCK_PUT); - - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2); - - /* Locks held: B(R) */ - lock_check_free(dbenv2, Akey); - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - - System.out.println("put one more, test index offset"); - /* Request: put B(second) */ - reqs[2].setOp(Db.DB_LOCK_PUT); - - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1); - - /* Locks held: */ - 
lock_check_free(dbenv2, Akey); - lock_check_free(dbenv2, Bkey); - - System.out.println("get a few"); - /* Request: get A(R), A(R), B(R) */ - reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Akey, null); - reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Akeyagain, null); - reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Bkey, null); - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3); - - /* Locks held: A(R), B(R), B(R) */ - lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ); - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - - System.out.println("try putobj"); - /* Request: get B(R), putobj A */ - reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, - Bkey, null); - reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0, - Akey, null); - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2); - - /* Locks held: B(R), B(R) */ - lock_check_free(dbenv2, Akey); - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - - System.out.println("get one more"); - /* Request: get A(W) */ - reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE, - Akey, null); - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1); - - /* Locks held: A(W), B(R), B(R) */ - lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE); - lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ); - - System.out.println("putall"); - /* Request: putall */ - reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0, - null, null); - dbenv1.lockVector(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1); - - lock_check_free(dbenv2, Akey); - lock_check_free(dbenv2, Bkey); - db1.close(0); - dbenv1.close(0); - db2.close(0); - dbenv2.close(0); - System.out.println("done"); - } - catch (DbLockNotGrantedException nge) { - System.err.println("Db Exception: " + nge); - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - } - - } - - /* Verify that the lock is free, throw 
an exception if not. - * We do this by trying to grab a write lock (no wait). - */ - static void lock_check_free(DbEnv dbenv, Dbt dbt) - throws DbException - { - DbLock tmplock = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT, - dbt, Db.DB_LOCK_WRITE); - dbenv.lockPut(tmplock); - } - - /* Verify that the lock is held with the mode, throw an exception if not. - * If we have a write lock, we should not be able to get the lock - * for reading. If we have a read lock, we should be able to get - * it for reading, but not writing. - */ - static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode) - throws DbException - { - DbLock never = null; - - try { - if (mode == Db.DB_LOCK_WRITE) { - never = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT, - dbt, Db.DB_LOCK_READ); - } - else if (mode == Db.DB_LOCK_READ) { - DbLock rlock = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT, - dbt, Db.DB_LOCK_READ); - dbenv.lockPut(rlock); - never = dbenv.lockGet(locker2, Db.DB_LOCK_NOWAIT, - dbt, Db.DB_LOCK_WRITE); - } - else { - throw new DbException("lock_check_held bad mode"); - } - } - catch (DbLockNotGrantedException nge) { - /* We expect this on our last lock_get call */ - } - - /* make sure we failed */ - if (never != null) { - try { - dbenv.lockPut(never); - } - catch (DbException dbe2) { - System.err.println("Got some real troubles now"); - System.exit(1); - } - throw new DbException("lock_check_held: lock was not held"); - } - } - -} diff --git a/storage/bdb/test/scr016/TestLockVec.testout b/storage/bdb/test/scr016/TestLockVec.testout deleted file mode 100644 index 1cf16c6ac4e..00000000000 --- a/storage/bdb/test/scr016/TestLockVec.testout +++ /dev/null @@ -1,8 +0,0 @@ -get a few -put a couple -put one more, test index offset -get a few -try putobj -get one more -putall -done diff --git a/storage/bdb/test/scr016/TestLogc.java b/storage/bdb/test/scr016/TestLogc.java deleted file mode 100644 index 8f406831829..00000000000 --- a/storage/bdb/test/scr016/TestLogc.java +++ /dev/null @@ -1,100 +0,0 @@ 
-/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestLogc.java,v 1.9 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * A basic regression test for the Logc class. - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestLogc -{ - public static void main(String[] args) - { - try { - DbEnv env = new DbEnv(0); - env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0); - - // Do some database activity to get something into the log. - Db db1 = new Db(env, 0); - db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0); - db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0); - db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0); - db1.close(0); - - Db db2 = new Db(env, 0); - db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0); - db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0); - db2.close(0); - - // Now get a log cursor and walk through. - DbLogc logc = env.log_cursor(0); - - int ret = 0; - DbLsn lsn = new DbLsn(); - Dbt dbt = new Dbt(); - int flags = Db.DB_FIRST; - - int count = 0; - while ((ret = logc.get(lsn, dbt, flags)) == 0) { - - // We ignore the contents of the log record, - // it's not portable. Even the exact count - // is may change when the underlying implementation - // changes, we'll just make sure at the end we saw - // 'enough'. - // - // System.out.println("logc.get: " + count); - // System.out.println(showDbt(dbt)); - // - count++; - flags = Db.DB_NEXT; - } - if (ret != Db.DB_NOTFOUND) { - System.err.println("*** FAIL: logc.get returned: " + - DbEnv.strerror(ret)); - } - logc.close(0); - - // There has to be at *least* four log records, - // since we did four separate database operations. 
- // - if (count < 4) - System.out.println("*** FAIL: not enough log records"); - - System.out.println("TestLogc done."); - } - catch (DbException dbe) { - System.err.println("*** FAIL: Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("*** FAIL: FileNotFoundException: " + fnfe); - } - - } - - public static String showDbt(Dbt dbt) - { - StringBuffer sb = new StringBuffer(); - int size = dbt.get_size(); - byte[] data = dbt.get_data(); - int i; - for (i=0; i "); - if (line == null) - break; - - String reversed = (new StringBuffer(line)).reverse().toString(); - - // See definition of StringDbt below - // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { - System.out.println(dbe.toString()); - } - System.out.println(""); - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - StringDbt key = new StringDbt(); - StringDbt data = new StringDbt(); - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { - System.out.println(key.getString() + " : " + data.getString()); - } - iterator.close(); - table.close(0); - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. 
- // - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - StringDbt(String value) - { - setString(value); - setFlags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - void setString(String value) - { - setData(value.getBytes()); - setSize(value.length()); - } - - String getString() - { - return new String(getData(), 0, getSize()); - } - } -} diff --git a/storage/bdb/test/scr016/TestOpenEmpty.testerr b/storage/bdb/test/scr016/TestOpenEmpty.testerr deleted file mode 100644 index c08da27b36e..00000000000 --- a/storage/bdb/test/scr016/TestOpenEmpty.testerr +++ /dev/null @@ -1,2 +0,0 @@ -TestOpenEmpty: access.db: unexpected file type or format -TestOpenEmpty: java.lang.IllegalArgumentException: Invalid argument diff --git a/storage/bdb/test/scr016/TestReplication.java b/storage/bdb/test/scr016/TestReplication.java deleted file mode 100644 index e20b9a92901..00000000000 --- a/storage/bdb/test/scr016/TestReplication.java +++ /dev/null @@ -1,289 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestReplication.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test of replication, merely to exercise the individual - * methods in the API. Rather than use TCP/IP, our transport - * mechanism is just an ArrayList of byte arrays. - * It's managed like a queue, and synchronization is via - * the ArrayList object itself and java's wait/notify. - * It's not terribly extensible, but it's fine for a small test. 
- */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Vector; - -public class TestReplication extends Thread - implements DbRepTransport -{ - public static final String MASTER_ENVDIR = "./master"; - public static final String CLIENT_ENVDIR = "./client"; - - private Vector queue = new Vector(); - private DbEnv master_env; - private DbEnv client_env; - - private static void mkdir(String name) - throws IOException - { - (new File(name)).mkdir(); - } - - - // The client thread runs this - public void run() - { - try { - System.err.println("c10"); - client_env = new DbEnv(0); - System.err.println("c11"); - client_env.set_rep_transport(1, this); - System.err.println("c12"); - client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0); - System.err.println("c13"); - Dbt myid = new Dbt("master01".getBytes()); - System.err.println("c14"); - client_env.rep_start(myid, Db.DB_REP_CLIENT); - System.err.println("c15"); - DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage(); - processMsg.envid = 2; - System.err.println("c20"); - boolean running = true; - - Dbt control = new Dbt(); - Dbt rec = new Dbt(); - - while (running) { - int msgtype = 0; - - System.err.println("c30"); - synchronized (queue) { - if (queue.size() == 0) { - System.err.println("c40"); - sleepShort(); - } - else { - msgtype = ((Integer)queue.firstElement()).intValue(); - queue.removeElementAt(0); - byte[] data; - - System.err.println("c50 " + msgtype); - - switch (msgtype) { - case -1: - running = false; - break; - case 1: - data = (byte[])queue.firstElement(); - queue.removeElementAt(0); - control.set_data(data); - control.set_size(data.length); - break; - case 2: - control.set_data(null); - control.set_size(0); - break; - case 3: - data = (byte[])queue.firstElement(); - queue.removeElementAt(0); - rec.set_data(data); - rec.set_size(data.length); - break; - case 4: - 
rec.set_data(null); - rec.set_size(0); - break; - } - - } - } - System.err.println("c60"); - if (msgtype == 3 || msgtype == 4) { - System.out.println("client: Got message"); - client_env.rep_process_message(control, rec, - processMsg); - } - } - System.err.println("c70"); - Db db = new Db(client_env, 0); - db.open(null, "x.db", null, Db.DB_BTREE, 0, 0); - Dbt data = new Dbt(); - System.err.println("c80"); - db.get(null, new Dbt("Hello".getBytes()), data, 0); - System.err.println("c90"); - System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size())); - System.err.println("c95"); - client_env.close(0); - } - catch (Exception e) { - System.err.println("client exception: " + e); - } - } - - // Implements DbTransport - public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid) - throws DbException - { - System.out.println("Send to " + envid); - if (envid == 1) { - System.err.println("Unexpected envid = " + envid); - return 0; - } - - int nbytes = 0; - - synchronized (queue) { - System.out.println("Sending message"); - byte[] data = control.get_data(); - if (data != null && data.length > 0) { - queue.addElement(new Integer(1)); - nbytes += data.length; - byte[] newdata = new byte[data.length]; - System.arraycopy(data, 0, newdata, 0, data.length); - queue.addElement(newdata); - } - else - { - queue.addElement(new Integer(2)); - } - - data = rec.get_data(); - if (data != null && data.length > 0) { - queue.addElement(new Integer(3)); - nbytes += data.length; - byte[] newdata = new byte[data.length]; - System.arraycopy(data, 0, newdata, 0, data.length); - queue.addElement(newdata); - } - else - { - queue.addElement(new Integer(4)); - } - System.out.println("MASTER: sent message"); - } - return 0; - } - - public void sleepShort() - { - try { - sleep(100); - } - catch (InterruptedException ie) - { - } - } - - public void send_terminator() - { - synchronized (queue) { - queue.addElement(new Integer(-1)); - } - } - - public void 
master() - { - try { - master_env = new DbEnv(0); - master_env.set_rep_transport(2, this); - master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644); - System.err.println("10"); - Dbt myid = new Dbt("client01".getBytes()); - master_env.rep_start(myid, Db.DB_REP_MASTER); - System.err.println("10"); - Db db = new Db(master_env, 0); - System.err.println("20"); - db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - System.err.println("30"); - db.put(null, new Dbt("Hello".getBytes()), - new Dbt("world".getBytes()), 0); - System.err.println("40"); - //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult(); - //master_env.rep_elect(2, 2, 3, 4, electionResult); - db.close(0); - System.err.println("50"); - master_env.close(0); - send_terminator(); - } - catch (Exception e) { - System.err.println("client exception: " + e); - } - } - - public static void main(String[] args) - { - // The test should only take a few milliseconds. - // give it 10 seconds before bailing out. - TimelimitThread t = new TimelimitThread(1000*10); - t.start(); - - try { - mkdir(CLIENT_ENVDIR); - mkdir(MASTER_ENVDIR); - - TestReplication rep = new TestReplication(); - - // Run the client as a seperate thread. - rep.start(); - - // Run the master. - rep.master(); - - // Wait for the master to finish. 
- rep.join(); - } - catch (Exception e) - { - System.err.println("Exception: " + e); - } - t.finished(); - } - -} - -class TimelimitThread extends Thread -{ - long nmillis; - boolean finished = false; - - TimelimitThread(long nmillis) - { - this.nmillis = nmillis; - } - - public void finished() - { - finished = true; - } - - public void run() - { - long targetTime = System.currentTimeMillis() + nmillis; - long curTime; - - while (!finished && - ((curTime = System.currentTimeMillis()) < targetTime)) { - long diff = targetTime - curTime; - if (diff > 100) - diff = 100; - try { - sleep(diff); - } - catch (InterruptedException ie) { - } - } - System.err.println(""); - System.exit(1); - } -} diff --git a/storage/bdb/test/scr016/TestRpcServer.java b/storage/bdb/test/scr016/TestRpcServer.java deleted file mode 100644 index ee040f0cfd6..00000000000 --- a/storage/bdb/test/scr016/TestRpcServer.java +++ /dev/null @@ -1,193 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: TestRpcServer.java,v 1.5 2004/01/28 03:36:34 bostic Exp $ - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.Reader; -import java.io.StringReader; -import java.io.IOException; -import java.io.PrintStream; - -public class TestRpcServer -{ - private static final String FileName = "access.db"; - - public TestRpcServer() - { - } - - private static void usage() - { - System.err.println("usage: TestRpcServer\n"); - System.exit(1); - } - - public static void main(String argv[]) - { - try - { - TestRpcServer app = new TestRpcServer(); - app.run(); - } - catch (DbException dbe) - { - System.err.println("TestRpcServer: " + dbe.toString()); - System.exit(1); - } - catch (FileNotFoundException fnfe) - { - System.err.println("TestRpcServer: " + fnfe.toString()); - System.exit(1); - } - System.exit(0); - } - - // Prompts for a line, and keeps prompting until a non blank - // line is returned. Returns null on error. - // - static public String askForLine(Reader reader, - PrintStream out, String prompt) - { - String result = ""; - while (result != null && result.length() == 0) { - out.print(prompt); - out.flush(); - result = getLine(reader); - } - return result; - } - - // Not terribly efficient, but does the job. - // Works for reading a line from stdin or a file. - // Returns null on EOF. If EOF appears in the middle - // of a line, returns that line, then null on next call. - // - static public String getLine(Reader reader) - { - StringBuffer b = new StringBuffer(); - int c; - try { - while ((c = reader.read()) != -1 && c != '\n') { - if (c != '\r') - b.append((char)c); - } - } - catch (IOException ioe) { - c = -1; - } - - if (c == -1 && b.length() == 0) - return null; - else - return b.toString(); - } - - public void run() - throws DbException, FileNotFoundException - { - // Remove the previous database. 
- new File(FileName).delete(); - - DbEnv dbenv = new DbEnv(Db.DB_CLIENT); - dbenv.set_rpc_server(null, "localhost", 0, 0, 0); - dbenv.open(".", Db.DB_CREATE, 0644); - System.out.println("server connection set"); - - // Create the database object. - // There is no environment for this simple example. - Db table = new Db(dbenv, 0); - table.set_error_stream(System.err); - table.set_errpfx("TestRpcServer"); - table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - // - // Insert records into the database, where the key is the user - // input and the data is the user input in reverse order. - // - Reader reader = - new StringReader("abc\nStuff\nmore Stuff\nlast line\n"); - - for (;;) { - String line = askForLine(reader, System.out, "input> "); - if (line == null) - break; - - String reversed = (new StringBuffer(line)).reverse().toString(); - - // See definition of StringDbt below - // - StringDbt key = new StringDbt(line); - StringDbt data = new StringDbt(reversed); - - try - { - int err; - if ((err = table.put(null, - key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) { - System.out.println("Key " + line + " already exists."); - } - } - catch (DbException dbe) - { - System.out.println(dbe.toString()); - } - System.out.println(""); - } - - // Acquire an iterator for the table. - Dbc iterator; - iterator = table.cursor(null, 0); - - // Walk through the table, printing the key/data pairs. - // See class StringDbt defined below. - // - StringDbt key = new StringDbt(); - StringDbt data = new StringDbt(); - while (iterator.get(key, data, Db.DB_NEXT) == 0) - { - System.out.println(key.getString() + " : " + data.getString()); - } - iterator.close(); - table.close(0); - } - - // Here's an example of how you can extend a Dbt in a straightforward - // way to allow easy storage/retrieval of strings, or whatever - // kind of data you wish. We've declared it as a static inner - // class, but it need not be. 
- // - static /*inner*/ - class StringDbt extends Dbt - { - StringDbt() - { - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - StringDbt(String value) - { - setString(value); - set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval - } - - void setString(String value) - { - set_data(value.getBytes()); - set_size(value.length()); - } - - String getString() - { - return new String(get_data(), 0, get_size()); - } - } -} diff --git a/storage/bdb/test/scr016/TestSameDbt.java b/storage/bdb/test/scr016/TestSameDbt.java deleted file mode 100644 index 28a46af31e9..00000000000 --- a/storage/bdb/test/scr016/TestSameDbt.java +++ /dev/null @@ -1,56 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestSameDbt.java,v 1.6 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test for get/put of specific values. - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestSameDbt -{ - public static void main(String[] args) - { - try { - Db db = new Db(null, 0); - db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - // try reusing the dbt - Dbt keydatadbt = new Dbt("stuff".getBytes()); - int gotexcept = 0; - - try { - db.put(null, keydatadbt, keydatadbt, 0); - } - catch (DbException dbe) { - System.out.println("got expected Db Exception: " + dbe); - gotexcept++; - } - - if (gotexcept != 1) { - System.err.println("Missed exception"); - System.out.println("** FAIL **"); - } - else { - System.out.println("Test succeeded."); - } - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - } - - } - -} diff --git a/storage/bdb/test/scr016/TestSameDbt.testout b/storage/bdb/test/scr016/TestSameDbt.testout deleted file mode 100644 index 
be4bbbe59e9..00000000000 --- a/storage/bdb/test/scr016/TestSameDbt.testout +++ /dev/null @@ -1,2 +0,0 @@ -got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use -Test succeeded. diff --git a/storage/bdb/test/scr016/TestSimpleAccess.java b/storage/bdb/test/scr016/TestSimpleAccess.java deleted file mode 100644 index 72886b5d240..00000000000 --- a/storage/bdb/test/scr016/TestSimpleAccess.java +++ /dev/null @@ -1,37 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestSimpleAccess.java,v 1.7 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test for get/put of specific values. - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestSimpleAccess -{ - public static void main(String[] args) - { - try { - Db db = new Db(null, 0); - db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - TestUtil.populate(db); - System.out.println("finished test"); - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - } - } -} diff --git a/storage/bdb/test/scr016/TestSimpleAccess.testout b/storage/bdb/test/scr016/TestSimpleAccess.testout deleted file mode 100644 index dc88d4788e4..00000000000 --- a/storage/bdb/test/scr016/TestSimpleAccess.testout +++ /dev/null @@ -1,3 +0,0 @@ -got data: data -get using bad key: DB_NOTFOUND: No matching key/data pair found -finished test diff --git a/storage/bdb/test/scr016/TestStat.java b/storage/bdb/test/scr016/TestStat.java deleted file mode 100644 index 84add875e04..00000000000 --- a/storage/bdb/test/scr016/TestStat.java +++ /dev/null @@ -1,94 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. 
- * - * $Id: TestStat.java,v 1.10 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test for get/put of specific values. - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestStat -{ - public static void main(String[] args) - { - int envflags = - Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK | Db.DB_INIT_LOG | - Db.DB_INIT_REP | Db.DB_INIT_TXN | Db.DB_CREATE; - try { - DbEnv dbenv = new DbEnv(0); - dbenv.open(".", envflags, 0); - - // Use a separate environment that has no activity - // to do the replication stats. We don't want to get - // into configuring a real replication environment here. - DbEnv repenv = new DbEnv(0); - repenv.open(".", envflags, 0); - - // Keep a couple transactions open so DbTxnStat active - // array will have some entries. - DbTxn dbtxn1 = dbenv.txnBegin(null, 0); - DbTxn dbtxn2 = dbenv.txnBegin(dbtxn1, 0); - Db db = new Db(dbenv, 0); - db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0); - - TestUtil.populate(db); - System.out.println("BtreeStat:"); - DbBtreeStat stat = (DbBtreeStat)db.stat(0); - System.out.println(" bt_magic: " + stat.bt_magic); - - System.out.println("LogStat:"); - DbLogStat logstat = dbenv.logStat(0); - System.out.println(" st_magic: " + logstat.st_magic); - System.out.println(" st_cur_file: " + logstat.st_cur_file); - - System.out.println("TxnStat:"); - DbTxnStat txnstat = dbenv.txnStat(0); - System.out.println(" st_ncommits: " + txnstat.st_ncommits); - System.out.println(" st_nactive: " + txnstat.st_nactive); - - DbTxnStat.Active active0 = txnstat.st_txnarray[0]; - DbTxnStat.Active active1 = txnstat.st_txnarray[1]; - if (active0.txnid != active1.parentid && - active1.txnid != active0.parentid) { - System.out.println("Missing PARENT/CHILD txn relationship:"); - System.out.println(" st_active[0].txnid: " + active0.txnid); - System.out.println(" st_active[0].parentid: " + - active0.parentid); - System.out.println(" st_active[1].txnid: " + 
active1.txnid); - System.out.println(" st_active[1].parentid: " + - active1.parentid); - } - - System.out.println("DbMpoolStat:"); - DbMpoolStat mpstat = dbenv.memoryPoolStat(0); - System.out.println(" st_gbytes: " + mpstat.st_gbytes); - - System.out.println("DbMpoolFileStat:"); - DbMpoolFStat[] mpfstat = dbenv.memoryPoolFileStat(0); - System.out.println(" num files: " + mpfstat.length); - - System.out.println("RepStat:"); - DbRepStat repstat = repenv.replicationStat(0); - System.out.println(" st_status: " + repstat.st_status); - System.out.println(" st_log_duplication: " + - repstat.st_log_duplicated); - - System.out.println("finished test"); - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - } - } -} diff --git a/storage/bdb/test/scr016/TestStat.testout b/storage/bdb/test/scr016/TestStat.testout deleted file mode 100644 index bd21aa7638d..00000000000 --- a/storage/bdb/test/scr016/TestStat.testout +++ /dev/null @@ -1,18 +0,0 @@ -got data: data -get using bad key: DB_NOTFOUND: No matching key/data pair found -BtreeStat: - bt_magic: 340322 -LogStat: - st_magic: 264584 - st_cur_file: 1 -TxnStat: - st_ncommits: 0 - st_nactive: 2 -DbMpoolStat: - st_gbytes: 0 -DbMpoolFileStat: - num files: 1 -RepStat: - st_status: 0 - st_log_duplication: 0 -finished test diff --git a/storage/bdb/test/scr016/TestTruncate.java b/storage/bdb/test/scr016/TestTruncate.java deleted file mode 100644 index c84f68db4ee..00000000000 --- a/storage/bdb/test/scr016/TestTruncate.java +++ /dev/null @@ -1,87 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestTruncate.java,v 1.8 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test for get/put of specific values. 
- */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestTruncate -{ - public static void main(String[] args) - { - try { - Db db = new Db(null, 0); - db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - - // populate our massive database. - Dbt keydbt = new Dbt("key".getBytes()); - Dbt datadbt = new Dbt("data".getBytes()); - db.put(null, keydbt, datadbt, 0); - - // Now, retrieve. We could use keydbt over again, - // but that wouldn't be typical in an application. - Dbt goodkeydbt = new Dbt("key".getBytes()); - Dbt badkeydbt = new Dbt("badkey".getBytes()); - Dbt resultdbt = new Dbt(); - resultdbt.setFlags(Db.DB_DBT_MALLOC); - - int ret; - - if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) { - System.out.println("get: " + DbEnv.strerror(ret)); - } - else { - String result = - new String(resultdbt.getData(), 0, resultdbt.getSize()); - System.out.println("got data: " + result); - } - - if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) { - // We expect this... - System.out.println("get using bad key: " + DbEnv.strerror(ret)); - } - else { - String result = - new String(resultdbt.getData(), 0, resultdbt.getSize()); - System.out.println("*** got data using bad key!!: " + result); - } - - // Now, truncate and make sure that it's really gone. - System.out.println("truncating data..."); - int nrecords = db.truncate(null, 0); - System.out.println("truncate returns " + nrecords); - if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) { - // We expect this... 
- System.out.println("after truncate get: " + - DbEnv.strerror(ret)); - } - else { - String result = - new String(resultdbt.getData(), 0, resultdbt.getSize()); - System.out.println("got data: " + result); - } - - db.close(0); - System.out.println("finished test"); - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - } - - } - -} diff --git a/storage/bdb/test/scr016/TestTruncate.testout b/storage/bdb/test/scr016/TestTruncate.testout deleted file mode 100644 index 0a4bc98165d..00000000000 --- a/storage/bdb/test/scr016/TestTruncate.testout +++ /dev/null @@ -1,6 +0,0 @@ -got data: data -get using bad key: DB_NOTFOUND: No matching key/data pair found -truncating data... -truncate returns 1 -after truncate get: DB_NOTFOUND: No matching key/data pair found -finished test diff --git a/storage/bdb/test/scr016/TestUtil.java b/storage/bdb/test/scr016/TestUtil.java deleted file mode 100644 index 799d326cde7..00000000000 --- a/storage/bdb/test/scr016/TestUtil.java +++ /dev/null @@ -1,57 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestUtil.java,v 1.5 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Utilities used by many tests. - */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import java.io.FileNotFoundException; - -public class TestUtil -{ - public static void populate(Db db) - throws DbException - { - // populate our massive database. - Dbt keydbt = new Dbt("key".getBytes()); - Dbt datadbt = new Dbt("data".getBytes()); - db.put(null, keydbt, datadbt, 0); - - // Now, retrieve. We could use keydbt over again, - // but that wouldn't be typical in an application. 
- Dbt goodkeydbt = new Dbt("key".getBytes()); - Dbt badkeydbt = new Dbt("badkey".getBytes()); - Dbt resultdbt = new Dbt(); - resultdbt.setFlags(Db.DB_DBT_MALLOC); - - int ret; - - if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) { - System.out.println("get: " + DbEnv.strerror(ret)); - } - else { - String result = - new String(resultdbt.getData(), 0, resultdbt.getSize()); - System.out.println("got data: " + result); - } - - if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) { - // We expect this... - System.out.println("get using bad key: " + DbEnv.strerror(ret)); - } - else { - String result = - new String(resultdbt.getData(), 0, resultdbt.getSize()); - System.out.println("*** got data using bad key!!: " + result); - } - } -} diff --git a/storage/bdb/test/scr016/TestXAServlet.java b/storage/bdb/test/scr016/TestXAServlet.java deleted file mode 100644 index 29545b3b93a..00000000000 --- a/storage/bdb/test/scr016/TestXAServlet.java +++ /dev/null @@ -1,313 +0,0 @@ -/*- - * See the file LICENSE for redistribution information. - * - * Copyright (c) 1997-2004 - * Sleepycat Software. All rights reserved. - * - * $Id: TestXAServlet.java,v 1.4 2004/01/28 03:36:34 bostic Exp $ - */ - -/* - * Simple test of XA, using WebLogic. 
- */ - -package com.sleepycat.test; - -import com.sleepycat.db.*; -import com.sleepycat.db.xa.*; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Hashtable; -import javax.servlet.*; -import javax.servlet.http.*; -import javax.transaction.*; -import javax.transaction.xa.*; -import javax.naming.Context; -import javax.naming.InitialContext; -import javax.naming.NamingException; -import weblogic.transaction.TxHelper; -import weblogic.transaction.TransactionManager; - -public class TestXAServlet extends HttpServlet -{ - public static final String ENV_HOME = "TESTXADIR"; - public static final String DEFAULT_URL = "t3://localhost:7001"; - public static String filesep = System.getProperty("file.separator"); - - private static TransactionManager tm; - private static DbXAResource xaresource; - private static boolean initialized = false; - - /** - * Utility to remove files recursively. - */ - public static void removeRecursive(File f) - { - if (f.isDirectory()) { - String[] sub = f.list(); - for (int i=0; iIf you're debugging, you should now start the debugger and set breakpoints."); - } - - public void doXATransaction(PrintWriter out, String key, String value, - String operation) - throws ServletException, IOException - { - try { - int counter = incrCount(); - if (key == null || key.equals("")) - key = "key" + counter; - if (value == null || value.equals("")) - value = "value" + counter; - - out.println("Adding (\"" + key + "\", \"" + value + "\")"); - - System.out.println("XA transaction begin"); - tm.begin(); - System.out.println("getting XA transaction"); - DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null); - DbTxn txn = attach.get_txn(); - DbEnv env = attach.get_env(); - Db db = new Db(env, 0); - db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - System.out.println("DB put " + key); - db.put(txn, - new Dbt(key.getBytes()), - new Dbt(value.getBytes()), - 
0); - - if (operation.equals("rollback")) { - out.println("

ROLLBACK"); - System.out.println("XA transaction rollback"); - tm.rollback(); - System.out.println("XA rollback returned"); - - // The old db is no good after the rollback - // since the open was part of the transaction. - // Get another db for the cursor dump - // - db = new Db(env, 0); - db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644); - } - else { - out.println("

COMMITTED"); - System.out.println("XA transaction commit"); - tm.commit(); - } - - // Show the current state of the database. - Dbc dbc = db.cursor(null, 0); - Dbt gotkey = new Dbt(); - Dbt gotdata = new Dbt(); - - out.println("

Current database values:"); - while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) { - out.println("
" + getDbtString(gotkey) + " : " - + getDbtString(gotdata)); - } - dbc.close(); - db.close(0); - } - catch (DbException dbe) { - System.err.println("Db Exception: " + dbe); - out.println(" *** Exception received: " + dbe); - dbe.printStackTrace(); - } - catch (FileNotFoundException fnfe) { - System.err.println("FileNotFoundException: " + fnfe); - out.println(" *** Exception received: " + fnfe); - fnfe.printStackTrace(); - } - // Includes SystemException, NotSupportedException, RollbackException - catch (Exception e) { - System.err.println("Exception: " + e); - out.println(" *** Exception received: " + e); - e.printStackTrace(); - } - } - - private static Xid getBogusXid() - throws XAException - { - return new DbXid(1, "BOGUS_gtrid".getBytes(), - "BOGUS_bqual".getBytes()); - } - - private static String getDbtString(Dbt dbt) - { - return new String(dbt.get_data(), 0, dbt.get_size()); - } - - /** - * doGet is called as a result of invoking the servlet. - */ - public void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException - { - try { - resp.setContentType("text/html"); - PrintWriter out = resp.getWriter(); - - String key = req.getParameter("key"); - String value = req.getParameter("value"); - String operation = req.getParameter("operation"); - - out.println(""); - out.println(""); - out.println("Berkeley DB with XA"); - out.println(""); - out.println("Database put and commit
"); - out.println("Database put and rollback
"); - out.println("Close the XA resource manager
"); - out.println("Forget an operation (bypasses TM)
"); - out.println("Prepare an operation (bypasses TM)
"); - out.println("
"); - - if (!debugInited) { - // Don't initialize XA yet, give the user - // a chance to attach a debugger if necessary. - debugSetup(out); - debugInited = true; - } - else { - initialize(); - if (operation == null) - operation = "commit"; - - if (operation.equals("close")) { - shutdown(out); - } - else if (operation.equals("forget")) { - // A bogus test, we just make sure the API is callable. - out.println("

FORGET"); - System.out.println("XA forget bogus XID (bypass TM)"); - xaresource.forget(getBogusXid()); - } - else if (operation.equals("prepare")) { - // A bogus test, we just make sure the API is callable. - out.println("

PREPARE"); - System.out.println("XA prepare bogus XID (bypass TM)"); - xaresource.prepare(getBogusXid()); - } - else { - // commit, rollback, prepare, forget - doXATransaction(out, key, value, operation); - } - } - out.println(""); - - System.out.println("Finished."); - } - // Includes SystemException, NotSupportedException, RollbackException - catch (Exception e) { - System.err.println("Exception: " + e); - e.printStackTrace(); - } - - } - - - /** - * From weblogic's sample code: - * samples/examples/jta/jmsjdbc/Client.java - */ - private static InitialContext getInitialContext(String url) - throws NamingException - { - Hashtable env = new Hashtable(); - env.put(Context.INITIAL_CONTEXT_FACTORY, - "weblogic.jndi.WLInitialContextFactory"); - env.put(Context.PROVIDER_URL, url); - return new InitialContext(env); - } - -} diff --git a/storage/bdb/test/scr016/chk.javatests b/storage/bdb/test/scr016/chk.javatests deleted file mode 100644 index 2e2217dc720..00000000000 --- a/storage/bdb/test/scr016/chk.javatests +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.javatests,v 1.6 2003/11/21 02:35:46 bostic Exp $ -# -# Check to make sure that regression tests for Java run. - -TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory -JAVA=${JAVA:-java} -JAVAC=${JAVAC:-javac} - -# CLASSPATH is used by javac and java. -# We use CLASSPATH rather than the -classpath command line option -# because the latter behaves differently from JDK1.1 and JDK1.2 -export CLASSPATH="./classes:../db.jar" -export LD_LIBRARY_PATH="../.libs" - - -# All paths must be relative to a subdirectory of the build directory -LIBS="-L.. -ldb -ldb_cxx" -CXXFLAGS="-I.. -I../../dbinc" - -# Test must be run from a local build directory, not from a test -# directory. -cd .. -[ -f db_config.h ] || { - echo 'FAIL: chk.javatests must be run from a local build directory.' - exit 1 -} -[ -d ../env ] || { - echo 'FAIL: chk.javatests must be run from a local build directory.' 
- exit 1 -} -version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README ` -[ -f libdb_java-$version.la ] || make libdb_java-$version.la || { - echo "FAIL: unable to build libdb_java-$version.la" - exit 1 -} -[ -f db.jar ] || make db.jar || { - echo 'FAIL: unable to build db.jar' - exit 1 -} -testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'` - -for testname in $testnames; do - if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then - echo " **** java test $testname ignored" - continue - fi - - echo " ==== java test $testname" - rm -rf TESTJAVA; mkdir -p TESTJAVA/classes - cd ./TESTJAVA - testprefix=../$TEST_JAVA_SRCDIR/$testname - ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || { -pwd - echo "FAIL: compilation of $testname failed, see ../$testname.compileout" - exit 1 - } - rm -f ../$testname.compileout - infile=$testprefix.testin - [ -f $infile ] || infile=/dev/null - goodoutfile=$testprefix.testout - [ -f $goodoutfile ] || goodoutfile=/dev/null - gooderrfile=$testprefix.testerr - [ -f $gooderrfile ] || gooderrfile=/dev/null - ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err - cmp ../$testname.out $goodoutfile > /dev/null || { - echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile" - exit 1 - } - cmp ../$testname.err $gooderrfile > /dev/null || { - echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile" - exit 1 - } - cd .. 
- rm -f $testname.err $testname.out -done -rm -rf TESTJAVA -exit 0 diff --git a/storage/bdb/test/scr016/ignore b/storage/bdb/test/scr016/ignore deleted file mode 100644 index 03469ecdc51..00000000000 --- a/storage/bdb/test/scr016/ignore +++ /dev/null @@ -1,25 +0,0 @@ -# -# $Id: ignore,v 1.7 2003/08/07 15:48:03 mjc Exp $ -# -# A list of tests to ignore - -# These tests are not debugged -TestRpcServer -TestReplication - -# These are currently not working -TestAppendRecno -TestAssociate -TestLogc -TestConstruct01 -TestConstruct02 - -# We no longer check to see that a Dbt is used more than -# once simultaneously. It's no longer a disastrous error. -TestSameDbt - -# TestUtil is used by the other tests, it does not stand on its own -TestUtil - -# XA needs a special installation, it is not part of testall -TestXAServlet diff --git a/storage/bdb/test/scr016/testall b/storage/bdb/test/scr016/testall deleted file mode 100644 index a4e1b5a8c70..00000000000 --- a/storage/bdb/test/scr016/testall +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh - -# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $ -# -# Run all the Java regression tests - -ecode=0 -prefixarg="" -stdinarg="" -while : -do - case "$1" in - --prefix=* ) - prefixarg="$1"; shift;; - --stdin ) - stdinarg="$1"; shift;; - * ) - break - esac -done -files="`find . -name \*.java -print`" -for file in $files; do - name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'` - if grep $name ignore > /dev/null; then - echo " **** java test $name ignored" - else - echo " ==== java test $name" - if ! 
sh ./testone $prefixarg $stdinarg $name; then - ecode=1 - fi - fi -done -exit $ecode diff --git a/storage/bdb/test/scr016/testone b/storage/bdb/test/scr016/testone deleted file mode 100644 index ff654da3af8..00000000000 --- a/storage/bdb/test/scr016/testone +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/sh - -# $Id: testone,v 1.7 2003/05/06 17:09:44 dda Exp $ -# -# Run just one Java regression test, the single argument -# is the classname within this package. - -error() -{ - echo '' >&2 - echo "Java regression error: $@" >&2 - echo '' >&2 - ecode=1 -} - -# compares the result against the good version, -# reports differences, and removes the result file -# if there are no differences. -# -compare_result() -{ - good="$1" - latest="$2" - if [ ! -e "$good" ]; then - echo "Note: $good does not exist" - return - fi - tmpout=/tmp/blddb$$.tmp - diff "$good" "$latest" > $tmpout - if [ -s $tmpout ]; then - nbad=`grep '^[0-9]' $tmpout | wc -l` - error "$good and $latest differ in $nbad places." - else - rm $latest - fi - rm -f $tmpout -} - -ecode=0 -stdinflag=n -JAVA=${JAVA:-java} -JAVAC=${JAVAC:-javac} - -# classdir is relative to TESTDIR subdirectory -classdir=./classes - -# CLASSPATH is used by javac and java. -# We use CLASSPATH rather than the -classpath command line option -# because the latter behaves differently from JDK1.1 and JDK1.2 -export CLASSPATH="$classdir:$CLASSPATH" - -# determine the prefix of the install tree -prefix="" -while : -do - case "$1" in - --prefix=* ) - prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift - export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH" - export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH" - ;; - --stdin ) - stdinflag=y; shift - ;; - * ) - break - ;; - esac -done - -if [ "$#" = 0 ]; then - echo 'Usage: testone [ --prefix=

| --stdin ] TestName' - exit 1 -fi -name="$1" - -# class must be public -if ! grep "public.*class.*$name" $name.java > /dev/null; then - error "public class $name is not declared in file $name.java" - exit 1 -fi - -# compile -rm -rf TESTDIR; mkdir TESTDIR -cd ./TESTDIR -mkdir -p $classdir -${JAVAC} -deprecation -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1 -ERR=$? -if [ $ERR != 0 -o -s ../$name.compileout ]; then - error "compilation of $name failed, see $name.compileout" - if [ "$ERR" != 0 ]; then - exit 1 - fi -else - rm -f ../$name.compileout -fi - -# find input and error file -infile=../$name.testin -if [ ! -f $infile ]; then - infile=/dev/null -fi - -# run and diff results -rm -rf TESTDIR -if [ "$stdinflag" = y ] -then - ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err -else - ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err -fi -cd .. - -testerr=$name.testerr -if [ ! -f $testerr ]; then - testerr=/dev/null -fi - -testout=$name.testout -if [ ! 
-f $testout ]; then - testout=/dev/null -fi - -compare_result $testout $name.out -compare_result $testerr $name.err -rm -rf TESTDIR -exit $ecode diff --git a/storage/bdb/test/scr017/O.BH b/storage/bdb/test/scr017/O.BH deleted file mode 100644 index cd499d38779..00000000000 --- a/storage/bdb/test/scr017/O.BH +++ /dev/null @@ -1,196 +0,0 @@ -abc_10_efg -abc_10_efg -abc_11_efg -abc_11_efg -abc_12_efg -abc_12_efg -abc_13_efg -abc_13_efg -abc_14_efg -abc_14_efg -abc_15_efg -abc_15_efg -abc_16_efg -abc_16_efg -abc_17_efg -abc_17_efg -abc_18_efg -abc_18_efg -abc_19_efg -abc_19_efg -abc_1_efg -abc_1_efg -abc_20_efg -abc_20_efg -abc_21_efg -abc_21_efg -abc_22_efg -abc_22_efg -abc_23_efg -abc_23_efg -abc_24_efg -abc_24_efg -abc_25_efg -abc_25_efg -abc_26_efg -abc_26_efg -abc_27_efg -abc_27_efg -abc_28_efg -abc_28_efg -abc_29_efg -abc_29_efg -abc_2_efg -abc_2_efg -abc_30_efg -abc_30_efg -abc_31_efg -abc_31_efg -abc_32_efg -abc_32_efg -abc_33_efg -abc_33_efg -abc_34_efg -abc_34_efg -abc_36_efg -abc_36_efg -abc_37_efg -abc_37_efg -abc_38_efg -abc_38_efg -abc_39_efg -abc_39_efg -abc_3_efg -abc_3_efg -abc_40_efg -abc_40_efg -abc_41_efg -abc_41_efg -abc_42_efg -abc_42_efg -abc_43_efg -abc_43_efg -abc_44_efg -abc_44_efg -abc_45_efg -abc_45_efg -abc_46_efg -abc_46_efg -abc_47_efg -abc_47_efg -abc_48_efg -abc_48_efg -abc_49_efg -abc_49_efg -abc_4_efg -abc_4_efg -abc_50_efg -abc_50_efg -abc_51_efg -abc_51_efg -abc_52_efg -abc_52_efg -abc_53_efg -abc_53_efg -abc_54_efg -abc_54_efg -abc_55_efg -abc_55_efg -abc_56_efg -abc_56_efg -abc_57_efg -abc_57_efg -abc_58_efg -abc_58_efg -abc_59_efg -abc_59_efg -abc_5_efg -abc_5_efg -abc_60_efg -abc_60_efg -abc_61_efg -abc_61_efg -abc_62_efg -abc_62_efg -abc_63_efg -abc_63_efg -abc_64_efg -abc_64_efg -abc_65_efg -abc_65_efg -abc_66_efg -abc_66_efg -abc_67_efg -abc_67_efg -abc_68_efg -abc_68_efg -abc_69_efg -abc_69_efg -abc_6_efg -abc_6_efg -abc_70_efg -abc_70_efg -abc_71_efg -abc_71_efg -abc_72_efg -abc_72_efg -abc_73_efg -abc_73_efg -abc_74_efg 
-abc_74_efg -abc_75_efg -abc_75_efg -abc_76_efg -abc_76_efg -abc_77_efg -abc_77_efg -abc_78_efg -abc_78_efg -abc_79_efg -abc_79_efg -abc_7_efg -abc_7_efg -abc_80_efg -abc_80_efg -abc_81_efg -abc_81_efg -abc_82_efg -abc_82_efg -abc_83_efg -abc_83_efg -abc_84_efg -abc_84_efg -abc_85_efg -abc_85_efg -abc_86_efg -abc_86_efg -abc_87_efg -abc_87_efg -abc_88_efg -abc_88_efg -abc_89_efg -abc_89_efg -abc_8_efg -abc_8_efg -abc_90_efg -abc_90_efg -abc_91_efg -abc_91_efg -abc_92_efg -abc_92_efg -abc_93_efg -abc_93_efg -abc_94_efg -abc_94_efg -abc_95_efg -abc_95_efg -abc_96_efg -abc_96_efg -abc_97_efg -abc_97_efg -abc_98_efg -abc_98_efg -abc_99_efg -abc_99_efg -abc_9_efg -abc_9_efg diff --git a/storage/bdb/test/scr017/O.R b/storage/bdb/test/scr017/O.R deleted file mode 100644 index d78a04727d8..00000000000 --- a/storage/bdb/test/scr017/O.R +++ /dev/null @@ -1,196 +0,0 @@ -1 -abc_1_efg -2 -abc_2_efg -3 -abc_3_efg -4 -abc_4_efg -5 -abc_5_efg -6 -abc_6_efg -7 -abc_7_efg -8 -abc_8_efg -9 -abc_9_efg -10 -abc_10_efg -11 -abc_11_efg -12 -abc_12_efg -13 -abc_13_efg -14 -abc_14_efg -15 -abc_15_efg -16 -abc_16_efg -17 -abc_17_efg -18 -abc_18_efg -19 -abc_19_efg -20 -abc_20_efg -21 -abc_21_efg -22 -abc_22_efg -23 -abc_23_efg -24 -abc_24_efg -25 -abc_25_efg -26 -abc_26_efg -27 -abc_27_efg -28 -abc_28_efg -29 -abc_29_efg -30 -abc_30_efg -31 -abc_31_efg -32 -abc_32_efg -33 -abc_33_efg -34 -abc_34_efg -35 -abc_36_efg -36 -abc_37_efg -37 -abc_38_efg -38 -abc_39_efg -39 -abc_40_efg -40 -abc_41_efg -41 -abc_42_efg -42 -abc_43_efg -43 -abc_44_efg -44 -abc_45_efg -45 -abc_46_efg -46 -abc_47_efg -47 -abc_48_efg -48 -abc_49_efg -49 -abc_50_efg -50 -abc_51_efg -51 -abc_52_efg -52 -abc_53_efg -53 -abc_54_efg -54 -abc_55_efg -55 -abc_56_efg -56 -abc_57_efg -57 -abc_58_efg -58 -abc_59_efg -59 -abc_60_efg -60 -abc_61_efg -61 -abc_62_efg -62 -abc_63_efg -63 -abc_64_efg -64 -abc_65_efg -65 -abc_66_efg -66 -abc_67_efg -67 -abc_68_efg -68 -abc_69_efg -69 -abc_70_efg -70 -abc_71_efg -71 -abc_72_efg -72 
-abc_73_efg -73 -abc_74_efg -74 -abc_75_efg -75 -abc_76_efg -76 -abc_77_efg -77 -abc_78_efg -78 -abc_79_efg -79 -abc_80_efg -80 -abc_81_efg -81 -abc_82_efg -82 -abc_83_efg -83 -abc_84_efg -84 -abc_85_efg -85 -abc_86_efg -86 -abc_87_efg -87 -abc_88_efg -88 -abc_89_efg -89 -abc_90_efg -90 -abc_91_efg -91 -abc_92_efg -92 -abc_93_efg -93 -abc_94_efg -94 -abc_95_efg -95 -abc_96_efg -96 -abc_97_efg -97 -abc_98_efg -98 -abc_99_efg diff --git a/storage/bdb/test/scr017/chk.db185 b/storage/bdb/test/scr017/chk.db185 deleted file mode 100644 index e6bfef84562..00000000000 --- a/storage/bdb/test/scr017/chk.db185 +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.db185,v 1.3 2003/09/30 19:31:36 bostic Exp $ -# -# Check to make sure we can run DB 1.85 code. - -[ -f ../libdb.a ] || (cd .. && make libdb.a) || { - echo 'FAIL: unable to find or build libdb.a' - exit 1 -} - -if cc -g -Wall -I.. t.c ../libdb.a -o t; then - : -else - echo "FAIL: unable to compile test program t.c" - exit 1 -fi - -if ./t; then - : -else - echo "FAIL: test program failed" - exit 1 -fi - -# Clean up so the next test doesn't get confused. 
-rm -rf * - -exit 0 diff --git a/storage/bdb/test/scr017/t.c b/storage/bdb/test/scr017/t.c deleted file mode 100644 index f03b33880d6..00000000000 --- a/storage/bdb/test/scr017/t.c +++ /dev/null @@ -1,188 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "db_185.h" - -void err(char *); -int mycmp(const DBT *, const DBT *); -void ops(DB *, int); - -int -main() -{ - DB *dbp; - HASHINFO h_info; - BTREEINFO b_info; - RECNOINFO r_info; - - printf("\tBtree...\n"); - memset(&b_info, 0, sizeof(b_info)); - b_info.flags = R_DUP; - b_info.cachesize = 100 * 1024; - b_info.psize = 512; - b_info.lorder = 4321; - b_info.compare = mycmp; - (void)remove("a.db"); - if ((dbp = - dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL) - err("dbopen: btree"); - ops(dbp, DB_BTREE); - - printf("\tHash...\n"); - memset(&h_info, 0, sizeof(h_info)); - h_info.bsize = 512; - h_info.ffactor = 6; - h_info.nelem = 1000; - h_info.cachesize = 100 * 1024; - h_info.lorder = 1234; - (void)remove("a.db"); - if ((dbp = - dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL) - err("dbopen: hash"); - ops(dbp, DB_HASH); - - printf("\tRecno...\n"); - memset(&r_info, 0, sizeof(r_info)); - r_info.flags = R_FIXEDLEN; - r_info.cachesize = 100 * 1024; - r_info.psize = 1024; - r_info.reclen = 37; - (void)remove("a.db"); - if ((dbp = - dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL) - err("dbopen: recno"); - ops(dbp, DB_RECNO); - - return (0); -} - -int -mycmp(a, b) - const DBT *a, *b; -{ - size_t len; - u_int8_t *p1, *p2; - - len = a->size > b->size ? 
b->size : a->size; - for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2) - if (*p1 != *p2) - return ((long)*p1 - (long)*p2); - return ((long)a->size - (long)b->size); -} - -void -ops(dbp, type) - DB *dbp; - int type; -{ - FILE *outfp; - DBT key, data; - recno_t recno; - int i, ret; - char buf[64]; - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - for (i = 1; i < 100; ++i) { /* Test DB->put. */ - sprintf(buf, "abc_%d_efg", i); - if (type == DB_RECNO) { - recno = i; - key.data = &recno; - key.size = sizeof(recno); - } else { - key.data = data.data = buf; - key.size = data.size = strlen(buf); - } - - data.data = buf; - data.size = strlen(buf); - if (dbp->put(dbp, &key, &data, 0)) - err("DB->put"); - } - - if (type == DB_RECNO) { /* Test DB->get. */ - recno = 97; - key.data = &recno; - key.size = sizeof(recno); - } else { - key.data = buf; - key.size = strlen(buf); - } - sprintf(buf, "abc_%d_efg", 97); - if (dbp->get(dbp, &key, &data, 0) != 0) - err("DB->get"); - if (memcmp(data.data, buf, strlen(buf))) - err("DB->get: wrong data returned"); - - if (type == DB_RECNO) { /* Test DB->put no-overwrite. */ - recno = 42; - key.data = &recno; - key.size = sizeof(recno); - } else { - key.data = buf; - key.size = strlen(buf); - } - sprintf(buf, "abc_%d_efg", 42); - if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0) - err("DB->put: no-overwrite succeeded"); - - if (type == DB_RECNO) { /* Test DB->del. */ - recno = 35; - key.data = &recno; - key.size = sizeof(recno); - } else { - sprintf(buf, "abc_%d_efg", 35); - key.data = buf; - key.size = strlen(buf); - } - if (dbp->del(dbp, &key, 0)) - err("DB->del"); - - /* Test DB->seq. 
*/ - if ((outfp = fopen("output", "w")) == NULL) - err("fopen: output"); - while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) { - if (type == DB_RECNO) - fprintf(outfp, "%d\n", *(int *)key.data); - else - fprintf(outfp, - "%.*s\n", (int)key.size, (char *)key.data); - fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data); - } - if (ret != 1) - err("DB->seq"); - fclose(outfp); - switch (type) { - case DB_BTREE: - ret = system("cmp output O.BH"); - break; - case DB_HASH: - ret = system("sort output | cmp - O.BH"); - break; - case DB_RECNO: - ret = system("cmp output O.R"); - break; - } - if (ret != 0) - err("output comparison failed"); - - if (dbp->sync(dbp, 0)) /* Test DB->sync. */ - err("DB->sync"); - - if (dbp->close(dbp)) /* Test DB->close. */ - err("DB->close"); -} - -void -err(s) - char *s; -{ - fprintf(stderr, "\t%s: %s\n", s, strerror(errno)); - exit (1); -} diff --git a/storage/bdb/test/scr018/chk.comma b/storage/bdb/test/scr018/chk.comma deleted file mode 100644 index 42df48d1881..00000000000 --- a/storage/bdb/test/scr018/chk.comma +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $ -# -# Look for trailing commas in declarations. Some compilers can't handle: -# enum { -# foo, -# bar, -# }; - -[ -f ../libdb.a ] || (cd .. && make libdb.a) || { - echo 'FAIL: unable to find or build libdb.a' - exit 1 -} - -if cc -g -Wall -I.. 
t.c ../libdb.a -o t; then - : -else - echo "FAIL: unable to compile test program t.c" - exit 1 -fi - -if ./t ../../*/*.[ch] ../../*/*.in; then - : -else - echo "FAIL: test program failed" - exit 1 -fi - -exit 0 diff --git a/storage/bdb/test/scr018/t.c b/storage/bdb/test/scr018/t.c deleted file mode 100644 index 4056a605928..00000000000 --- a/storage/bdb/test/scr018/t.c +++ /dev/null @@ -1,46 +0,0 @@ -#include - -#include -#include -#include -#include - -int -chk(f) - char *f; -{ - int ch, l, r; - - if (freopen(f, "r", stdin) == NULL) { - fprintf(stderr, "%s: %s\n", f, strerror(errno)); - exit (1); - } - for (l = 1, r = 0; (ch = getchar()) != EOF;) { - if (ch != ',') - goto next; - do { ch = getchar(); } while (isblank(ch)); - if (ch != '\n') - goto next; - ++l; - do { ch = getchar(); } while (isblank(ch)); - if (ch != '}') - goto next; - r = 1; - printf("%s: line %d\n", f, l); - -next: if (ch == '\n') - ++l; - } - return (r); -} - -int -main(int argc, char *argv[]) -{ - int r; - - for (r = 0; *++argv != NULL;) - if (chk(*argv)) - r = 1; - return (r); -} diff --git a/storage/bdb/test/scr019/chk.include b/storage/bdb/test/scr019/chk.include deleted file mode 100644 index edd2bf6b451..00000000000 --- a/storage/bdb/test/scr019/chk.include +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.include,v 1.4 2004/10/07 20:34:39 bostic Exp $ -# -# Check for inclusion of files already included in db_int.h. - -d=../.. - -# Test must be run from the top-level directory, not from a test directory. -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' 
- exit 1 -} - -t1=__1 -t2=__2 - -egrep -- '#include[ ]' $d/dbinc/db_int.in | -sed -e '/[ ]db\.h'/d \ - -e 's/^#include.//' \ - -e 's/[<>"]//g' \ - -e 's/[ ].*//' > $t1 - -for i in `cat $t1`; do - (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch]) -done | -sed -e '/^build/d' \ - -e '/^db_dump185/d' \ - -e '/^examples_c/d' \ - -e '/^libdb_java.*errno.h/d' \ - -e '/^libdb_java.*java_util.h/d' \ - -e '/^mod_db4/d' \ - -e '/^mutex\/tm.c/d' \ - -e '/^perl/d' \ - -e '/^php_db4/d' \ - -e '/^test_/d' \ - > $t2 - -[ -s $t2 ] && { - echo 'FAIL: found extraneous includes in the source' - cat $t2 - exit 1 -} -exit 0 diff --git a/storage/bdb/test/scr020/chk.inc b/storage/bdb/test/scr020/chk.inc deleted file mode 100644 index 24c1a47c10d..00000000000 --- a/storage/bdb/test/scr020/chk.inc +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.inc,v 1.2 2003/08/01 16:49:29 bostic Exp $ -# -# Check for inclusion of db_config.h after "const" or other includes. - -d=../.. - -# Test must be run from the top-level directory, not from a test directory. -[ -f $d/LICENSE ] || { - echo 'FAIL: cannot find source distribution directory.' - exit 1 -} - -t1=__1 -t2=__2 - -(cd $d && find . 
-name '*.[chys]' -o -name '*.cpp' | - xargs egrep -l '#include.*db_config.h') | tee /tmp/o | - sed -e '/dbdemo.c$/d' \ - -e '/db_java_wrap.c$/d' > $t1 - -(for i in `cat $t1`; do - egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 -done) > $t2 - -if egrep const $t2 > /dev/null; then - echo 'FAIL: found const before include of db_config.h' - egrep const $t2 - exit 1 -fi - -:> $t2 -for i in `cat $t1`; do - egrep -w '#include' /dev/null $d/$i | head -1 >> $t2 -done - -if egrep -v db_config.h $t2 > /dev/null; then - echo 'FAIL: found includes before include of db_config.h' - egrep -v db_config.h $t2 - exit 1 -fi - -exit 0 diff --git a/storage/bdb/test/scr021/chk.flags b/storage/bdb/test/scr021/chk.flags deleted file mode 100644 index 2f32b82e636..00000000000 --- a/storage/bdb/test/scr021/chk.flags +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.flags,v 1.12 2004/10/19 02:28:19 bostic Exp $ -# -# Check flag name-spaces. - -d=../.. -t1=__1 -t2=__2 - -if cc -g -Wall -I.. t.c -o t; then - : -else - echo "FAIL: unable to compile test program t.c" - exit 1 -fi - -if ./t $d/*/*.[ch] $d/*/*.in > $t1; then - : -else - echo "FAIL: test program failed" - exit 1 -fi - -echo Checking DB_ENV flags... -cat $t1 | -grep '(dbenv,' | -sed -e '/DB_ENV_/d' \ - -e '/env_method.c.*, mapped_flags*)/d' \ - -e '/env_region.c.*, db_env_reset)/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -grep 'DB_ENV_' $t1 | -sed -e '/((*dbenv)*,/d' \ - -e '/((*dbp)*->dbenv,/d' \ - -e '/((*infop)*->dbenv,/d' \ - -e '/((*reginfop)*->dbenv,/d' \ - -e '/((*sdbp)*->dbenv,/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -echo Checking DB flags... 
-cp $t1 /tmp/_f -cat $t1 | -grep '(dbp,' | -sed -e '/DB_AM_/d' \ - -e '/dbp, mapped_flag)/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -grep 'DB_AM_' $t1 | -sed -e '/((*[ ]*db_rep->rep_db)*,/d' \ - -e '/((*[ ]*dbc)*->dbp,/d' \ - -e '/((*[ ]*dbc_arg->dbp)*,/d' \ - -e '/((*[ ]*dbp)*,/d' \ - -e '/((*[ ]*dbp)*->s_primary,/d' \ - -e '/((D),/d' \ - -e '/(mdbp,/d' \ - -e '/(pdbp,/d' \ - -e '/(pginfo, /d' \ - -e '/(sdbp,/d' \ - -e '/(subdbp,/d' \ - -e '/fop_util.c:.*(t2dbp,/d' \ - -e '/rep_backup.c.*(rfp,/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -echo Checking DBC flags... -cat $t1 | -grep '(dbc,' | -sed -e '/DBC_/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -grep 'DBC_' $t1 | -sed -e '/((*dbc)*,/d' \ - -e '/(dbc_arg,/d' \ - -e '/(dbc_c,/d' \ - -e '/(dbc_n,/d' \ - -e '/(dbc_orig,/d' \ - -e '/(opd,/d' \ - -e '/(pdbc,/d' \ - -e '/(sdbc,/d' \ - > $t2 -[ -s $t2 ] && { - cat $t2 - exit 1 -} - -echo Checking for bad use of macros... -egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1 -egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1 -egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1 -egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1 -egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1 -[ -s $t1 ] && { - echo 'if statement followed by non-test macro' - cat $t1 - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/scr022/chk.rr b/storage/bdb/test/scr022/chk.rr deleted file mode 100644 index 53d8bb15833..00000000000 --- a/storage/bdb/test/scr022/chk.rr +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -# -# $Id: chk.rr,v 1.5 2004/10/07 20:40:43 bostic Exp $ - -d=../.. - -t1=__1 - -# Check for DB_RUNRECOVERY being specified instead of a call to db_panic. 
-egrep DB_RUNRECOVERY $d/*/*.c | - sed -e '/__db_panic(.*, DB_RUNRECOVERY)/d' \ - -e '/case DB_RUNRECOVERY:/d' \ - -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \ - -e '/db_err.c:/d' \ - -e '/os_errno.c:.*evalue == DB_RUNRECOVERY/d' \ - -e '/\/php_db4\//d' \ - -e '/rep_backup.c:.*Panic the env and return DB_RUNRECOVERY/d' \ - -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \ - -e '/txn.c:.*returned DB_RUNRECOVERY and we need to/d' \ - > $t1 -[ -s $t1 ] && { - echo "DB_RUNRECOVERY used; should be a call to db_panic." - cat $t1 - exit 1 -} - -exit 0 diff --git a/storage/bdb/test/sdb001.tcl b/storage/bdb/test/sdb001.tcl deleted file mode 100644 index 4ebb24dea29..00000000000 --- a/storage/bdb/test/sdb001.tcl +++ /dev/null @@ -1,147 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb001.tcl,v 11.23 2004/04/05 17:49:25 carol Exp $ -# -# TEST sdb001 Tests mixing db and subdb operations -# TEST Tests mixing db and subdb operations -# TEST Create a db, add data, try to create a subdb. 
-# TEST Test naming db and subdb with a leading - for correct parsing -# TEST Existence check -- test use of -excl with subdbs -# TEST -# TEST Test non-subdb and subdb operations -# TEST Test naming (filenames begin with -) -# TEST Test existence (cannot create subdb of same name with -excl) -proc sdb001 { method args } { - source ./include.tcl - global errorInfo - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb001: skipping for method $method" - return - } - puts "Subdb001: $method ($args) subdb and non-subdb tests" - - set testfile $testdir/subdb001.db - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - set env NULL - incr eindex - set env [lindex $args $eindex] - puts "Subdb001 skipping for env $env" - return - } - # Create the database and open the dictionary - set subdb subdb0 - cleanup $testdir NULL - puts "\tSubdb001.a: Non-subdb database and subdb operations" - # - # Create a db with no subdbs. Add some data. Close. Try to - # open/add with a subdb. Should fail. 
- # - puts "\tSubdb001.a.0: Create db, add data, close, try subdb" - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - while { [gets $did str] != -1 && $count < 5 } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) $str - } else { - set key $str - } - set ret [eval \ - {$db put} $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - incr count - } - close $did - error_check_good db_close [$db close] 0 - set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \ - {$omethod $testfile $subdb}} db] - error_check_bad dbopen $ret 0 - # - # Create a db with no subdbs. Add no data. Close. Try to - # open/add with a subdb. Should fail. - # - set testfile $testdir/subdb001a.db - puts "\tSubdb001.a.1: Create db, close, try subdb" - # - # !!! - # Using -truncate is illegal when opening for subdbs, but we - # can use it here because we are not using subdbs for this - # create. - # - set db [eval {berkdb_open -create -truncate -mode 0644} $args \ - {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \ - {$omethod $testfile $subdb}} db] - error_check_bad dbopen $ret 0 - - if { [is_queue $method] == 1 } { - puts "Subdb001: skipping remainder of test for method $method" - return - } - - # - # Test naming, db and subdb names beginning with -. 
- # - puts "\tSubdb001.b: Naming" - set cwd [pwd] - cd $testdir - set testfile1 -subdb001.db - set subdb -subdb - puts "\tSubdb001.b.0: Create db and subdb with -name, no --" - set ret [catch {eval {berkdb_open -create -mode 0644} $args \ - {$omethod $testfile1 $subdb}} db] - error_check_bad dbopen $ret 0 - puts "\tSubdb001.b.1: Create db and subdb with -name, with --" - set db [eval {berkdb_open -create -mode 0644} $args \ - {$omethod -- $testfile1 $subdb}] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - cd $cwd - - # - # Create 1 db with 1 subdb. Try to create another subdb of - # the same name. Should fail. - # - - puts "\tSubdb001.c: Existence check" - set testfile $testdir/subdb001d.db - set subdb subdb - set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \ - {$omethod $testfile $subdb}} db] - error_check_good dbopen [is_valid_db $db] TRUE - set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \ - $args {$omethod $testfile $subdb}} db1] - error_check_bad dbopen $ret 0 - error_check_good db_close [$db close] 0 - - return -} diff --git a/storage/bdb/test/sdb002.tcl b/storage/bdb/test/sdb002.tcl deleted file mode 100644 index d951d716310..00000000000 --- a/storage/bdb/test/sdb002.tcl +++ /dev/null @@ -1,228 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb002.tcl,v 11.41 2004/04/05 17:49:26 carol Exp $ -# -# TEST sdb002 -# TEST Tests basic subdb functionality -# TEST Small keys, small data -# TEST Put/get per key -# TEST Dump file -# TEST Close, reopen -# TEST Dump file -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. -# TEST Then repeat using an environment. 
-proc sdb002 { method {nentries 10000} args } { - global passwd - global has_crypto - - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - set env NULL - incr eindex - set env [lindex $args $eindex] - puts "Subdb002 skipping for env $env" - return - } - set largs $args - subdb002_main $method $nentries $largs - append largs " -chksum " - subdb002_main $method $nentries $largs - - # Skip remainder of test if release does not support encryption. - if { $has_crypto == 0 } { - return - } - - append largs "-encryptaes $passwd " - subdb002_main $method $nentries $largs -} - -proc subdb002_main { method nentries largs } { - source ./include.tcl - global encrypt - - set largs [convert_args $method $largs] - set omethod [convert_method $method] - - env_cleanup $testdir - - puts "Subdb002: $method ($largs) basic subdb tests" - set testfile $testdir/subdb002.db - subdb002_body $method $omethod $nentries $largs $testfile NULL - - # Run convert_encrypt so that old_encrypt will be reset to - # the proper value and cleanup will work. - convert_encrypt $largs - set encargs "" - set largs [split_encargs $largs encargs] - - cleanup $testdir NULL - if { [is_queue $omethod] == 1 } { - set sdb002_env berkdb_env_noerr - } else { - set sdb002_env berkdb_env - } - set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \ - -mode 0644} -home $testdir $encargs] - error_check_good env_open [is_valid_env $env] TRUE - puts "Subdb002: $method ($largs) basic subdb tests in an environment" - - # We're in an env--use default path to database rather than specifying - # it explicitly. 
- set testfile subdb002.db - subdb002_body $method $omethod $nentries $largs $testfile $env - error_check_good env_close [$env close] 0 -} - -proc subdb002_body { method omethod nentries largs testfile env } { - global encrypt - global passwd - source ./include.tcl - - # Create the database and open the dictionary - set subdb subdb0 - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - - if { [is_queue $omethod] == 1 } { - set sdb002_open berkdb_open_noerr - } else { - set sdb002_open berkdb_open - } - - if { $env == "NULL" } { - set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \ - {$omethod $testfile $subdb}} db] - } else { - set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \ - {-env $env $omethod $testfile $subdb}} db] - } - - # - # If -queue method, we need to make sure that trying to - # create a subdb fails. - if { [is_queue $method] == 1 } { - error_check_bad dbopen $ret 0 - puts "Subdb002: skipping remainder of test for method $method" - return - } - - error_check_good dbopen $ret 0 - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set count 0 - - if { [is_record_based $method] == 1 } { - set checkfunc subdb002_recno.check - append gflags " -recno" - } else { - set checkfunc subdb002.check - } - puts "\tSubdb002.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - set ret [eval \ - {$db put} $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - incr count - } - close $did - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tSubdb002.b: dump file" - set txn "" - dump_file $db $txn $t1 $checkfunc - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Subdb002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tSubdb002.c: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_subfile $testfile $env $t1 $checkfunc \ - dump_file_direction "-first" "-next" $subdb - if { [is_record_based $method] != 1 } { - filesort $t1 $t3 - } - - error_check_good Subdb002:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. - puts "\tSubdb002.d: close, open, and dump file in reverse direction" - open_and_dump_subfile $testfile $env $t1 $checkfunc \ - dump_file_direction "-last" "-prev" $subdb - - if { [is_record_based $method] != 1 } { - filesort $t1 $t3 - } - - error_check_good Subdb002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tSubdb002.e: db_dump with subdatabase" - set outfile $testdir/subdb002.dump - set dumpargs " -f $outfile -s $subdb " - if { $encrypt > 0 } { - append dumpargs " -P $passwd " - } - if { $env != "NULL" } { - append dumpargs " -h $testdir " - } - append dumpargs " $testfile" - set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret] - error_check_good dbdump.subdb $stat 0 -} - -# Check function for Subdb002; keys and data are identical -proc subdb002.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc subdb002_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key 
$key" $data $kvals($key) -} diff --git a/storage/bdb/test/sdb003.tcl b/storage/bdb/test/sdb003.tcl deleted file mode 100644 index c652a76d63f..00000000000 --- a/storage/bdb/test/sdb003.tcl +++ /dev/null @@ -1,182 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb003.tcl,v 11.29 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb003 -# TEST Tests many subdbs -# TEST Creates many subdbs and puts a small amount of -# TEST data in each (many defaults to 1000) -# TEST -# TEST Use the first 1000 entries from the dictionary as subdbnames. -# TEST Insert each with entry as name of subdatabase and a partial list -# TEST as key/data. After all are entered, retrieve all; compare output -# TEST to original. Close file, reopen, do retrieve and re-verify. -proc sdb003 { method {nentries 1000} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb003: skipping for method $method" - return - } - - puts "Subdb003: $method ($args) many subdb tests" - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/subdb003.db - set env NULL - } else { - set testfile subdb003.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - if { $nentries == 1000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - # Create the database and open the dictionary - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set pflags "" - set gflags "" - set txn "" - set fcount 0 - - if { [is_record_based $method] == 1 } { - set checkfunc subdb003_recno.check - append gflags " -recno" - } else { - set checkfunc subdb003.check - } - - # Here is the loop where we put and get each key/data pair - set ndataent 10 - set fdid [open $dict] - while { [gets $fdid str] != -1 && $fcount < $nentries } { - if { $str == "" } { - continue - } - set subdb $str - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile $subdb}] - error_check_good dbopen [is_valid_db $db] TRUE - - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $ndataent } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret [list [list $key \ - [pad_data $method $str]]] - incr count - } - close $did - incr fcount - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close 
[$db close] 0 - - # Now compare the keys to see if they match - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $ndataent} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $ndataent $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Subdb003:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again. - open_and_dump_subfile $testfile $env $t1 $checkfunc \ - dump_file_direction "-first" "-next" $subdb - if { [is_record_based $method] != 1 } { - filesort $t1 $t3 - } - - error_check_good Subdb003:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. - open_and_dump_subfile $testfile $env $t1 $checkfunc \ - dump_file_direction "-last" "-prev" $subdb - - if { [is_record_based $method] != 1 } { - filesort $t1 $t3 - } - - error_check_good Subdb003:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - if { [expr $fcount % 100] == 0 } { - puts -nonewline "$fcount " - flush stdout - } - } - close $fdid - puts "" -} - -# Check function for Subdb003; keys and data are identical -proc subdb003.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc subdb003_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/sdb004.tcl b/storage/bdb/test/sdb004.tcl deleted file mode 100644 index 273bda9bf4b..00000000000 --- a/storage/bdb/test/sdb004.tcl +++ /dev/null @@ -1,241 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: sdb004.tcl,v 11.25 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb004 -# TEST Tests large subdb names -# TEST subdb name = filecontents, -# TEST key = filename, data = filecontents -# TEST Put/get per key -# TEST Dump file -# TEST Dump subdbs, verify data and subdb name match -# TEST -# TEST Create 1 db with many large subdbs. Use the contents as subdb names. -# TEST Take the source files and dbtest executable and enter their names as -# TEST the key with their contents as data. After all are entered, retrieve -# TEST all; compare output to original. Close file, reopen, do retrieve and -# TEST re-verify. -proc sdb004 { method args} { - global names - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } { - puts "Subdb004: skipping for method $method" - return - } - - puts "Subdb004: $method ($args) \ - filecontents=subdbname filename=key filecontents=data pairs" - - set txnenv 0 - set envargs "" - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/subdb004.db - set env NULL - } else { - set testfile subdb004.db - incr eindex - set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - } - set testdir [get_home $env] - } - # Create the database and open the dictionary - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - cleanup $testdir $env - set pflags "" - set gflags "" - set txn "" - if { [is_record_based $method] == 1 } { - set checkfunc subdb004_recno.check - append gflags "-recno" - } else { - set checkfunc subdb004.check - } - - # Here is the loop where we put and get each key/data pair - # Note that the subdatabase name is passed in as a char *, not - # in a DBT, so it may not contain nulls; use only source files. - set file_list [glob $src_root/*/*.c] - set fcount [llength $file_list] - if { $txnenv == 1 && $fcount > 100 } { - set file_list [lrange $file_list 0 99] - set fcount 100 - } - - set count 0 - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $fcount} {set i [incr i]} { - puts $oid $i - } - close $oid - } else { - set oid [open $t2.tmp w] - foreach f $file_list { - puts $oid $f - } - close $oid - filesort $t2.tmp $t2 - } - puts "\tSubdb004.a: Set/Check each subdb" - foreach f $file_list { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set names([expr $count + 1]) $f - } else { - set key $f - } - # Should really catch errors - set fid [open $f r] - fconfigure $fid -translation binary - set data [read $fid] - set subdb $data - close $fid - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile $subdb}] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval \ - {$db put} $txn $pflags 
{$key [chop_data $method $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Should really catch errors - set fid [open $t4 w] - fconfigure $fid -translation binary - if [catch {eval {$db get} $gflags {$key}} data] { - puts -nonewline $fid $data - } else { - # Data looks like {{key data}} - set key [lindex [lindex $data 0] 0] - set data [lindex [lindex $data 0] 1] - puts -nonewline $fid $data - } - close $fid - - error_check_good Subdb004:diff($f,$t4) \ - [filecmp $f $t4] 0 - - incr count - - # Now we will get each key from the DB and compare the results - # to the original. - # puts "\tSubdb004.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_bin_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - } - - # - # Now for each file, check that the subdb name is the same - # as the data in that subdb and that the filename is the key. 
- # - puts "\tSubdb004.b: Compare subdb names with key/data" - set db [eval {berkdb_open -rdonly} $envargs {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set c [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - - for {set d [$c get -first] } { [llength $d] != 0 } \ - {set d [$c get -next] } { - set subdbname [lindex [lindex $d 0] 0] - set subdb [eval {berkdb_open} $args {$testfile $subdbname}] - error_check_good dbopen [is_valid_db $db] TRUE - - # Output the subdb name - set ofid [open $t3 w] - fconfigure $ofid -translation binary - if { [string compare "\0" \ - [string range $subdbname end end]] == 0 } { - set slen [expr [string length $subdbname] - 2] - set subdbname [string range $subdbname 1 $slen] - } - puts -nonewline $ofid $subdbname - close $ofid - - # Output the data - set subc [eval {$subdb cursor} $txn] - error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE - set d [$subc get -first] - error_check_good dbc_get [expr [llength $d] != 0] 1 - set key [lindex [lindex $d 0] 0] - set data [lindex [lindex $d 0] 1] - - set ofid [open $t1 w] - fconfigure $ofid -translation binary - puts -nonewline $ofid $data - close $ofid - - $checkfunc $key $t1 - $checkfunc $key $t3 - - error_check_good Subdb004:diff($t3,$t1) \ - [filecmp $t3 $t1] 0 - error_check_good curs_close [$subc close] 0 - error_check_good db_close [$subdb close] 0 - } - error_check_good curs_close [$c close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - if { [is_record_based $method] != 1 } { - fileremove $t2.tmp - } -} - -# Check function for subdb004; key should be file name; data should be contents -proc subdb004.check { binfile tmpfile } { - source ./include.tcl - - error_check_good Subdb004:datamismatch($binfile,$tmpfile) \ - [filecmp $binfile $tmpfile] 0 
-} -proc subdb004_recno.check { binfile tmpfile } { - global names - source ./include.tcl - - set fname $names($binfile) - error_check_good key"$binfile"_exists [info exists names($binfile)] 1 - error_check_good Subdb004:datamismatch($fname,$tmpfile) \ - [filecmp $fname $tmpfile] 0 -} diff --git a/storage/bdb/test/sdb005.tcl b/storage/bdb/test/sdb005.tcl deleted file mode 100644 index 7173bef009c..00000000000 --- a/storage/bdb/test/sdb005.tcl +++ /dev/null @@ -1,146 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb005.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb005 -# TEST Tests cursor operations in subdbs -# TEST Put/get per key -# TEST Verify cursor operations work within subdb -# TEST Verify cursor operations do not work across subdbs -# TEST -# -# We should test this on all btrees, all hash, and a combination thereof -proc sdb005 {method {nentries 100} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb005: skipping for method $method" - return - } - - puts "Subdb005: $method ( $args ) subdb cursor operations test" - set txnenv 0 - set envargs "" - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/subdb005.db - set env NULL - } else { - set testfile subdb005.db - incr eindex - set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - if { $nentries == 100 } { - set nentries 20 - } - } - set testdir [get_home $env] - } - - cleanup $testdir $env - set txn "" - set psize 8192 - set duplist {-1 -1 -1 -1 -1} - build_all_subdb \ - $testfile [list $method] $psize $duplist $nentries $args - set numdb [llength $duplist] - # - # Get a cursor in each subdb and move past the end of each - # subdb. Make sure we don't end up in another subdb. - # - puts "\tSubdb005.a: Cursor ops - first/prev and last/next" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for {set i 0} {$i < $numdb} {incr i} { - set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}] - error_check_good dbopen [is_valid_db $db] TRUE - set db_handle($i) $db - # Used in 005.c test - lappend subdbnames sub$i.db - - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - set d [$dbc get -first] - error_check_good dbc_get [expr [llength $d] != 0] 1 - - # Used in 005.b test - set db_key($i) [lindex [lindex $d 0] 0] - - set d [$dbc get -prev] - error_check_good dbc_get [expr [llength $d] == 0] 1 - set d [$dbc get -last] - error_check_good dbc_get [expr [llength $d] != 0] 1 - set d [$dbc get -next] - error_check_good dbc_get [expr [llength $d] == 0] 1 - error_check_good dbc_close [$dbc close] 0 - } - # - # Get a key from each subdb and try to get this key in a - # different subdb. 
Make sure it fails - # - puts "\tSubdb005.b: Get keys in different subdb's" - for {set i 0} {$i < $numdb} {incr i} { - set n [expr $i + 1] - if {$n == $numdb} { - set n 0 - } - set db $db_handle($i) - if { [is_record_based $method] == 1 } { - set d [eval {$db get -recno} $txn {$db_key($n)}] - error_check_good \ - db_get [expr [llength $d] == 0] 1 - } else { - set d [eval {$db get} $txn {$db_key($n)}] - error_check_good db_get [expr [llength $d] == 0] 1 - } - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - # - # Clean up - # - for {set i 0} {$i < $numdb} {incr i} { - error_check_good db_close [$db_handle($i) close] 0 - } - - # - # Check contents of DB for subdb names only. Makes sure that - # every subdbname is there and that nothing else is there. - # - puts "\tSubdb005.c: Check DB is read-only" - error_check_bad dbopen [catch \ - {berkdb_open_noerr -unknown $testfile} ret] 0 - - puts "\tSubdb005.d: Check contents of DB for subdb names only" - set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set subdblist [$db get -glob *] - foreach kd $subdblist { - # subname also used in subdb005.e,f below - set subname [lindex $kd 0] - set i [lsearch $subdbnames $subname] - error_check_good subdb_search [expr $i != -1] 1 - set subdbnames [lreplace $subdbnames $i $i] - } - error_check_good subdb_done [llength $subdbnames] 0 - - error_check_good db_close [$db close] 0 - return -} diff --git a/storage/bdb/test/sdb006.tcl b/storage/bdb/test/sdb006.tcl deleted file mode 100644 index ffe3e470074..00000000000 --- a/storage/bdb/test/sdb006.tcl +++ /dev/null @@ -1,169 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: sdb006.tcl,v 11.23 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb006 -# TEST Tests intra-subdb join -# TEST -# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work, -# TEST everything else does as well. We'll create test databases called -# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database -# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N, -# TEST ... where N is the number of the database. Primary.db is the primary -# TEST database, and sub0.db is the database that has no matching duplicates. -# TEST All of these are within a single database. -# -# We should test this on all btrees, all hash, and a combination thereof -proc sdb006 {method {nentries 100} args } { - source ./include.tcl - global rand_init - - # NB: these flags are internal only, ok - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] } { - puts "\tSubdb006 skipping for method $method." - return - } - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/subdb006.db - set env NULL - } else { - set testfile subdb006.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - if { $nentries == 100 } { - # !!! - # nentries must be greater than the number - # of do_join_subdb calls below. - # - set nentries 35 - } - } - set testdir [get_home $env] - } - berkdb srand $rand_init - - set oargs $args - foreach opt {" -dup" " -dupsort"} { - append args $opt - - puts "Subdb006: $method ( $args ) Intra-subdb join" - set txn "" - # - # Get a cursor in each subdb and move past the end of each - # subdb. Make sure we don't end up in another subdb. 
- # - puts "\tSubdb006.a: Intra-subdb join" - - if { $env != "NULL" } { - set testdir [get_home $env] - } - cleanup $testdir $env - - set psize 8192 - set duplist {0 50 25 16 12} - set numdb [llength $duplist] - build_all_subdb $testfile [list $method] $psize \ - $duplist $nentries $args - - # Build the primary - puts "Subdb006: Building the primary database $method" - set oflags "-create -mode 0644 [conv $omethod \ - [berkdb random_int 1 2]]" - set db [eval {berkdb_open} $oflags $oargs $testfile primary.db] - error_check_good dbopen [is_valid_db $db] TRUE - for { set i 0 } { $i < 1000 } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set key [format "%04d" $i] - set ret [eval {$db put} $txn {$key stub}] - error_check_good "primary put" $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - error_check_good "primary close" [$db close] 0 - set did [open $dict] - gets $did str - do_join_subdb $testfile primary.db "1 0" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2 0" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 0" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 0" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "1 2" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "1 2 3" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 2 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs - gets $did str - 
do_join_subdb $testfile primary.db "1 3" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "1 4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2 3" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 2" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2 4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 2" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 3" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "2 3 4" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 4 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 2 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "0 2 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "3 2 0" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs - gets $did str - do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs - - close $did - } -} diff --git a/storage/bdb/test/sdb007.tcl b/storage/bdb/test/sdb007.tcl deleted file mode 100644 index 86f46599296..00000000000 --- a/storage/bdb/test/sdb007.tcl +++ /dev/null @@ -1,141 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb007.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $ -# -# TEST sdb007 -# TEST Tests page size difference errors between subdbs. -# TEST Test 3 different scenarios for page sizes. -# TEST 1. Create/open with a default page size, 2nd subdb create with -# TEST specified different one, should error. -# TEST 2. Create/open with specific page size, 2nd subdb create with -# TEST different one, should error. -# TEST 3. 
Create/open with specified page size, 2nd subdb create with -# TEST same specified size, should succeed. -# TEST (4th combo of using all defaults is a basic test, done elsewhere) -proc sdb007 { method args } { - source ./include.tcl - global is_envmethod - - set db2args [convert_args -btree $args] - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb007: skipping for method $method" - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Subdb007: skipping for specific page sizes" - return - } - - puts "Subdb007: $method ($args) subdb tests with different page sizes" - - set txnenv 0 - set envargs "" - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/subdb007.db - set env NULL - } else { - set testfile subdb007.db - incr eindex - set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - append db2args " -auto_commit " - } - set testdir [get_home $env] - } - set sub1 "sub1" - set sub2 "sub2" - cleanup $testdir $env - set txn "" - - puts "\tSubdb007.a.0: create subdb with default page size" - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile $sub1}] - error_check_good subdb [is_valid_db $db] TRUE - # - # Figure out what the default page size is so that we can - # guarantee we create it with a different value. 
- set statret [$db stat] - set pgsz 0 - foreach pair $statret { - set fld [lindex $pair 0] - if { [string compare $fld {Page size}] == 0 } { - set pgsz [lindex $pair 1] - } - } - error_check_good dbclose [$db close] 0 - - if { $pgsz == 512 } { - set pgsz2 2048 - } else { - set pgsz2 512 - } - - puts "\tSubdb007.a.1: create 2nd subdb with specified page size" - set stat [catch {eval {berkdb_open_noerr -create -btree} \ - $db2args {-pagesize $pgsz2 $testfile $sub2}} ret] - error_check_good subdb:pgsz $stat 1 - # We'll get a different error if running in an env, - # because the env won't have been opened with noerr. - # Skip the test for what the error is, just getting the - # error is enough. - if { $is_envmethod == 0 } { - error_check_good subdb:fail [is_substr $ret \ - "Different pagesize specified"] 1 - } - - set ret [eval {berkdb dbremove} $envargs {$testfile}] - - puts "\tSubdb007.b.0: create subdb with specified page size" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-pagesize $pgsz2 $omethod $testfile $sub1}] - error_check_good subdb [is_valid_db $db] TRUE - set statret [$db stat] - set newpgsz 0 - foreach pair $statret { - set fld [lindex $pair 0] - if { [string compare $fld {Page size}] == 0 } { - set newpgsz [lindex $pair 1] - } - } - error_check_good pgsize $pgsz2 $newpgsz - error_check_good dbclose [$db close] 0 - - puts "\tSubdb007.b.1: create 2nd subdb with different page size" - set stat [catch {eval {berkdb_open_noerr -create -btree} \ - $db2args {-pagesize $pgsz $testfile $sub2}} ret] - error_check_good subdb:pgsz $stat 1 - if { $is_envmethod == 0 } { - error_check_good subdb:fail [is_substr $ret \ - "Different pagesize specified"] 1 - } - - set ret [eval {berkdb dbremove} $envargs {$testfile}] - - puts "\tSubdb007.c.0: create subdb with specified page size" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-pagesize $pgsz2 $omethod $testfile $sub1}] - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose 
[$db close] 0 - - puts "\tSubdb007.c.1: create 2nd subdb with same specified page size" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-pagesize $pgsz2 $omethod $testfile $sub2}] - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - -} diff --git a/storage/bdb/test/sdb008.tcl b/storage/bdb/test/sdb008.tcl deleted file mode 100644 index 3beb8313b67..00000000000 --- a/storage/bdb/test/sdb008.tcl +++ /dev/null @@ -1,94 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb008.tcl,v 11.31 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb008 -# TEST Tests explicit setting of lorders for subdatabases -- the -# TEST lorder should be ignored. -proc sdb008 { method args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb008: skipping for method $method" - return - } - set eindex [lsearch -exact $args "-env"] - - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile1 $testdir/subdb008a.db - set testfile2 $testdir/subdb008b.db - set env NULL - } else { - set testfile1 subdb008a.db - set testfile2 subdb008b.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "Subdb008: $method ($args) subdb tests with different lorders" - - puts "\tSubdb008.a.0: create subdb with system default lorder" - set lorder "1234" - if { [big_endian] } { - set lorder "4321" - } - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile1 "sub1"}] - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - - # Explicitly try to create subdb's of each byte order. In both - # cases the subdb should be forced to the byte order of the - # parent database. - puts "\tSubdb008.a.1: Try to create subdb with -1234 lorder" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-lorder 1234 $omethod $testfile1 "sub2"}] - error_check_good lorder_1234 [eval $db get_lorder] $lorder - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - - puts "\tSubdb008.a.2: Try to create subdb with -4321 lorder" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-lorder 4321 $omethod $testfile1 "sub3"}] - error_check_good lorder_4321 [eval $db get_lorder] $lorder - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - - puts "\tSubdb008.b.0: create subdb with non-default lorder" - set reverse_lorder "4321" - if { [big_endian] } { - set reverse_lorder "1234" - } - set db [eval {berkdb_open -create -mode 0644} \ - {-lorder $reverse_lorder} $args {$omethod $testfile2 "sub1"}] - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - - puts "\tSubdb008.b.1: Try to create subdb with -1234 lorder" - set db [eval {berkdb_open -create 
-mode 0644} \ - $args {-lorder 1234 $omethod $testfile2 "sub2"}] - error_check_good lorder_1234 [eval $db get_lorder] $reverse_lorder - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 - - puts "\tSubdb008.b.2: Try to create subdb with -4321 lorder" - set db [eval {berkdb_open -create -mode 0644} \ - $args {-lorder 4321 $omethod $testfile2 "sub3"}] - error_check_good lorder_4321 [eval $db get_lorder] $reverse_lorder - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbclose [$db close] 0 -} diff --git a/storage/bdb/test/sdb009.tcl b/storage/bdb/test/sdb009.tcl deleted file mode 100644 index 3f36a27d781..00000000000 --- a/storage/bdb/test/sdb009.tcl +++ /dev/null @@ -1,108 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb009.tcl,v 11.12 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb009 -# TEST Test DB->rename() method for subdbs -proc sdb009 { method args } { - global errorCode - source ./include.tcl - - set omethod [convert_method $method] - set args [convert_args $method $args] - - puts "Subdb009: $method ($args): Test of DB->rename()" - - if { [is_queue $method] == 1 } { - puts "\tSubdb009: Skipping for method $method." - return - } - - set txnenv 0 - set envargs "" - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/subdb009.db - set env NULL - } else { - set testfile subdb009.db - incr eindex - set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - } - set testdir [get_home $env] - } - set oldsdb OLDDB - set newsdb NEWDB - - # Make sure we're starting from a clean slate. 
- cleanup $testdir $env - error_check_bad "$testfile exists" [file exists $testfile] 1 - - puts "\tSubdb009.a: Create/rename file" - puts "\t\tSubdb009.a.1: create" - set db [eval {berkdb_open -create -mode 0644}\ - $omethod $args {$testfile $oldsdb}] - error_check_good dbopen [is_valid_db $db] TRUE - - # The nature of the key and data are unimportant; use numeric key - # so record-based methods don't need special treatment. - set txn "" - set key 1 - set data [pad_data $method data] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - error_check_good dbput [eval {$db put} $txn {$key $data}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good dbclose [$db close] 0 - - puts "\t\tSubdb009.a.2: rename" - error_check_good rename_file [eval {berkdb dbrename} $envargs \ - {$testfile $oldsdb $newsdb}] 0 - - puts "\t\tSubdb009.a.3: check" - # Open again with create to make sure we've really completely - # disassociated the subdb from the old name. - set odb [eval {berkdb_open -create -mode 0644}\ - $omethod $args $testfile $oldsdb] - error_check_good odb_open [is_valid_db $odb] TRUE - set odbt [$odb get $key] - error_check_good odb_close [$odb close] 0 - - set ndb [eval {berkdb_open -create -mode 0644}\ - $omethod $args $testfile $newsdb] - error_check_good ndb_open [is_valid_db $ndb] TRUE - set ndbt [$ndb get $key] - error_check_good ndb_close [$ndb close] 0 - - # The DBT from the "old" database should be empty, not the "new" one. - error_check_good odbt_empty [llength $odbt] 0 - error_check_bad ndbt_empty [llength $ndbt] 0 - error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data - - # Now there's both an old and a new. Rename the "new" to the "old" - # and make sure that fails. 
- puts "\tSubdb009.b: Make sure rename fails instead of overwriting" - set ret [catch {eval {berkdb dbrename} $envargs $testfile \ - $oldsdb $newsdb} res] - error_check_bad rename_overwrite $ret 0 - error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1 - - puts "\tSubdb009 succeeded." -} diff --git a/storage/bdb/test/sdb010.tcl b/storage/bdb/test/sdb010.tcl deleted file mode 100644 index 8f1d6f8a171..00000000000 --- a/storage/bdb/test/sdb010.tcl +++ /dev/null @@ -1,170 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb010.tcl,v 11.19 2004/10/18 17:34:17 carol Exp $ -# -# TEST sdb010 -# TEST Test DB->remove() method and DB->truncate() for subdbs -proc sdb010 { method args } { - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Subdb010: Test of DB->remove() and DB->truncate" - - if { [is_queue $method] == 1 } { - puts "\tSubdb010: Skipping for method $method." - return - } - - set txnenv 0 - set envargs "" - set eindex [lsearch -exact $args "-env"] - # - # If we are not given an env, create one. 
- if { $eindex == -1 } { - set env [berkdb_env -create -home $testdir -mode 0644] - error_check_good env_open [is_valid_env $env] TRUE - } else { - incr eindex - set env [lindex $args $eindex] - } - set testfile subdb010.db - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - } - set testdir [get_home $env] - set tfpath $testdir/$testfile - - cleanup $testdir $env - - set txn "" - set testdb DATABASE - set testdb2 DATABASE2 - - set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args $envargs $testfile $testdb] - error_check_good db_open [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - - puts "\tSubdb010.a: Test of DB->remove()" - error_check_good file_exists_before [file exists $tfpath] 1 - error_check_good db_remove [eval {berkdb dbremove} $envargs \ - $testfile $testdb] 0 - - # File should still exist. - error_check_good file_exists_after [file exists $tfpath] 1 - - # But database should not. - set ret [catch {eval berkdb_open $omethod \ - $args $envargs $testfile $testdb} res] - error_check_bad open_failed ret 0 - error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1 - - puts "\tSubdb010.b: Setup for DB->truncate()" - # The nature of the key and data are unimportant; use numeric key - # so record-based methods don't need special treatment. 
- set key1 1 - set key2 2 - set data1 [pad_data $method data1] - set data2 [pad_data $method data2] - - set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args $envargs {$testfile $testdb}] - error_check_good db_open [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set db2 [eval {berkdb_open -create -mode 0644} $omethod \ - $args $envargs $testfile $testdb2] - error_check_good db_open [is_valid_db $db2] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good db_close [$db close] 0 - error_check_good db_close [$db2 close] 0 - - puts "\tSubdb010.c: truncate" - # - # Return value should be 1, the count of how many items were - # destroyed when we truncated. 
- set db [eval {berkdb_open -create -mode 0644} $omethod \ - $args $envargs $testfile $testdb] - error_check_good db_open [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - error_check_good trunc_subdb [eval {$db truncate} $txn] 1 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tSubdb010.d: check" - set db [eval {berkdb_open} $args $envargs {$testfile $testdb}] - error_check_good db_open [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - set kd [$dbc get -first] - error_check_good trunc_dbcget [llength $kd] 0 - error_check_good dbcclose [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set db2 [eval {berkdb_open} $args $envargs {$testfile $testdb2}] - error_check_good db_open [is_valid_db $db2] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db2 cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE - set kd [$dbc get -first] - error_check_bad notrunc_dbcget1 [llength $kd] 0 - set db2kd [list [list $key2 $data2]] - error_check_good key2 $kd $db2kd - set kd [$dbc get -next] - error_check_good notrunc_dbget2 [llength $kd] 0 - error_check_good dbcclose [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good db_close [$db close] 0 - error_check_good db_close [$db2 close] 0 - - # If we created our env, close it. 
- if { $eindex == -1 } { - error_check_good env_close [$env close] 0 - } -} diff --git a/storage/bdb/test/sdb011.tcl b/storage/bdb/test/sdb011.tcl deleted file mode 100644 index d36b83e5df2..00000000000 --- a/storage/bdb/test/sdb011.tcl +++ /dev/null @@ -1,141 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb011.tcl,v 11.14 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb011 -# TEST Test deleting Subdbs with overflow pages -# TEST Create 1 db with many large subdbs. -# TEST Test subdatabases with overflow pages. -proc sdb011 { method {ndups 13} {nsubdbs 10} args} { - global names - source ./include.tcl - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } { - puts "Subdb011: skipping for method $method" - return - } - set txnenv 0 - set envargs "" - set max_files 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/subdb011.db - set env NULL - set tfpath $testfile - } else { - set testfile subdb011.db - incr eindex - set env [lindex $args $eindex] - set envargs " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - append envargs " -auto_commit " - set max_files 50 - if { $ndups == 13 } { - set ndups 7 - } - } - set testdir [get_home $env] - set tfpath $testdir/$testfile - } - - # Create the database and open the dictionary - - cleanup $testdir $env - set txn "" - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list] - set flen [llength $file_list] - puts "Subdb011: $method ($args) $ndups overflow dups with \ - $flen filename=key filecontents=data pairs" - - puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups" - set slist {} - set i 0 - set count 0 - foreach f $file_list { - set i [expr $i % $nsubdbs] - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set names([expr $count + 1]) $f - } else { - set key $f - } - # Should really catch errors - set fid [open $f r] - fconfigure $fid -translation binary - set filecont [read $fid] - set subdb subdb$i - lappend slist $subdb - close $fid - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile $subdb}] - error_check_good dbopen [is_valid_db $db] TRUE - for {set dup 0} {$dup < $ndups} {incr dup} { - set data $dup:$filecont - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key \ - [chop_data $method $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - error_check_good dbclose [$db close] 0 - incr i - incr count - } - - puts "\tSubdb011.b: Verify overflow pages" - foreach subdb $slist { - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile $subdb}] - error_check_good dbopen 
[is_valid_db $db] TRUE - set stat [$db stat] - - # What everyone else calls overflow pages, hash calls "big - # pages", so we need to special-case hash here. (Hash - # overflow pages are additional pages after the first in a - # bucket.) - if { [string compare [$db get_type] hash] == 0 } { - error_check_bad overflow \ - [is_substr $stat "{{Number of big pages} 0}"] 1 - } else { - error_check_bad overflow \ - [is_substr $stat "{{Overflow pages} 0}"] 1 - } - error_check_good dbclose [$db close] 0 - } - - puts "\tSubdb011.c: Delete subdatabases" - for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} { - # - # Randomly delete a subdatabase - set sindex [berkdb random_int 0 [expr $i - 1]] - set subdb [lindex $slist $sindex] - # - # Delete the one we did from the list - set slist [lreplace $slist $sindex $sindex] - error_check_good file_exists_before [file exists $tfpath] 1 - error_check_good db_remove [eval {berkdb dbremove} $envargs \ - {$testfile $subdb}] 0 - } -} - diff --git a/storage/bdb/test/sdb012.tcl b/storage/bdb/test/sdb012.tcl deleted file mode 100644 index d316acb7111..00000000000 --- a/storage/bdb/test/sdb012.tcl +++ /dev/null @@ -1,428 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdb012.tcl,v 1.6 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdb012 -# TEST Test subdbs with locking and transactions -# TEST Tests creating and removing subdbs while handles -# TEST are open works correctly, and in the face of txns. -# -proc sdb012 { method args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_queue $method] == 1 } { - puts "Subdb012: skipping for method $method" - return - } - - # If we are using an env, then skip this test. It needs its own. 
- set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Subdb012 skipping for env $env" - return - } - set encargs "" - set largs [split_encargs $args encargs] - - puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests" - - # - # sdb012_body takes a txn list containing 4 elements. - # {txn command for first subdb - # txn command for second subdb - # txn command for first subdb removal - # txn command for second subdb removal} - # - # The allowed commands are 'none' 'one', 'auto', 'abort', 'commit'. - # 'none' is a special case meaning run without a txn. In the - # case where all 4 items are 'none', we run in a lock-only env. - # 'one' is a special case meaning we create the subdbs together - # in one single transaction. It is indicated as the value for t1, - # and the value in t2 indicates if that single txn should be - # aborted or committed. It is not used and has no meaning - # in the removal case. 'auto' means use the -auto_commit flag - # to the operation, and 'abort' and 'commit' do the obvious. - # - # First test locking w/o txns. If any in tlist are 'none', - # all must be none. - # - # Now run through the txn-based operations - set count 0 - set sdb "Subdb012." - set teststr "abcdefghijklmnopqrstuvwxyz" - set testlet [split $teststr {}] - foreach t1 { none one abort auto commit } { - foreach t2 { none abort auto commit } { - if { $t1 == "one" } { - if { $t2 == "none" || $t2 == "auto"} { - continue - } - } - set tlet [lindex $testlet $count] - foreach r1 { none abort auto commit } { - foreach r2 { none abort auto commit } { - set tlist [list $t1 $t2 $r1 $r2] - sdb012_body $testdir $omethod $largs \ - $encargs $sdb$tlet $tlist - } - } - incr count - } - } - -} - -proc s012 { method args } { - source ./include.tcl - - set omethod [convert_method $method] - - set encargs "" - set largs "" - - puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests" - - set sdb "Subdb012." 
- set tlet X - set tlist $args - error_check_good tlist [llength $tlist] 4 - sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist -} - -# -# This proc checks the tlist values and returns the flags -# that should be used when opening the env. If we are running -# with no txns, then just -lock, otherwise -txn. -# -proc sdb012_subsys { tlist } { - set t1 [lindex $tlist 0] - # - # If we have no txns, all elements of the list should be none. - # In that case we only run with locking turned on. - # Otherwise, we use the full txn subsystems. - # - set allnone {none none none none} - if { $allnone == $tlist } { - set subsys "-lock" - } else { - set subsys "-txn" - } - return $subsys -} - -# -# This proc parses the tlist and returns a list of 4 items that -# should be used in operations. I.e. it will begin the txns as -# needed, or return a -auto_commit flag, etc. -# -proc sdb012_tflags { env tlist } { - set ret "" - set t1 "" - foreach t $tlist { - switch $t { - one { - set t1 [$env txn] - error_check_good txnbegin [is_valid_txn $t1 $env] TRUE - lappend ret "-txn $t1" - lappend ret "-txn $t1" - } - auto { - lappend ret "-auto_commit" - } - abort - - commit { - # - # If the previous command was a "one", skip over - # this commit/abort. Otherwise start a new txn - # for the removal case. - # - if { $t1 == "" } { - set txn [$env txn] - error_check_good txnbegin [is_valid_txn $txn \ - $env] TRUE - lappend ret "-txn $txn" - } else { - set t1 "" - } - } - none { - lappend ret "" - } - default { - error "Txn command $t not implemented" - } - } - } - return $ret -} - -# -# This proc parses the tlist and returns a list of 4 items that -# should be used in the txn conclusion operations. I.e. it will -# give "" if using auto_commit (i.e. no final txn op), or a single -# abort/commit if both subdb's are in one txn. -# -proc sdb012_top { tflags tlist } { - set ret "" - set t1 "" - # - # We know both lists have 4 items. 
Iterate over them - # using multiple value lists so we know which txn goes - # with each op. - # - # The tflags list is needed to extract the txn command - # out for the operation. The tlist list is needed to - # determine what operation we are doing. - # - foreach t $tlist tf $tflags { - switch $t { - one { - set t1 [lindex $tf 1] - } - auto { - lappend ret "sdb012_nop" - } - abort - - commit { - # - # If the previous command was a "one" (i.e. t1 - # is set), append a correct command and then - # an empty one. - # - if { $t1 == "" } { - set txn [lindex $tf 1] - set top "$txn $t" - lappend ret $top - } else { - set top "$t1 $t" - lappend ret "sdb012_nop" - lappend ret $top - set t1 "" - } - } - none { - lappend ret "sdb012_nop" - } - } - } - return $ret -} - -proc sdb012_nop { } { - return 0 -} - -proc sdb012_isabort { tlist item } { - set i [lindex $tlist $item] - if { $i == "one" } { - set i [lindex $tlist [expr $item + 1]] - } - if { $i == "abort" } { - return 1 - } else { - return 0 - } -} - -proc sdb012_body { testdir omethod largs encargs msg tlist } { - - puts "\t$msg: $tlist" - set testfile subdb012.db - set subdb1 sub1 - set subdb2 sub2 - - set subsys [sdb012_subsys $tlist] - env_cleanup $testdir - set env [eval {berkdb_env -create -home} $testdir $subsys $encargs] - error_check_good dbenv [is_valid_env $env] TRUE - error_check_good test_lock [$env test abort subdb_lock] 0 - - # - # Convert from our tlist txn commands into real flags we - # will pass to commands. Use the multiple values feature - # of foreach to do this efficiently. 
- # - set tflags [sdb012_tflags $env $tlist] - foreach {txn1 txn2 rem1 rem2} $tflags {break} - foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break} - -# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2" -# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2" - puts "\t$msg.0: Create sub databases in env with $subsys" - set s1 [eval {berkdb_open -env $env -create -mode 0644} \ - $largs $txn1 {$omethod $testfile $subdb1}] - error_check_good dbopen [is_valid_db $s1] TRUE - - set ret [eval $top1] - error_check_good t1_end $ret 0 - - set s2 [eval {berkdb_open -env $env -create -mode 0644} \ - $largs $txn2 {$omethod $testfile $subdb2}] - error_check_good dbopen [is_valid_db $s2] TRUE - - puts "\t$msg.1: Subdbs are open; resolve txns if necessary" - set ret [eval $top2] - error_check_good t2_end $ret 0 - - set t1_isabort [sdb012_isabort $tlist 0] - set t2_isabort [sdb012_isabort $tlist 1] - set r1_isabort [sdb012_isabort $tlist 2] - set r2_isabort [sdb012_isabort $tlist 3] - -# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort" - - puts "\t$msg.2: Subdbs are open; verify removal failures" - # Verify removes of subdbs with open subdb's fail - # - # We should fail no matter what. If we aborted, then the - # subdb should not exist. If we didn't abort, we should fail - # with DB_LOCK_NOTGRANTED. - # - # XXX - Do we need -auto_commit for all these failing ones? 
- set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ] - error_check_bad dbremove1_open $r 0 - if { $t1_isabort } { - error_check_good dbremove1_open_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremove1_open [is_substr \ - $result DB_LOCK_NOTGRANTED] 1 - } - - set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ] - error_check_bad dbremove2_open $r 0 - if { $t2_isabort } { - error_check_good dbremove2_open_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremove2_open [is_substr \ - $result DB_LOCK_NOTGRANTED] 1 - } - - # Verify file remove fails - set r [catch {berkdb dbremove -env $env $testfile} result] - error_check_bad dbremovef_open $r 0 - - # - # If both aborted, there should be no file?? - # - if { $t1_isabort && $t2_isabort } { - error_check_good dbremovef_open_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremovef_open [is_substr \ - $result DB_LOCK_NOTGRANTED] 1 - } - - puts "\t$msg.3: Close subdb2; verify removals" - error_check_good close_s2 [$s2 close] 0 - set r [ catch {eval {berkdb dbremove -env} \ - $env $rem2 $testfile $subdb2} result ] - if { $t2_isabort } { - error_check_bad dbrem2_ab $r 0 - error_check_good dbrem2_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbrem2 $result 0 - } - # Resolve subdb2 removal txn - set r [eval $rop2] - error_check_good rop2 $r 0 - - set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ] - error_check_bad dbremove1.2_open $r 0 - if { $t1_isabort } { - error_check_good dbremove1.2_open_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremove1.2_open [is_substr \ - $result DB_LOCK_NOTGRANTED] 1 - } - - # There are three cases here: - # 1. if both t1 and t2 aborted, the file shouldn't exist - # 2. if only t1 aborted, the file still exists and nothing is open - # 3. 
if neither aborted a remove should fail because the first - # subdb is still open - # In case 2, don't try the remove, because it should succeed - # and we won't be able to test anything else. - if { !$t1_isabort || $t2_isabort } { - set r [catch {berkdb dbremove -env $env $testfile} result] - if { $t1_isabort && $t2_isabort } { - error_check_bad dbremovef.2_open $r 0 - error_check_good dbremove.2_open_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_bad dbremovef.2_open $r 0 - error_check_good dbremove.2_open [is_substr \ - $result DB_LOCK_NOTGRANTED] 1 - } - } - - puts "\t$msg.4: Close subdb1; verify removals" - error_check_good close_s1 [$s1 close] 0 - set r [ catch {eval {berkdb dbremove -env} \ - $env $rem1 $testfile $subdb1} result ] - if { $t1_isabort } { - error_check_bad dbremove1_ab $r 0 - error_check_good dbremove1_ab [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremove1 $result 0 - } - # Resolve subdb1 removal txn - set r [eval $rop1] - error_check_good rop1 $r 0 - - - # Verify removal of subdb2. All DB handles are closed now. - # So we have two scenarios: - # 1. The removal of subdb2 above was successful and subdb2 - # doesn't exist and we should fail that way. - # 2. The removal of subdb2 above was aborted, and this - # removal should succeed. - # - set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ] - if { $r2_isabort && !$t2_isabort } { - error_check_good dbremove2.1_ab $result 0 - } else { - error_check_bad dbremove2.1 $r 0 - error_check_good dbremove2.1 [is_substr \ - $result "no such file"] 1 - } - - # Verify removal of subdb1. All DB handles are closed now. - # So we have two scenarios: - # 1. The removal of subdb1 above was successful and subdb1 - # doesn't exist and we should fail that way. - # 2. The removal of subdb1 above was aborted, and this - # removal should succeed. 
- # - set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ] - if { $r1_isabort && !$t1_isabort } { - error_check_good dbremove1.1 $result 0 - } else { - error_check_bad dbremove_open $r 0 - error_check_good dbremove.1 [is_substr \ - $result "no such file"] 1 - } - - puts "\t$msg.5: All closed; remove file" - set r [catch {berkdb dbremove -env $env $testfile} result] - if { $t1_isabort && $t2_isabort } { - error_check_bad dbremove_final_ab $r 0 - error_check_good dbremove_file_abstr [is_substr \ - $result "no such file"] 1 - } else { - error_check_good dbremove_final $r 0 - } - error_check_good envclose [$env close] 0 -} diff --git a/storage/bdb/test/sdbscript.tcl b/storage/bdb/test/sdbscript.tcl deleted file mode 100644 index 645351b077e..00000000000 --- a/storage/bdb/test/sdbscript.tcl +++ /dev/null @@ -1,47 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdbscript.tcl,v 11.11 2004/01/28 03:36:29 bostic Exp $ -# -# Usage: subdbscript testfile subdbnumber factor -# testfile: name of DB itself -# subdbnumber: n, subdb indicator, of form sub$n.db -# factor: Delete over factor'th + n'th from my subdb. -# -# I.e. if factor is 10, and n is 0, remove entries, 0, 10, 20, ... -# if factor is 10 and n is 1, remove entries 1, 11, 21, ... 
-source ./include.tcl -source $test_path/test.tcl - -set usage "subdbscript testfile subdbnumber factor" - -# Verify usage -if { $argc != 3 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set testfile [lindex $argv 0] -set n [ lindex $argv 1 ] -set factor [ lindex $argv 2 ] - -set db [berkdb_open -unknown $testfile sub$n.db] -error_check_good db_open [is_valid_db $db] TRUE - -set dbc [$db cursor] -error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE -set i 1 -for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} { - set x [expr $i - $n] - if { $x >= 0 && [expr $x % $factor] == 0 } { - puts "Deleting $d" - error_check_good dbc_del [$dbc del] 0 - } - incr i -} -error_check_good db_close [$db close] 0 - -exit diff --git a/storage/bdb/test/sdbtest001.tcl b/storage/bdb/test/sdbtest001.tcl deleted file mode 100644 index 65b64dac2d3..00000000000 --- a/storage/bdb/test/sdbtest001.tcl +++ /dev/null @@ -1,150 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdbtest001.tcl,v 11.21 2004/01/28 03:36:29 bostic Exp $ -# -# TEST sdbtest001 -# TEST Tests multiple access methods in one subdb -# TEST Open several subdbs, each with a different access method -# TEST Small keys, small data -# TEST Put/get per key per subdb -# TEST Dump file, verify per subdb -# TEST Close, reopen per subdb -# TEST Dump file, verify per subdb -# TEST -# TEST Make several subdb's of different access methods all in one DB. -# TEST Rotate methods and repeat [#762]. -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. 
-proc sdbtest001 { {nentries 10000} } { - source ./include.tcl - - puts "Subdbtest001: many different subdb access methods in one" - - # Create the database and open the dictionary - set testfile $testdir/subdbtest001.db - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - set txn "" - set count 0 - - # Set up various methods to rotate through - lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"] - lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"] - lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"] - lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"] - lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"] - lappend method_list [list "-rrecno" "-recno"] - lappend method_list [list "-recno" "-rrecno"] - lappend method_list [list "-hash" "-dhash"] - lappend method_list [list "-dhash" "-hash"] - lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"] - lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"] - lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"] - lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"] - set plist [list 512 8192 1024 4096 2048 16384] - set mlen [llength $method_list] - set plen [llength $plist] - while { $plen < $mlen } { - set plist [concat $plist $plist] - set plen [llength $plist] - } - set pgsz 0 - foreach methods $method_list { - cleanup $testdir NULL - puts "\tSubdbtest001.a: create subdbs of different access methods:" - puts "\tSubdbtest001.a: $methods" - set nsubdbs [llength $methods] - set duplist "" - for { set i 0 } { $i < $nsubdbs } { incr i } { - lappend duplist -1 - } - set psize [lindex $plist $pgsz] - incr pgsz - set newent [expr $nentries / $nsubdbs] - build_all_subdb $testfile $methods $psize $duplist $newent - - # Now we will get each key from the DB and compare the results - # to the original. 
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } { - - set method [lindex $methods $subdb] - set method [convert_method $method] - if { [is_record_based $method] == 1 } { - set checkfunc subdbtest001_recno.check - } else { - set checkfunc subdbtest001.check - } - - puts "\tSubdbtest001.b: dump file sub$subdb.db" - set db [berkdb_open -unknown $testfile sub$subdb.db] - dump_file $db $txn $t1 $checkfunc - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the - # dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $newent} {incr i} { - puts $oid [expr $subdb * $newent + $i] - } - close $oid - file rename -force $t1 $t3 - } else { - # filehead uses 1-based line numbers - set beg [expr $subdb * $newent] - incr beg - set end [expr $beg + $newent - 1] - filehead $end $dict $t3 $beg - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Subdbtest001:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_subfile $testfile NULL $t1 $checkfunc \ - dump_file_direction "-first" "-next" sub$subdb.db - if { [string compare $method "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Subdbtest001:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. 
- puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction" - open_and_dump_subfile $testfile NULL $t1 $checkfunc \ - dump_file_direction "-last" "-prev" sub$subdb.db - - if { [string compare $method "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Subdbtest001:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - } - } -} - -# Check function for Subdbtest001; keys and data are identical -proc subdbtest001.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc subdbtest001_recno.check { key data } { -global dict -global kvals - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/sdbtest002.tcl b/storage/bdb/test/sdbtest002.tcl deleted file mode 100644 index 4e51809fbb5..00000000000 --- a/storage/bdb/test/sdbtest002.tcl +++ /dev/null @@ -1,173 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sdbtest002.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ -# -# TEST sdbtest002 -# TEST Tests multiple access methods in one subdb access by multiple -# TEST processes. -# TEST Open several subdbs, each with a different access method -# TEST Small keys, small data -# TEST Put/get per key per subdb -# TEST Fork off several child procs to each delete selected -# TEST data from their subdb and then exit -# TEST Dump file, verify contents of each subdb is correct -# TEST Close, reopen per subdb -# TEST Dump file, verify per subdb -# TEST -# TEST Make several subdb's of different access methods all in one DB. -# TEST Fork of some child procs to each manipulate one subdb and when -# TEST they are finished, verify the contents of the databases. -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. 
-# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. -proc sdbtest002 { {nentries 10000} } { - source ./include.tcl - - puts "Subdbtest002: many different subdb access methods in one" - - # Create the database and open the dictionary - set testfile $testdir/subdbtest002.db - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - set txn "" - set count 0 - - # Set up various methods to rotate through - set methods \ - [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"] - cleanup $testdir NULL - puts "\tSubdbtest002.a: create subdbs of different methods: $methods" - set psize 4096 - set nsubdbs [llength $methods] - set duplist "" - for { set i 0 } { $i < $nsubdbs } { incr i } { - lappend duplist -1 - } - set newent [expr $nentries / $nsubdbs] - - # - # XXX We need dict sorted to figure out what was deleted - # since things are stored sorted in the btree. - # - filesort $dict $t4 - set dictorig $dict - set dict $t4 - - build_all_subdb $testfile $methods $psize $duplist $newent - - # Now we will get each key from the DB and compare the results - # to the original. 
- set pidlist "" - puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys" - for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } { - puts "$tclsh_path\ - $test_path/sdbscript.tcl $testfile \ - $subdb $nsubdbs >& $testdir/subdb002.log.$subdb" - set p [exec $tclsh_path $test_path/wrap.tcl \ - sdbscript.tcl \ - $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &] - lappend pidlist $p - } - watch_procs $pidlist 5 - - for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } { - set method [lindex $methods $subdb] - set method [convert_method $method] - if { [is_record_based $method] == 1 } { - set checkfunc subdbtest002_recno.check - } else { - set checkfunc subdbtest002.check - } - - puts "\tSubdbtest002.b: dump file sub$subdb.db" - set db [berkdb_open -unknown $testfile sub$subdb.db] - error_check_good db_open [is_valid_db $db] TRUE - dump_file $db $txn $t1 $checkfunc - error_check_good db_close [$db close] 0 - # - # This is just so that t2 is there and empty - # since we are only appending below. - # - exec > $t2 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $newent} {incr i} { - set x [expr $i - $subdb] - if { [expr $x % $nsubdbs] != 0 } { - puts $oid [expr $subdb * $newent + $i] - } - } - close $oid - file rename -force $t1 $t3 - } else { - set oid [open $t4 r] - for {set i 1} {[gets $oid line] >= 0} {incr i} { - set farr($i) $line - } - close $oid - - set oid [open $t2 w] - for {set i 1} {$i <= $newent} {incr i} { - # Sed uses 1-based line numbers - set x [expr $i - $subdb] - if { [expr $x % $nsubdbs] != 0 } { - set beg [expr $subdb * $newent] - set beg [expr $beg + $i] - puts $oid $farr($beg) - } - } - close $oid - filesort $t1 $t3 - } - - error_check_good Subdbtest002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file" - # Now, reopen the file and run the last test again. 
- open_and_dump_subfile $testfile NULL $t1 $checkfunc \ - dump_file_direction "-first" "-next" sub$subdb.db - if { [string compare $method "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Subdbtest002:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. - puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction" - open_and_dump_subfile $testfile NULL $t1 $checkfunc \ - dump_file_direction "-last" "-prev" sub$subdb.db - - if { [string compare $method "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Subdbtest002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - } - set dict $dictorig - return -} - -# Check function for Subdbtest002; keys and data are identical -proc subdbtest002.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc subdbtest002_recno.check { key data } { -global dict -global kvals - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/sdbutils.tcl b/storage/bdb/test/sdbutils.tcl deleted file mode 100644 index 5c6deb3b47b..00000000000 --- a/storage/bdb/test/sdbutils.tcl +++ /dev/null @@ -1,197 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: sdbutils.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $ -# -proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} { - set nsubdbs [llength $dups] - set mlen [llength $methods] - set savearg $dbargs - for {set i 0} {$i < $nsubdbs} { incr i } { - set m [lindex $methods [expr $i % $mlen]] - set dbargs $savearg - subdb_build $dbname $nentries [lindex $dups $i] \ - $i $m $psize sub$i.db $dbargs - } -} - -proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} { - source ./include.tcl - - set dbargs [convert_args $method $dbargs] - set omethod [convert_method $method] - - puts "Method: $method" - - set txnenv 0 - set eindex [lsearch -exact $dbargs "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $dbargs $eindex] - set txnenv [is_txnenv $env] - } - # Create the database and open the dictionary - set oflags "-create -mode 0644 $omethod \ - -pagesize $psize $dbargs $name $subdb" - set db [eval {berkdb_open} $oflags] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - set count 0 - if { $ndups >= 0 } { - puts "\tBuilding $method $name $subdb. \ - $nkeys keys with $ndups duplicates at interval of $dup_interval" - } - if { $ndups < 0 } { - puts "\tBuilding $method $name $subdb. 
\ - $nkeys unique keys of pagesize $psize" - # - # If ndups is < 0, we want unique keys in each subdb, - # so skip ahead in the dict by nkeys * iteration - # - for { set count 0 } \ - { $count < [expr $nkeys * $dup_interval] } { - incr count} { - set ret [gets $did str] - if { $ret == -1 } { - break - } - } - } - set txn "" - for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } { - incr count} { - for { set i 0 } { $i < $ndups } { incr i } { - set data [format "%04d" [expr $i * $dup_interval]] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$str \ - [chop_data $method $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { $ndups == 0 } { - set ret [eval {$db put} $txn {$str \ - [chop_data $method NODUP]}] - error_check_good put $ret 0 - } elseif { $ndups < 0 } { - if { [is_record_based $method] == 1 } { - global kvals - - set num [expr $nkeys * $dup_interval] - set num [expr $num + $count + 1] - set ret [eval {$db put} $txn {$num \ - [chop_data $method $str]}] - set kvals($num) [pad_data $method $str] - error_check_good put $ret 0 - } else { - set ret [eval {$db put} $txn \ - {$str [chop_data $method $str]}] - error_check_good put $ret 0 - } - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - close $did - error_check_good close:$name [$db close] 0 -} - -proc do_join_subdb { db primary subdbs key oargs } { - source ./include.tcl - - puts "\tJoining: $subdbs on $key" - - # Open all the databases - set p [eval {berkdb_open -unknown} $oargs $db $primary] - error_check_good "primary open" [is_valid_db $p] TRUE - - set dblist "" - set curslist "" - - foreach i $subdbs { - set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db] - error_check_good "sub$i.db 
open" [is_valid_db $jdb] TRUE - - lappend jlist [list $jdb $key] - lappend dblist $jdb - - } - - set join_res [eval {$p get_join} $jlist] - set ndups [llength $join_res] - - # Calculate how many dups we expect. - # We go through the list of indices. If we find a 0, then we - # expect 0 dups. For everything else, we look at pairs of numbers, - # if the are relatively prime, multiply them and figure out how - # many times that goes into 50. If they aren't relatively prime, - # take the number of times the larger goes into 50. - set expected 50 - set last 1 - foreach n $subdbs { - if { $n == 0 } { - set expected 0 - break - } - if { $last == $n } { - continue - } - - if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } { - if { $n > $last } { - set last $n - set expected [expr 50 / $last] - } - } else { - set last [expr $n * $last / [gcd $n $last]] - set expected [expr 50 / $last] - } - } - - error_check_good number_of_dups:$subdbs $ndups $expected - - # - # If we get here, we have the number expected, now loop - # through each and see if it is what we expected. - # - for { set i 0 } { $i < $ndups } { incr i } { - set pair [lindex $join_res $i] - set k [lindex $pair 0] - foreach j $subdbs { - error_check_bad valid_dup:$j:$subdbs $j 0 - set kval [string trimleft $k 0] - if { [string length $kval] == 0 } { - set kval 0 - } - error_check_good \ - valid_dup:$j:$subdbs [expr $kval % $j] 0 - } - } - - error_check_good close_primary [$p close] 0 - foreach i $dblist { - error_check_good close_index:$i [$i close] 0 - } -} - -proc n_to_subname { n } { - if { $n == 0 } { - return null.db; - } else { - return sub$n.db; - } -} diff --git a/storage/bdb/test/sec001.tcl b/storage/bdb/test/sec001.tcl deleted file mode 100644 index ed2ca81f9d4..00000000000 --- a/storage/bdb/test/sec001.tcl +++ /dev/null @@ -1,223 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: sec001.tcl,v 11.12 2004/09/22 18:01:06 bostic Exp $ -# -# TEST sec001 -# TEST Test of security interface -proc sec001 { } { - global errorInfo - global errorCode - global has_crypto - global is_hp_test - - source ./include.tcl - # Skip test if release does not support encryption. - if { $has_crypto == 0 } { - puts "Skipping test sec001 for non-crypto release." - return - } - - set testfile1 env1.db - set testfile2 $testdir/env2.db - set subdb1 sub1 - set subdb2 sub2 - - puts "Sec001: Test of basic encryption interface." - env_cleanup $testdir - - set passwd1 "passwd1" - set passwd1_bad "passwd1_bad" - set passwd2 "passwd2" - set key "key" - set data "data" - - # - # This first group tests bad create scenarios and also - # tests attempting to use encryption after creating a - # non-encrypted env/db to begin with. - # - set nopass "" - puts "\tSec001.a.1: Create db with encryption." - set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\tSec001.a.2: Open db without encryption." - set stat [catch {berkdb_open_noerr $testfile2} ret] - error_check_good db:nocrypto $stat 1 - error_check_good db:fail [is_substr $ret "no encryption key"] 1 - - set ret [berkdb dbremove -encryptaes $passwd1 $testfile2] - - puts "\tSec001.b.1: Create db without encryption or checksum." - set db [berkdb_open -create -btree $testfile2] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\tSec001.b.2: Open db with encryption." - set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret] - error_check_good db:nocrypto $stat 1 - error_check_good db:fail [is_substr $ret "supplied encryption key"] 1 - - set ret [berkdb dbremove $testfile2] - - puts "\tSec001.c.1: Create db with checksum." 
- set db [berkdb_open -create -chksum -btree $testfile2] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\tSec001.c.2: Open db with encryption." - set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret] - error_check_good db:nocrypto $stat 1 - error_check_good db:fail [is_substr $ret "supplied encryption key"] 1 - - set ret [berkdb dbremove $testfile2] - - puts "\tSec001.d.1: Create subdb with encryption." - set db [berkdb_open -create -encryptaes $passwd1 -btree \ - $testfile2 $subdb1] - error_check_good subdb [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\tSec001.d.2: Create 2nd subdb without encryption." - set stat [catch {berkdb_open_noerr -create -btree \ - $testfile2 $subdb2} ret] - error_check_good subdb:nocrypto $stat 1 - error_check_good subdb:fail [is_substr $ret "no encryption key"] 1 - - set ret [berkdb dbremove -encryptaes $passwd1 $testfile2] - - puts "\tSec001.e.1: Create subdb without encryption or checksum." - set db [berkdb_open -create -btree $testfile2 $subdb1] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\tSec001.e.2: Create 2nd subdb with encryption." - set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \ - $testfile2 $subdb2} ret] - error_check_good subdb:nocrypto $stat 1 - error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1 - - env_cleanup $testdir - - puts "\tSec001.f.1: Open env with encryption, empty passwd." - set stat [catch {berkdb_env_noerr -create -home $testdir \ - -encryptaes $nopass} ret] - error_check_good env:nopass $stat 1 - error_check_good env:fail [is_substr $ret "Empty password"] 1 - - puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)." 
- set stat [catch {berkdb_env_noerr -create -home $testdir \ - -encryptany $passwd1} ret] - error_check_good env:any $stat 1 - error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1 - - puts "\tSec001.f.3: Create without encryption." - set env [berkdb_env -create -home $testdir] - error_check_good env [is_valid_env $env] TRUE - - # Skip this piece of the test on HP-UX, where we can't - # join the env. - if { $is_hp_test != 1 } { - puts "\tSec001.f.4: Open again with encryption." - set stat [catch {berkdb_env_noerr -home $testdir \ - -encryptaes $passwd1} ret] - error_check_good env:unencrypted $stat 1 - error_check_good env:fail [is_substr $ret \ - "Joining non-encrypted environment"] 1 - } - - error_check_good envclose [$env close] 0 - - env_cleanup $testdir - - # - # This second group tests creating and opening a secure env. - # We test that others can join successfully, and that other's with - # bad/no passwords cannot. Also test that we cannot use the - # db->set_encrypt method when we've already got a secure dbenv. - # - puts "\tSec001.g.1: Open with encryption." - set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1] - error_check_good env [is_valid_env $env] TRUE - - # We can't open an env twice in HP-UX, so skip the rest. - if { $is_hp_test == 1 } { - puts "Skipping remainder of test for HP-UX." - error_check_good env_close [$env close] 0 - return - } - - puts "\tSec001.g.2: Open again with encryption - same passwd." - set env1 [berkdb_env -home $testdir -encryptaes $passwd1] - error_check_good env [is_valid_env $env1] TRUE - error_check_good envclose [$env1 close] 0 - - puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)." - set env1 [berkdb_env -home $testdir -encryptany $passwd1] - error_check_good env [is_valid_env $env1] TRUE - error_check_good envclose [$env1 close] 0 - - puts "\tSec001.g.4: Open with encryption - different length passwd." 
- set stat [catch {berkdb_env_noerr -home $testdir \ - -encryptaes $passwd1_bad} ret] - error_check_good env:$passwd1_bad $stat 1 - error_check_good env:fail [is_substr $ret "Invalid password"] 1 - - puts "\tSec001.g.5: Open with encryption - different passwd." - set stat [catch {berkdb_env_noerr -home $testdir \ - -encryptaes $passwd2} ret] - error_check_good env:$passwd2 $stat 1 - error_check_good env:fail [is_substr $ret "Invalid password"] 1 - - puts "\tSec001.g.6: Open env without encryption." - set stat [catch {berkdb_env_noerr -home $testdir} ret] - error_check_good env:$passwd2 $stat 1 - error_check_good env:fail [is_substr $ret "Encrypted environment"] 1 - - puts "\tSec001.g.7: Open database with encryption in env" - set stat [catch {berkdb_open_noerr -env $env -btree -create \ - -encryptaes $passwd2 $testfile1} ret] - error_check_good db:$passwd2 $stat 1 - error_check_good env:fail [is_substr $ret "method not permitted"] 1 - - puts "\tSec001.g.8: Close creating env" - error_check_good envclose [$env close] 0 - - # - # This third group tests opening the env after the original env - # handle is closed. Just to make sure we can reopen it in - # the right fashion even if no handles are currently open. - # - puts "\tSec001.h.1: Reopen without encryption." - set stat [catch {berkdb_env_noerr -home $testdir} ret] - error_check_good env:noencrypt $stat 1 - error_check_good env:fail [is_substr $ret "Encrypted environment"] 1 - - puts "\tSec001.h.2: Reopen with bad passwd." - set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \ - $passwd1_bad} ret] - error_check_good env:$passwd1_bad $stat 1 - error_check_good env:fail [is_substr $ret "Invalid password"] 1 - - puts "\tSec001.h.3: Reopen with encryption." - set env [berkdb_env -create -home $testdir -encryptaes $passwd1] - error_check_good env [is_valid_env $env] TRUE - - puts "\tSec001.h.4: 2nd Reopen with encryption." 
- set env1 [berkdb_env -home $testdir -encryptaes $passwd1] - error_check_good env [is_valid_env $env1] TRUE - - error_check_good envclose [$env1 close] 0 - error_check_good envclose [$env close] 0 - - puts "\tSec001 complete." -} diff --git a/storage/bdb/test/sec002.tcl b/storage/bdb/test/sec002.tcl deleted file mode 100644 index 5bdd4af3bcc..00000000000 --- a/storage/bdb/test/sec002.tcl +++ /dev/null @@ -1,181 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sec002.tcl,v 11.13 2004/11/02 16:12:04 carol Exp $ -# -# TEST sec002 -# TEST Test of security interface and catching errors in the -# TEST face of attackers overwriting parts of existing files. -proc sec002 { } { - global errorInfo - global errorCode - global has_crypto - - source ./include.tcl - - # Skip test if release does not support encryption. - if { $has_crypto == 0 } { - puts "Skipping test sec002 for non-crypto release." - return - } - - set testfile1 $testdir/sec002-1.db - set testfile2 $testdir/sec002-2.db - set testfile3 $testdir/sec002-3.db - set testfile4 $testdir/sec002-4.db - - puts "Sec002: Test of basic encryption interface." - env_cleanup $testdir - - set passwd1 "passwd1" - set passwd2 "passwd2" - set key "key" - set data "data" - set pagesize 1024 - - # - # Set up 4 databases, two encrypted, but with different passwords - # and one unencrypt, but with checksumming turned on and one - # unencrypted and no checksumming. Place the exact same data - # in each one. 
- # - puts "\tSec002.a: Setup databases" - set db_cmd "-create -pagesize $pagesize -btree " - set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - set db [eval {berkdb_open} -chksum $db_cmd $testfile3] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - set db [eval {berkdb_open} $db_cmd $testfile4] - error_check_good db [is_valid_db $db] TRUE - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - # - # If we reopen the normal file with the -chksum flag, there - # should be no error and checksumming should be ignored. - # If we reopen a checksummed file without the -chksum flag, - # checksumming should still be in effect. [#6959] - # - puts "\tSec002.b: Inheritance of chksum properties" - puts "\t\tSec002.b1: Reopen ordinary file with -chksum flag" - set db [eval {berkdb_open} -chksum $testfile4] - error_check_good open_with_chksum [is_valid_db $db] TRUE - set retdata [$db get $key] - error_check_good testfile4_get [lindex [lindex $retdata 0] 1] $data - error_check_good dbclose [$db close] 0 - - puts "\t\tSec002.b2: Reopen checksummed file without -chksum flag" - set db [eval {berkdb_open} $testfile3] - error_check_good open_wo_chksum [is_valid_db $db] TRUE - set retdata [$db get $key] - error_check_good testfile3_get [lindex [lindex $retdata 0] 1] $data - error_check_good dbclose [$db close] 0 - - # - # First just touch some bits in the file. We know that in btree - # meta pages, bytes 92-459 are unused. Scribble on them in both - # an encrypted, and both unencrypted files. 
We should get - # a checksum error for the encrypted, and checksummed files. - # We should get no error for the normal file. - # - set fidlist {} - set fid [open $testfile1 r+] - lappend fidlist $fid - set fid [open $testfile3 r+] - lappend fidlist $fid - set fid [open $testfile4 r+] - lappend fidlist $fid - - puts "\tSec002.c: Overwrite unused space in meta-page" - foreach f $fidlist { - fconfigure $f -translation binary - seek $f 100 start - set byte [read $f 1] - binary scan $byte c val - set newval [expr ~$val] - set newbyte [binary format c $newval] - seek $f 100 start - puts -nonewline $f $newbyte - close $f - } - puts "\tSec002.d: Reopen modified databases" - set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret] - error_check_good db:$testfile1 $stat 1 - error_check_good db:$testfile1:fail \ - [is_substr $ret "metadata page checksum error"] 1 - - set stat [catch {berkdb_open_noerr -chksum $testfile3} ret] - error_check_good db:$testfile3 $stat 1 - error_check_good db:$testfile3:fail \ - [is_substr $ret "metadata page checksum error"] 1 - - set stat [catch {berkdb_open_noerr $testfile4} db] - error_check_good db:$testfile4 $stat 0 - error_check_good dbclose [$db close] 0 - - # Skip the remainder of the test for Windows platforms. - # Forcing the error which causes DB_RUNRECOVERY to be - # returned ends up leaving open files that cannot be removed. 
- if { $is_windows_test == 1 } { - cleanup $testdir NULL 1 - puts "Skipping remainder of test for Windows" - return - } - - puts "\tSec002.e: Replace root page in encrypted w/ encrypted" - set fid1 [open $testfile1 r+] - fconfigure $fid1 -translation binary - set fid2 [open $testfile2 r+] - fconfigure $fid2 -translation binary - seek $fid1 $pagesize start - seek $fid2 $pagesize start - fcopy $fid1 $fid2 -size $pagesize - close $fid1 - close $fid2 - - set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2] - error_check_good db [is_valid_db $db] TRUE - set stat [catch {$db get $key} ret] - error_check_good dbget $stat 1 - error_check_good db:$testfile2:fail1 \ - [is_substr $ret "checksum error"] 1 - set stat [catch {$db close} ret] - error_check_good dbclose $stat 1 - error_check_good db:$testfile2:fail2 [is_substr $ret "DB_RUNRECOVERY"] 1 - - puts "\tSec002.f: Replace root page in encrypted w/ unencrypted" - set fid2 [open $testfile2 r+] - fconfigure $fid2 -translation binary - set fid4 [open $testfile4 r+] - fconfigure $fid4 -translation binary - seek $fid2 $pagesize start - seek $fid4 $pagesize start - fcopy $fid4 $fid2 -size $pagesize - close $fid4 - close $fid2 - - set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2] - error_check_good db [is_valid_db $db] TRUE - set stat [catch {$db get $key} ret] - error_check_good dbget $stat 1 - error_check_good db:$testfile2:fail \ - [is_substr $ret "checksum error"] 1 - set stat [catch {$db close} ret] - error_check_good dbclose $stat 1 - error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1 - - cleanup $testdir NULL 1 -} diff --git a/storage/bdb/test/shelltest.tcl b/storage/bdb/test/shelltest.tcl deleted file mode 100644 index cb588f13378..00000000000 --- a/storage/bdb/test/shelltest.tcl +++ /dev/null @@ -1,101 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: shelltest.tcl,v 1.31 2004/03/15 21:09:49 bostic Exp $ -# -# TEST scr### -# TEST The scr### directories are shell scripts that test a variety of -# TEST things, including things about the distribution itself. These -# TEST tests won't run on most systems, so don't even try to run them. -# -# shelltest.tcl: -# Code to run shell script tests, to incorporate Java, C++, -# example compilation, etc. test scripts into the Tcl framework. -proc shelltest {{ run_one 0 } { xml 0 }} { - source ./include.tcl - global shelltest_list - global xmlshelltest_list - - set SH /bin/sh - if { [file executable $SH] != 1 } { - puts "Shell tests require valid shell /bin/sh: not found." - puts "Skipping shell tests." - return 0 - } - - if { $xml == 1 } { - set shelltest_list $xmlshelltest_list - } - - if { $run_one == 0 } { - puts "Running shell script tests..." - - foreach testpair $shelltest_list { - set dir [lindex $testpair 0] - set test [lindex $testpair 1] - - env_cleanup $testdir - shelltest_copy $test_path/$dir $testdir - shelltest_run $SH $dir $test $testdir - } - } else { - set run_one [expr $run_one - 1]; - set dir [lindex [lindex $shelltest_list $run_one] 0] - set test [lindex [lindex $shelltest_list $run_one] 1] - - env_cleanup $testdir - shelltest_copy $test_path/$dir $testdir - shelltest_run $SH $dir $test $testdir - } -} - -proc shelltest_copy { fromdir todir } { - set globall [glob $fromdir/*] - - foreach f $globall { - file copy $f $todir/ - } -} - -proc shelltest_run { sh srcdir test testdir } { - puts "Running shell script $srcdir ($test)..." 
- - set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res] - - if { $ret != 0 } { - puts "FAIL: shell test $srcdir/$test exited abnormally" - } -} - -proc scr001 {} { shelltest 1 } -proc scr002 {} { shelltest 2 } -proc scr003 {} { shelltest 3 } -proc scr004 {} { shelltest 4 } -proc scr005 {} { shelltest 5 } -proc scr006 {} { shelltest 6 } -proc scr007 {} { shelltest 7 } -proc scr008 {} { shelltest 8 } -proc scr009 {} { shelltest 9 } -proc scr010 {} { shelltest 10 } -proc scr011 {} { shelltest 11 } -proc scr012 {} { shelltest 12 } -proc scr013 {} { shelltest 13 } -proc scr014 {} { shelltest 14 } -proc scr015 {} { shelltest 15 } -proc scr016 {} { shelltest 16 } -proc scr017 {} { shelltest 17 } -proc scr018 {} { shelltest 18 } -proc scr019 {} { shelltest 19 } -proc scr020 {} { shelltest 20 } -proc scr021 {} { shelltest 21 } -proc scr022 {} { shelltest 22 } -proc scr023 {} { shelltest 23 } -proc scr024 {} { shelltest 24 } -proc scr025 {} { shelltest 25 } -proc scr026 {} { shelltest 26 } -proc scr027 {} { shelltest 27 } -proc scr028 {} { shelltest 28 } -proc scr029 {} { shelltest 29 } -proc scr030 {} { shelltest 30 } diff --git a/storage/bdb/test/si001.tcl b/storage/bdb/test/si001.tcl deleted file mode 100644 index e5cffc0e11a..00000000000 --- a/storage/bdb/test/si001.tcl +++ /dev/null @@ -1,178 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: si001.tcl,v 1.18 2004/10/27 20:40:25 carol Exp $ -# -# TEST si001 -# TEST Basic secondary index put/delete test -# TEST -# TEST Put data in primary db and check that pget on secondary -# TEST index finds the right entries. Alter the primary in the -# TEST following ways, checking for correct data each time: -# TEST Overwrite data in primary database. -# TEST Delete half of entries through primary. -# TEST Delete half of remaining entries through secondary. -# TEST Append data (for record-based primaries only). 
-proc si001 { methods {nentries 200} {tnum "001"} args } { - source ./include.tcl - global dict nsecondaries - - # Primary method/args. - set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. - set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries" - env_cleanup $testdir - - set pname "primary$tnum.db" - set snamebase "secondary$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - # Open and associate the secondaries - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - - puts "\tSi$tnum.a: Put loop" - # Open dictionary and leave open until done with test .e so append - # won't require configuration for duplicates. 
- set did [open $dict] - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a" - - puts "\tSi$tnum.b: Put/overwrite loop" - for { set n 0 } { $n < $nentries } { incr n } { - set newd $data($n).$keys($n) - set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}] - error_check_good put_overwrite($n) $ret 0 - set data($n) [pad_data $pmethod $newd] - } - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b" - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. - set half [expr $nentries / 2] - puts "\tSi$tnum.c: Primary delete loop: deleting $half entries" - for { set n $half } { $n < $nentries } { incr n } { - set ret [$pdb del $keys($n)] - error_check_good pdel($n) $ret 0 - } - check_secondaries $pdb $sdbs $half keys data "Si$tnum.c" - - # Delete half of what's left, through the first secondary. - set quar [expr $half / 2] - puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set callback [callback_n 0] - for { set n $quar } { $n < $half } { incr n } { - set skey [$callback $keys($n) [pad_data $pmethod $data($n)]] - set ret [$sdb del $skey] - error_check_good sdel($n) $ret 0 - } - check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d" - set left $quar - - # For queue and recno only, test append, adding back a quarter of - # the original number of entries. 
- if { [is_record_based $pmethod] == 1 } { - puts "\tSi$tnum.e: Append loop: append $quar entries" - for { set n $quar } { [gets $did str] != -1 && $n < $half } \ - { incr n } { - set key [expr $n + 1] - set datum $str - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set ret [eval {$pdb put} \ - {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - check_secondaries $pdb $sdbs $half keys data "Si$tnum.e" - set left $half - } - - close $did - - puts "\tSi$tnum.f: Truncate primary, check secondaries are empty." - error_check_good truncate [$pdb truncate] $left - foreach sdb $sdbs { - set scursor [$sdb cursor] - error_check_good db_cursor [is_substr $scursor $sdb] 1 - set ret [$scursor get -first] - error_check_good sec_empty [string length $ret] 0 - error_check_good cursor_close [$scursor close] 0 - } - - - puts "\tSi$tnum.g: Closing/disassociating primary first" - error_check_good primary_close [$pdb close] 0 - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good env_close [$env close] 0 - - # Reopen with _noerr for test of truncate secondary. 
- puts "\tSi$tnum.h: Truncate secondary (should fail)" - - set env [berkdb_env_noerr -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - set pdb [eval {berkdb_open_noerr -create -env} $env \ - $pomethod $pargs $pname] - set sdb [eval {berkdb_open_noerr -create -env} $env \ - [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db ] - $pdb associate [callback_n 0] $sdb - - set ret [catch {$sdb truncate} ret] - error_check_good trunc_secondary $ret 1 - - error_check_good primary_close [$pdb close] 0 - error_check_good secondary_close [$sdb close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/si002.tcl b/storage/bdb/test/si002.tcl deleted file mode 100644 index 9e38c8a98e6..00000000000 --- a/storage/bdb/test/si002.tcl +++ /dev/null @@ -1,175 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: si002.tcl,v 1.14 2004/10/27 20:40:25 carol Exp $ -# -# TEST si002 -# TEST Basic cursor-based secondary index put/delete test -# TEST -# TEST Cursor put data in primary db and check that pget -# TEST on secondary index finds the right entries. -# TEST Overwrite while walking primary, check pget again. -# TEST Overwrite while walking secondary (use c_pget), check -# TEST pget again. -# TEST Cursor delete half of entries through primary, check. -# TEST Cursor delete half of remainder through secondary, check. -proc si002 { methods {nentries 200} {tnum "002"} args } { - source ./include.tcl - global dict nsecondaries - - # Primary method/args. - set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. 
- set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries" - env_cleanup $testdir - - set pname "primary$tnum.db" - set snamebase "secondary$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - # Open and associate the secondaries - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - - puts "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop" - set did [open $dict] - set pdbc [$pdb cursor] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set ns($key) $n - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - if { $n % 2 == 0 } { - set pflag " -keyfirst " - } else { - set pflag " -keylast " - } - - set ret [eval {$pdbc put} $pflag \ - {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - close $did - error_check_good pdbc_close [$pdbc close] 0 - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a" - - puts "\tSi$tnum.b: Cursor put overwrite (-current) loop" - set pdbc [$pdb cursor] - error_check_good 
pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \ - { set dbt [$pdbc get -next] } { - set key [lindex [lindex $dbt 0] 0] - set datum [lindex [lindex $dbt 0] 1] - set newd $datum.$key - set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]] - error_check_good put_overwrite($key) $ret 0 - set data($ns($key)) [pad_data $pmethod $newd] - } - error_check_good pdbc_close [$pdbc close] 0 - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b" - - puts "\tSi$tnum.c: Secondary c_pget/primary put overwrite loop" - # We walk the first secondary, then put-overwrite each primary key/data - # pair we find. This doubles as a DBC->c_pget test. - set sdb [lindex $sdbs 0] - set sdbc [$sdb cursor] - error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE - for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \ - { set dbt [$sdbc pget -next] } { - set pkey [lindex [lindex $dbt 0] 1] - set pdatum [lindex [lindex $dbt 0] 2] - - # Extended entries will be showing up underneath us, in - # unpredictable places. Keep track of which pkeys - # we've extended, and don't extend them repeatedly. - if { [info exists pkeys_done($pkey)] == 1 } { - continue - } else { - set pkeys_done($pkey) 1 - } - - set newd $pdatum.[string range $pdatum 0 2] - set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]] - error_check_good pdb_put($pkey) $ret 0 - set data($ns($pkey)) [pad_data $pmethod $newd] - } - error_check_good sdbc_close [$sdbc close] 0 - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.c" - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. 
- set half [expr $nentries / 2] - puts "\tSi$tnum.d:\ - Primary cursor delete loop: deleting $half entries" - set pdbc [$pdb cursor] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - set dbt [$pdbc get -first] - for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } { - error_check_good pdbc_del [$pdbc del] 0 - set dbt [$pdbc get -next] - } - error_check_good pdbc_close [$pdbc close] 0 - cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d" - - # Delete half of what's left, through the first secondary. - set quar [expr $half / 2] - puts "\tSi$tnum.e:\ - Secondary cursor delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set sdbc [$sdb cursor] - set dbt [$sdbc get -first] - for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } { - error_check_good sdbc_del [$sdbc del] 0 - set dbt [$sdbc get -next] - } - error_check_good sdbc_close [$sdbc close] 0 - cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e" - - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good primary_close [$pdb close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/si003.tcl b/storage/bdb/test/si003.tcl deleted file mode 100644 index 60ddba6a35f..00000000000 --- a/storage/bdb/test/si003.tcl +++ /dev/null @@ -1,148 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: si003.tcl,v 1.12 2004/10/27 20:40:25 carol Exp $ -# -# TEST si003 -# TEST si001 with secondaries created and closed mid-test -# TEST Basic secondary index put/delete test with secondaries -# TEST created mid-test. -proc si003 { methods {nentries 200} {tnum "003"} args } { - source ./include.tcl - global dict nsecondaries - - # There's no reason to run this test on large lists. - if { $nentries > 1000 } { - puts "Skipping si003 for large lists (over 1000 items)" - return - } - - # Primary method/args. 
- set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. - set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries" - env_cleanup $testdir - - set pname "primary$tnum.db" - set snamebase "secondary$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [eval {berkdb_env -create -home $testdir}] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - puts -nonewline "\tSi$tnum.a: Put loop ... " - set did [open $dict] - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - close $did - - # Open and associate the secondaries - set sdbs {} - puts "opening secondaries." - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -create [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a" - - puts -nonewline "\tSi$tnum.b: Put/overwrite loop ... 
" - for { set n 0 } { $n < $nentries } { incr n } { - set newd $data($n).$keys($n) - set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}] - error_check_good put_overwrite($n) $ret 0 - set data($n) [pad_data $pmethod $newd] - } - - # Close the secondaries again. - puts "closing secondaries." - for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \ - { set sdb [lindex $sdbs end] } { - error_check_good second_close($sdb) [$sdb close] 0 - set sdbs [lrange $sdbs 0 end-1] - check_secondaries \ - $pdb $sdbs $nentries keys data "Si$tnum.b" - } - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. - set half [expr $nentries / 2] - puts -nonewline \ - "\tSi$tnum.c: Primary delete loop: deleting $half entries ..." - for { set n $half } { $n < $nentries } { incr n } { - set ret [$pdb del $keys($n)] - error_check_good pdel($n) $ret 0 - } - - # Open and associate the secondaries - set sdbs {} - puts "\n\t\topening secondaries." - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] \ - $snamebase.r2.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -create [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - check_secondaries $pdb $sdbs $half keys data "Si$tnum.c" - - # Delete half of what's left, through the first secondary. 
- set quar [expr $half / 2] - puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set callback [callback_n 0] - for { set n $quar } { $n < $half } { incr n } { - set skey [$callback $keys($n) [pad_data $pmethod $data($n)]] - set ret [$sdb del $skey] - error_check_good sdel($n) $ret 0 - } - check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d" - - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good primary_close [$pdb close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/si004.tcl b/storage/bdb/test/si004.tcl deleted file mode 100644 index 08af8b261ce..00000000000 --- a/storage/bdb/test/si004.tcl +++ /dev/null @@ -1,200 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: si004.tcl,v 1.12 2004/10/27 20:40:25 carol Exp $ -# -# TEST si004 -# TEST si002 with secondaries created and closed mid-test -# TEST Basic cursor-based secondary index put/delete test, with -# TEST secondaries created mid-test. -proc si004 { methods {nentries 200} {tnum "004"} args } { - source ./include.tcl - global dict nsecondaries - - # There's no reason to run this test on large lists. - if { $nentries > 1000 } { - puts "Skipping si004 for large lists (over 1000 items)." - return - } - - # Primary method/args. - set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. 
- set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries" - env_cleanup $testdir - - set pname "primary$tnum.db" - set snamebase "secondary$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - puts -nonewline \ - "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop ... " - set did [open $dict] - set pdbc [$pdb cursor] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set ns($key) $n - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - if { $n % 2 == 0 } { - set pflag " -keyfirst " - } else { - set pflag " -keylast " - } - - set ret [eval {$pdbc put} $pflag \ - {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - } - close $did - error_check_good pdbc_close [$pdbc close] 0 - - # Open and associate the secondaries - set sdbs {} - puts "\n\t\topening secondaries." 
- for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -create [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a" - - puts "\tSi$tnum.b: Cursor put overwrite (-current) loop" - set pdbc [$pdb cursor] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \ - { set dbt [$pdbc get -next] } { - set key [lindex [lindex $dbt 0] 0] - set datum [lindex [lindex $dbt 0] 1] - set newd $datum.$key - set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]] - error_check_good put_overwrite($key) $ret 0 - set data($ns($key)) [pad_data $pmethod $newd] - } - error_check_good pdbc_close [$pdbc close] 0 - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b" - - puts -nonewline "\tSi$tnum.c:\ - Secondary c_pget/primary put overwrite loop ... " - # We walk the first secondary, then put-overwrite each primary key/data - # pair we find. This doubles as a DBC->c_pget test. - set sdb [lindex $sdbs 0] - set sdbc [$sdb cursor] - error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE - for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \ - { set dbt [$sdbc pget -next] } { - set pkey [lindex [lindex $dbt 0] 1] - set pdatum [lindex [lindex $dbt 0] 2] - - # Extended entries will be showing up underneath us, in - # unpredictable places. Keep track of which pkeys - # we've extended, and don't extend them repeatedly. 
- if { [info exists pkeys_done($pkey)] == 1 } { - continue - } else { - set pkeys_done($pkey) 1 - } - - set newd $pdatum.[string range $pdatum 0 2] - set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]] - error_check_good pdb_put($pkey) $ret 0 - set data($ns($pkey)) [pad_data $pmethod $newd] - } - error_check_good sdbc_close [$sdbc close] 0 - - # Close the secondaries again. - puts "\n\t\tclosing secondaries." - for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \ - { set sdb [lindex $sdbs end] } { - error_check_good second_close($sdb) [$sdb close] 0 - set sdbs [lrange $sdbs 0 end-1] - check_secondaries \ - $pdb $sdbs $nentries keys data "Si$tnum.c" - } - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. - set half [expr $nentries / 2] - puts -nonewline "\tSi$tnum.d:\ - Primary cursor delete loop: deleting $half entries ... " - set pdbc [$pdb cursor] - error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE - set dbt [$pdbc get -first] - for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } { - error_check_good pdbc_del [$pdbc del] 0 - set dbt [$pdbc get -next] - } - error_check_good pdbc_close [$pdbc close] 0 - - set sdbs {} - puts "\n\t\topening secondaries." - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -env} $env \ - [lindex $omethods $i] [lindex $argses $i] \ - $snamebase.r2.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -create [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d" - - # Delete half of what's left, through the first secondary. 
- set quar [expr $half / 2] - puts "\tSi$tnum.e:\ - Secondary cursor delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set sdbc [$sdb cursor] - set dbt [$sdbc get -first] - for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } { - error_check_good sdbc_del [$sdbc del] 0 - set dbt [$sdbc get -next] - } - error_check_good sdbc_close [$sdbc close] 0 - cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e" - - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good primary_close [$pdb close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/si005.tcl b/storage/bdb/test/si005.tcl deleted file mode 100644 index 292cf1f6092..00000000000 --- a/storage/bdb/test/si005.tcl +++ /dev/null @@ -1,135 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: si005.tcl,v 11.11 2004/10/27 20:40:25 carol Exp $ -# -# TEST si005 -# TEST Basic secondary index put/delete test with transactions -proc si005 { methods {nentries 200} {tnum "005"} args } { - source ./include.tcl - global dict nsecondaries - - # Primary method/args. - set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. - set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - # Since this is a transaction test, don't allow nentries to be large. - if { $nentries > 1000 } { - puts "Skipping si005 for large lists (over 1000 items)." 
- return - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries" - puts "\twith transactions" - env_cleanup $testdir - - set pname "primary$tnum.db" - set snamebase "secondary$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir -txn] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \ - $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - # Open and associate the secondaries - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -auto_commit -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -auto_commit [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - - puts "\tSi$tnum.a: Put loop" - set did [open $dict] - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set txn [$env txn] - set ret [eval {$pdb put} -txn $txn \ - {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - close $did - check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a" - - puts "\tSi$tnum.b: Put/overwrite loop" - for { set n 0 } { $n < $nentries } { incr n } { - set newd $data($n).$keys($n) - - set txn [$env txn] - set ret [eval {$pdb put} -txn $txn \ - {$keys($n) [chop_data $pmethod $newd]}] - error_check_good put_overwrite($n) $ret 0 - set data($n) [pad_data $pmethod $newd] - error_check_good txn_commit($n) [$txn commit] 0 - } - 
check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b" - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. - set half [expr $nentries / 2] - puts "\tSi$tnum.c: Primary delete loop: deleting $half entries" - for { set n $half } { $n < $nentries } { incr n } { - set txn [$env txn] - set ret [$pdb del -txn $txn $keys($n)] - error_check_good pdel($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - check_secondaries $pdb $sdbs $half keys data "Si$tnum.c" - - # Delete half of what's left, through the first secondary. - set quar [expr $half / 2] - puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set callback [callback_n 0] - for { set n $quar } { $n < $half } { incr n } { - set skey [$callback $keys($n) [pad_data $pmethod $data($n)]] - set txn [$env txn] - set ret [$sdb del -txn $txn $skey] - error_check_good sdel($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d" - - puts "\tSi$tnum.e: Closing/disassociating primary first" - error_check_good primary_close [$pdb close] 0 - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/si006.tcl b/storage/bdb/test/si006.tcl deleted file mode 100644 index 3a1dbb3c4f8..00000000000 --- a/storage/bdb/test/si006.tcl +++ /dev/null @@ -1,129 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2002 -# Sleepycat Software. All rights reserved. -# -# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $ -# -# TEST sindex006 -# TEST Basic secondary index put/delete test with transactions -proc sindex006 { methods {nentries 200} {tnum 6} args } { - source ./include.tcl - global dict nsecondaries - - # Primary method/args. 
- set pmethod [lindex $methods 0] - set pargs [convert_args $pmethod $args] - set pomethod [convert_method $pmethod] - - # Method/args for all the secondaries. If only one method - # was specified, assume the same method and a standard N - # secondaries. - set methods [lrange $methods 1 end] - if { [llength $methods] == 0 } { - for { set i 0 } { $i < $nsecondaries } { incr i } { - lappend methods $pmethod - } - } - - set argses [convert_argses $methods $args] - set omethods [convert_methods $methods] - - puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs" - puts " with transactions" - env_cleanup $testdir - - set pname "primary00$tnum.db" - set snamebase "secondary00$tnum" - - # Open an environment - # XXX if one is not supplied! - set env [berkdb_env -create -home $testdir -txn] - error_check_good env_open [is_valid_env $env] TRUE - - # Open the primary. - set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \ - $pargs $pname] - error_check_good primary_open [is_valid_db $pdb] TRUE - - # Open and associate the secondaries - set sdbs {} - for { set i 0 } { $i < [llength $omethods] } { incr i } { - set sdb [eval {berkdb_open -create -auto_commit -env} $env \ - [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db] - error_check_good second_open($i) [is_valid_db $sdb] TRUE - - error_check_good db_associate($i) \ - [$pdb associate -auto_commit [callback_n $i] $sdb] 0 - lappend sdbs $sdb - } - - puts "\tSindex00$tnum.a: Put loop" - set did [open $dict] - for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } { - if { [is_record_based $pmethod] == 1 } { - set key [expr $n + 1] - set datum $str - } else { - set key $str - gets $did datum - } - set keys($n) $key - set data($n) [pad_data $pmethod $datum] - - set txn [$env txn] - set ret [eval {$pdb put} -txn $txn \ - {$key [chop_data $pmethod $datum]}] - error_check_good put($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - close $did - 
check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a" - - puts "\tSindex00$tnum.b: Put/overwrite loop" - for { set n 0 } { $n < $nentries } { incr n } { - set newd $data($n).$keys($n) - - set txn [$env txn] - set ret [eval {$pdb put} -txn $txn \ - {$keys($n) [chop_data $pmethod $newd]}] - error_check_good put_overwrite($n) $ret 0 - set data($n) [pad_data $pmethod $newd] - error_check_good txn_commit($n) [$txn commit] 0 - } - check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b" - - # Delete the second half of the entries through the primary. - # We do the second half so we can just pass keys(0 ... n/2) - # to check_secondaries. - set half [expr $nentries / 2] - puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries" - for { set n $half } { $n < $nentries } { incr n } { - set txn [$env txn] - set ret [$pdb del -txn $txn $keys($n)] - error_check_good pdel($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c" - - # Delete half of what's left, through the first secondary. 
- set quar [expr $half / 2] - puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries" - set sdb [lindex $sdbs 0] - set callback [callback_n 0] - for { set n $quar } { $n < $half } { incr n } { - set skey [$callback $keys($n) [pad_data $pmethod $data($n)]] - set txn [$env txn] - set ret [$sdb del -txn $txn $skey] - error_check_good sdel($n) $ret 0 - error_check_good txn_commit($n) [$txn commit] 0 - } - check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d" - - puts "\tSindex00$tnum.e: Closing/disassociating primary first" - error_check_good primary_close [$pdb close] 0 - foreach sdb $sdbs { - error_check_good secondary_close [$sdb close] 0 - } - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/sindex.tcl b/storage/bdb/test/sindex.tcl deleted file mode 100644 index fc2a0fc2f31..00000000000 --- a/storage/bdb/test/sindex.tcl +++ /dev/null @@ -1,259 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2001-2002 -# Sleepycat Software. All rights reserved. -# -# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $ -# -# Secondary index test driver and maintenance routines. -# -# Breaking from the usual convention, we put the driver function -# for the secondary index tests here, in its own file. The reason -# for this is that it's something which compartmentalizes nicely, -# has little in common with other driver functions, and -# is likely to be run on its own from time to time. -# -# The secondary index tests themselves live in si0*.tcl. - -# Standard number of secondary indices to create if a single-element -# list of methods is passed into the secondary index tests. -global nsecondaries -set nsecondaries 2 - -# Run the secondary index tests. -proc sindex { {verbose 0} args } { - global verbose_check_secondaries - set verbose_check_secondaries $verbose - - # Run basic tests with a single secondary index and a small number - # of keys, then again with a larger number of keys. 
(Note that - # we can't go above 5000, since we use two items from our - # 10K-word list for each key/data pair.) - foreach n { 200 5000 } { - foreach pm { btree hash recno frecno queue queueext } { - foreach sm { dbtree dhash ddbtree ddhash btree hash } { - sindex001 [list $pm $sm $sm] $n - sindex002 [list $pm $sm $sm] $n - # Skip tests 3 & 4 for large lists; - # they're not that interesting. - if { $n < 1000 } { - sindex003 [list $pm $sm $sm] $n - sindex004 [list $pm $sm $sm] $n - } - - sindex006 [list $pm $sm $sm] $n - } - } - } - - # Run secondary index join test. (There's no point in running - # this with both lengths, the primary is unhappy for now with fixed- - # length records (XXX), and we need unsorted dups in the secondaries.) - foreach pm { btree hash recno } { - foreach sm { btree hash } { - sindex005 [list $pm $sm $sm] 1000 - } - sindex005 [list $pm btree hash] 1000 - sindex005 [list $pm hash btree] 1000 - } - - - # Run test with 50 secondaries. - foreach pm { btree hash } { - set methlist [list $pm] - for { set i 0 } { $i < 50 } { incr i } { - # XXX this should incorporate hash after #3726 - if { $i % 2 == 0 } { - lappend methlist "dbtree" - } else { - lappend methlist "ddbtree" - } - } - sindex001 $methlist 500 - sindex002 $methlist 500 - sindex003 $methlist 500 - sindex004 $methlist 500 - } -} - -# The callback function we use for each given secondary in most tests -# is a simple function of its place in the list of secondaries (0-based) -# and the access method (since recnos may need different callbacks). -# -# !!! -# Note that callbacks 0-3 return unique secondary keys if the input data -# are unique; callbacks 4 and higher may not, so don't use them with -# the normal wordlist and secondaries that don't support dups. -# The callbacks that incorporate a key don't work properly with recno -# access methods, at least not in the current test framework (the -# error_check_good lines test for e.g. 1foo, when the database has -# e.g. 
0x010x000x000x00foo). -proc callback_n { n } { - switch $n { - 0 { return _s_reversedata } - 1 { return _s_noop } - 2 { return _s_concatkeydata } - 3 { return _s_concatdatakey } - 4 { return _s_reverseconcat } - 5 { return _s_truncdata } - 6 { return _s_alwayscocacola } - } - return _s_noop -} - -proc _s_reversedata { a b } { return [reverse $b] } -proc _s_truncdata { a b } { return [string range $b 1 end] } -proc _s_concatkeydata { a b } { return $a$b } -proc _s_concatdatakey { a b } { return $b$a } -proc _s_reverseconcat { a b } { return [reverse $a$b] } -proc _s_alwayscocacola { a b } { return "Coca-Cola" } -proc _s_noop { a b } { return $b } - -# Should the check_secondary routines print lots of output? -set verbose_check_secondaries 0 - -# Given a primary database handle, a list of secondary handles, a -# number of entries, and arrays of keys and data, verify that all -# databases have what they ought to. -proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } { - upvar $keyarr keys - upvar $dataarr data - global verbose_check_secondaries - - # Make sure each key/data pair is in the primary. - if { $verbose_check_secondaries } { - puts "\t\t$pref.1: Each key/data pair is in the primary" - } - for { set i 0 } { $i < $nentries } { incr i } { - error_check_good pdb_get($i) [$pdb get $keys($i)] \ - [list [list $keys($i) $data($i)]] - } - - for { set j 0 } { $j < [llength $sdbs] } { incr j } { - # Make sure each key/data pair is in this secondary. - if { $verbose_check_secondaries } { - puts "\t\t$pref.2:\ - Each skey/key/data tuple is in secondary #$j" - } - for { set i 0 } { $i < $nentries } { incr i } { - set sdb [lindex $sdbs $j] - set skey [[callback_n $j] $keys($i) $data($i)] - error_check_good sdb($j)_pget($i) \ - [$sdb pget -get_both $skey $keys($i)] \ - [list [list $skey $keys($i) $data($i)]] - } - - # Make sure this secondary contains only $nentries - # items. 
- if { $verbose_check_secondaries } { - puts "\t\t$pref.3: Secondary #$j has $nentries items" - } - set dbc [$sdb cursor] - error_check_good dbc($i) \ - [is_valid_cursor $dbc $sdb] TRUE - for { set k 0 } { [llength [$dbc get -next]] > 0 } \ - { incr k } { } - error_check_good numitems($i) $k $nentries - error_check_good dbc($i)_close [$dbc close] 0 - } - - if { $verbose_check_secondaries } { - puts "\t\t$pref.4: Primary has $nentries items" - } - set dbc [$pdb cursor] - error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE - for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { } - error_check_good numitems $k $nentries - error_check_good pdbc_close [$dbc close] 0 -} - -# Given a primary database handle and a list of secondary handles, walk -# through the primary and make sure all the secondaries are correct, -# then walk through the secondaries and make sure the primary is correct. -# -# This is slightly less rigorous than the normal check_secondaries--we -# use it whenever we don't have up-to-date "keys" and "data" arrays. -proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } { - global verbose_check_secondaries - - # Make sure each key/data pair in the primary is in each secondary. 
- set pdbc [$pdb cursor] - error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE - set i 0 - if { $verbose_check_secondaries } { - puts "\t\t$pref.1:\ - Key/data in primary => key/data in secondaries" - } - - for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \ - { set dbt [$pdbc get -next] } { - incr i - set pkey [lindex [lindex $dbt 0] 0] - set pdata [lindex [lindex $dbt 0] 1] - for { set j 0 } { $j < [llength $sdbs] } { incr j } { - set sdb [lindex $sdbs $j] - set sdbt [$sdb pget -get_both \ - [[callback_n $j] $pkey $pdata] $pkey] - error_check_good pkey($pkey,$j) \ - [lindex [lindex $sdbt 0] 1] $pkey - error_check_good pdata($pdata,$j) \ - [lindex [lindex $sdbt 0] 2] $pdata - } - } - error_check_good ccs_pdbc_close [$pdbc close] 0 - error_check_good primary_has_nentries $i $nentries - - for { set j 0 } { $j < [llength $sdbs] } { incr j } { - if { $verbose_check_secondaries } { - puts "\t\t$pref.2:\ - Key/data in secondary #$j => key/data in primary" - } - set sdb [lindex $sdbs $j] - set sdbc [$sdb cursor] - error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE - set i 0 - for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \ - { set dbt [$sdbc pget -next] } { - incr i - set pkey [lindex [lindex $dbt 0] 1] - set pdata [lindex [lindex $dbt 0] 2] - error_check_good pdb_get($pkey/$pdata,$j) \ - [$pdb get -get_both $pkey $pdata] \ - [list [list $pkey $pdata]] - } - error_check_good secondary($j)_has_nentries $i $nentries - - # To exercise pget -last/pget -prev, we do it backwards too. 
- set i 0 - for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \ - { set dbt [$sdbc pget -prev] } { - incr i - set pkey [lindex [lindex $dbt 0] 1] - set pdata [lindex [lindex $dbt 0] 2] - error_check_good pdb_get_bkwds($pkey/$pdata,$j) \ - [$pdb get -get_both $pkey $pdata] \ - [list [list $pkey $pdata]] - } - error_check_good secondary($j)_has_nentries_bkwds $i $nentries - - error_check_good ccs_sdbc_close($j) [$sdbc close] 0 - } -} - -# The secondary index tests take a list of the access methods that -# each array ought to use. Convert at one blow into a list of converted -# argses and omethods for each method in the list. -proc convert_argses { methods largs } { - set ret {} - foreach m $methods { - lappend ret [convert_args $m $largs] - } - return $ret -} -proc convert_methods { methods } { - set ret {} - foreach m $methods { - lappend ret [convert_method $m] - } - return $ret -} diff --git a/storage/bdb/test/sysscript.tcl b/storage/bdb/test/sysscript.tcl deleted file mode 100644 index 8386949a6ff..00000000000 --- a/storage/bdb/test/sysscript.tcl +++ /dev/null @@ -1,282 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: sysscript.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $ -# -# System integration test script. -# This script runs a single process that tests the full functionality of -# the system. The database under test contains nfiles files. Each process -# randomly generates a key and some data. Both keys and data are bimodally -# distributed between small keys (1-10 characters) and large keys (the avg -# length is indicated via the command line parameter. -# The process then decides on a replication factor between 1 and nfiles. -# It writes the key and data to that many files and tacks on the file ids -# of the files it writes to the data string. For example, let's say that -# I randomly generate the key dog and data cat. 
Then I pick a replication -# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then -# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data -# pair to each file and then commit. Notice that I may generate replication -# of the form 1:3:3:cat in which case I simply add a duplicate to file 3. -# -# Usage: sysscript dir nfiles key_avg data_avg -# -# dir: DB_HOME directory -# nfiles: number of files in the set -# key_avg: average big key size -# data_avg: average big data size - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set mypid [pid] - -set usage "sysscript dir nfiles key_avg data_avg method" - -# Verify usage -if { $argc != 5 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -puts [concat "Argc: " $argc " Argv: " $argv] - -# Initialize arguments -set dir [lindex $argv 0] -set nfiles [ lindex $argv 1 ] -set key_avg [ lindex $argv 2 ] -set data_avg [ lindex $argv 3 ] -set method [ lindex $argv 4 ] - -# Initialize seed -global rand_init -berkdb srand $rand_init - -puts "Beginning execution for $mypid" -puts "$dir DB_HOME" -puts "$nfiles files" -puts "$key_avg average key length" -puts "$data_avg average data length" - -flush stdout - -# Create local environment -set dbenv [berkdb_env -txn -home $dir] -set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret] -if {$err != 0} { - puts $ret - return -} - -# Now open the files -for { set i 0 } { $i < $nfiles } { incr i } { - set file test044.$i.db - set db($i) [berkdb open -auto_commit -env $dbenv $method $file] - set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret] - if {$err != 0} { - puts $ret - return - } - set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \ - error] 1} ret] - if {$err != 0} { - puts $ret - return - } -} - -set record_based [is_record_based $method] -while { 1 } { - # Decide if we're going to create a big key or a small key - # We give small keys a 70% chance. 
- if { [berkdb random_int 1 10] < 8 } { - set k [random_data 5 0 0 $record_based] - } else { - set k [random_data $key_avg 0 0 $record_based] - } - set data [chop_data $method [random_data $data_avg 0 0]] - - set txn [$dbenv txn] - set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \ - $dbenv.txn] 1} ret] - if {$err != 0} { - puts $ret - return - } - - # Open cursors - for { set f 0 } {$f < $nfiles} {incr f} { - set cursors($f) [$db($f) cursor -txn $txn] - set err [catch {error_check_good $mypid:cursor_open \ - [is_substr $cursors($f) $db($f)] 1} ret] - if {$err != 0} { - puts $ret - return - } - } - set aborted 0 - - # Check to see if key is already in database - set found 0 - for { set i 0 } { $i < $nfiles } { incr i } { - set r [$db($i) get -txn $txn $k] - set r [$db($i) get -txn $txn $k] - if { $r == "-1" } { - for {set f 0 } {$f < $nfiles} {incr f} { - set err [catch {error_check_good \ - $mypid:cursor_close \ - [$cursors($f) close] 0} ret] - if {$err != 0} { - puts $ret - return - } - } - set err [catch {error_check_good $mypid:txn_abort \ - [$txn abort] 0} ret] - if {$err != 0} { - puts $ret - return - } - set aborted 1 - set found 2 - break - } elseif { $r != "Key $k not found." } { - set found 1 - break - } - } - switch $found { - 2 { - # Transaction aborted, no need to do anything. - } - 0 { - # Key was not found, decide how much to replicate - # and then create a list of that many file IDs. 
- set repl [berkdb random_int 1 $nfiles] - set fset "" - for { set i 0 } { $i < $repl } {incr i} { - set f [berkdb random_int 0 [expr $nfiles - 1]] - lappend fset $f - set data [chop_data $method $f:$data] - } - - foreach i $fset { - set r [$db($i) put -txn $txn $k $data] - if {$r == "-1"} { - for {set f 0 } {$f < $nfiles} {incr f} { - set err [catch {error_check_good \ - $mypid:cursor_close \ - [$cursors($f) close] 0} ret] - if {$err != 0} { - puts $ret - return - } - } - set err [catch {error_check_good \ - $mypid:txn_abort [$txn abort] 0} ret] - if {$err != 0} { - puts $ret - return - } - set aborted 1 - break - } - } - } - 1 { - # Key was found. Make sure that all the data values - # look good. - set f [zero_list $nfiles] - set data $r - while { [set ndx [string first : $r]] != -1 } { - set fnum [string range $r 0 [expr $ndx - 1]] - if { [lindex $f $fnum] == 0 } { - #set flag -set - set full [record $cursors($fnum) get -set $k] - } else { - #set flag -next - set full [record $cursors($fnum) get -next] - } - if {[llength $full] == 0} { - for {set f 0 } {$f < $nfiles} {incr f} { - set err [catch {error_check_good \ - $mypid:cursor_close \ - [$cursors($f) close] 0} ret] - if {$err != 0} { - puts $ret - return - } - } - set err [catch {error_check_good \ - $mypid:txn_abort [$txn abort] 0} ret] - if {$err != 0} { - puts $ret - return - } - set aborted 1 - break - } - set err [catch {error_check_bad \ - $mypid:curs_get($k,$data,$fnum,$flag) \ - [string length $full] 0} ret] - if {$err != 0} { - puts $ret - return - } - set key [lindex [lindex $full 0] 0] - set rec [pad_data $method [lindex [lindex $full 0] 1]] - set err [catch {error_check_good \ - $mypid:dbget_$fnum:key $key $k} ret] - if {$err != 0} { - puts $ret - return - } - set err [catch {error_check_good \ - $mypid:dbget_$fnum:data($k) $rec $data} ret] - if {$err != 0} { - puts $ret - return - } - set f [lreplace $f $fnum $fnum 1] - incr ndx - set r [string range $r $ndx end] - } - } - } - if { $aborted == 0 
} { - for {set f 0 } {$f < $nfiles} {incr f} { - set err [catch {error_check_good $mypid:cursor_close \ - [$cursors($f) close] 0} ret] - if {$err != 0} { - puts $ret - return - } - } - set err [catch {error_check_good $mypid:commit [$txn commit] \ - 0} ret] - if {$err != 0} { - puts $ret - return - } - } -} - -# Close files -for { set i 0 } { $i < $nfiles} { incr i } { - set r [$db($i) close] - set err [catch {error_check_good $mypid:db_close:$i $r 0} ret] - if {$err != 0} { - puts $ret - return - } -} - -# Close tm and environment -$dbenv close - -puts "[timestamp] [pid] Complete" -flush stdout - -filecheck $file 0 diff --git a/storage/bdb/test/test.tcl b/storage/bdb/test/test.tcl deleted file mode 100644 index 3bd3e4d9c40..00000000000 --- a/storage/bdb/test/test.tcl +++ /dev/null @@ -1,1941 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test.tcl,v 11.273 2004/11/01 14:48:23 carol Exp $ - -source ./include.tcl - -# Load DB's TCL API. -load $tcllib - -if { [file exists $testdir] != 1 } { - file mkdir $testdir -} - -global __debug_print -global __debug_on -global __debug_test - -# -# Test if utilities work to figure out the path. Most systems -# use ., but QNX has a problem with execvp of shell scripts which -# causes it to break. -# -set stat [catch {exec ./db_printlog -?} ret] -if { [string first "exec format error" $ret] != -1 } { - set util_path ./.libs -} else { - set util_path . -} -set __debug_print 0 -set encrypt 0 -set old_encrypt 0 -set passwd test_passwd - -# Error stream that (should!) always go to the console, even if we're -# redirecting to ALL.OUT. -set consoleerr stderr - -set dict $test_path/wordlist -set alphabet "abcdefghijklmnopqrstuvwxyz" -set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" - -# Random number seed. 
-global rand_init -set rand_init 12082003 - -# Default record length for fixed record length access method(s) -set fixed_len 20 - -set recd_debug 0 -set log_log_record_types 0 -set ohandles {} - -# Normally, we're not running an all-tests-in-one-env run. This matters -# for error stream/error prefix settings in berkdb_open. -global is_envmethod -set is_envmethod 0 - -# For testing locker id wrap around. -global lock_curid -global lock_maxid -set lock_curid 0 -set lock_maxid 2147483647 -global txn_curid -global txn_maxid -set txn_curid 2147483648 -set txn_maxid 4294967295 - -# The variable one_test allows us to run all the permutations -# of a test with run_all or run_std. -global one_test -if { [info exists one_test] != 1 } { - set one_test "ALL" -} - -# This is where the test numbering and parameters now live. -source $test_path/testparams.tcl - -# Set up any OS-specific values -global tcl_platform -set is_windows_test [is_substr $tcl_platform(os) "Win"] -set is_hp_test [is_substr $tcl_platform(os) "HP-UX"] -set is_je_test 0 -set is_qnx_test [is_substr $tcl_platform(os) "QNX"] -set upgrade_be [big_endian] - -global EXE BAT -if { $is_windows_test == 1 } { - set EXE ".exe" - set BAT ".bat" -} else { - set EXE "" - set BAT "" -} - -# Try to open an encrypted database. If it fails, this release -# doesn't support encryption, and encryption tests should be skipped. -set has_crypto 1 -set stat [catch {set db \ - [eval {berkdb open -create -btree -encryptaes test_passwd} ] } result ] -if { $stat != 0 } { - # Make sure it's the right error for a non-crypto release. - error_check_good non_crypto_release \ - [expr [is_substr $result "operation not supported"] || \ - [is_substr $result "invalid argument"]] 1 - set has_crypto 0 -} else { - # It is a crypto release. Get rid of the db, we don't need it. - error_check_good close_encrypted_db [$db close] 0 -} - -# From here on out, test.tcl contains the procs that are used to -# run all or part of the test suite. 
- -proc run_std { { testname ALL } args } { - global test_names - global one_test - source ./include.tcl - - set one_test $testname - if { $one_test != "ALL" } { - # Source testparams again to adjust test_names. - source $test_path/testparams.tcl - } - - set exflgs [eval extractflags $args] - set args [lindex $exflgs 0] - set flags [lindex $exflgs 1] - - set display 1 - set run 1 - set am_only 0 - set no_am 0 - set std_only 1 - set rflags {--} - foreach f $flags { - switch $f { - A { - set std_only 0 - } - M { - set no_am 1 - puts "run_std: all but access method tests." - } - m { - set am_only 1 - puts "run_std: access method tests only." - } - n { - set display 1 - set run 0 - set rflags [linsert $rflags 0 "-n"] - } - } - } - - if { $std_only == 1 } { - fileremove -f ALL.OUT - - set o [open ALL.OUT a] - if { $run == 1 } { - puts -nonewline "Test suite run started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts [berkdb version -string] - - puts -nonewline $o "Test suite run started at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - puts $o [berkdb version -string] - } - close $o - } - - set test_list { - {"environment" "env"} - {"archive" "archive"} - {"file operations" "fop"} - {"locking" "lock"} - {"logging" "log"} - {"memory pool" "memp"} - {"mutex" "mutex"} - {"transaction" "txn"} - {"deadlock detection" "dead"} - {"subdatabase" "sdb"} - {"byte-order" "byte"} - {"recno backing file" "rsrc"} - {"DBM interface" "dbm"} - {"NDBM interface" "ndbm"} - {"Hsearch interface" "hsearch"} - {"secondary index" "sindex"} - } - - if { $am_only == 0 } { - - foreach pair $test_list { - set msg [lindex $pair 0] - set cmd [lindex $pair 1] - puts "Running $msg tests" - if [catch {exec $tclsh_path << \ - "global one_test; set one_test $one_test; \ - source $test_path/test.tcl; r $rflags $cmd" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: $cmd test: $res" - close $o - } - } - - # Run recovery tests. 
- # - # XXX These too are broken into separate tclsh instantiations - # so we don't require so much memory, but I think it's cleaner - # and more useful to do it down inside proc r than here, - # since "r recd" gets done a lot and needs to work. - # - # Note that we still wrap the test in an exec so that - # its output goes to ALL.OUT. run_recd will wrap each test - # so that both error streams go to stdout (which here goes - # to ALL.OUT); information that run_recd wishes to print - # to the "real" stderr, but outside the wrapping for each test, - # such as which tests are being skipped, it can still send to - # stderr. - puts "Running recovery tests" - if [catch { - exec $tclsh_path << \ - "global one_test; set one_test $one_test; \ - source $test_path/test.tcl; r $rflags recd" \ - 2>@ stderr >> ALL.OUT - } res] { - set o [open ALL.OUT a] - puts $o "FAIL: recd tests: $res" - close $o - } - - # Run join test - # - # XXX - # Broken up into separate tclsh instantiations so we don't - # require so much memory. - if { $one_test == "ALL" } { - puts "Running join test" - foreach test "join1 join2 join3 join4 join5 join6" { - if [catch {exec $tclsh_path << \ - "source $test_path/test.tcl; r $rflags $test" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: $test test: $res" - close $o - } - } - } - } - - if { $no_am == 0 } { - # Access method tests. - # - # XXX - # Broken up into separate tclsh instantiations so we don't - # require so much memory. 
- foreach method \ - "btree hash queue queueext recno rbtree frecno rrecno" { - puts "Running $method tests" - foreach test $test_names(test) { - if { $run == 0 } { - set o [open ALL.OUT a] - run_method \ - -$method $test $display $run $o - close $o - } - if { $run } { - if [catch {exec $tclsh_path << \ - "global one_test; \ - set one_test $one_test; \ - source $test_path/test.tcl; \ - run_method \ - -$method $test $display $run"\ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL:$test $method: $res" - close $o - } - } - } - } - } - - # If not actually running, no need to check for failure. - # If running in the context of the larger 'run_all' we don't - # check for failure here either. - if { $run == 0 || $std_only == 0 } { - return - } - - set failed [check_output ALL.OUT] - - set o [open ALL.OUT a] - if { $failed == 0 } { - puts "Regression Tests Succeeded" - puts $o "Regression Tests Succeeded" - } else { - puts "Regression Tests Failed" - puts "Check UNEXPECTED OUTPUT lines." - puts "Review ALL.OUT.x for details." - puts $o "Regression Tests Failed" - } - - puts -nonewline "Test suite run completed at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts -nonewline $o "Test suite run completed at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - close $o -} - -proc check_output { file } { - # These are all the acceptable patterns. 
- set pattern {(?x) - ^[:space:]*$| - .*?wrap\.tcl.*| - .*?dbscript\.tcl.*| - .*?ddscript\.tcl.*| - .*?mpoolscript\.tcl.*| - .*?mutexscript\.tcl.*| - ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$| - ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$| - ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*| - ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*| - ^\d:\sPut\s\d*\sstrings\srandom\soffsets.*| - ^100.*| - ^eval\s.*| - ^exec\s.*| - ^jointest.*$| - ^r\sarchive\s*| - ^r\sdbm\s*| - ^r\shsearch\s*| - ^r\sndbm\s*| - ^r\srpc\s*| - ^run_recd:\s.*| - ^run_reptest:\s.*| - ^run_rpcmethod:\s.*| - ^run_secenv:\s.*| - ^All\sprocesses\shave\sexited.$| - ^Beginning\scycle\s\d$| - ^Byteorder:.*| - ^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$| - ^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$| - ^Ending\srecord.*| - ^Environment\s.*?specified;\s\sskipping\.$| - ^Executing\srecord\s.*| - ^Join\stest:\.*| - ^Method:\s.*| - ^Repl:\stest\d\d\d:.*| - ^Repl:\ssdb\d\d\d:.*| - ^Script\swatcher\sprocess\s.*| - ^Sleepycat\sSoftware:\sBerkeley\sDB\s.*| - ^Test\ssuite\srun\s.*| - ^Unlinking\slog:\serror\smessage\sOK$| - ^Verifying\s.*| - ^\t*\.\.\.dbc->get.*$| - ^\t*\.\.\.dbc->put.*$| - ^\t*\.\.\.key\s\d*$| - ^\t*\.\.\.Skipping\sdbc.*| - ^\t*and\s\d*\sduplicate\sduplicates\.$| - ^\t*About\sto\srun\srecovery\s.*complete$| - ^\t*Archive[:\.].*| - ^\t*Building\s.*| - ^\t*closing\ssecondaries\.$| - ^\t*Command\sexecuted\sand\s.*$| - ^\t*DBM.*| - ^\t*[d|D]ead[0-9][0-9][0-9].*| - ^\t*Dump\/load\sof.*| - ^\t*[e|E]nv[0-9][0-9][0-9].*| - ^\t*Executing\scommand$| - ^\t*Executing\stxn_.*| - ^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$| - ^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$| - ^\t*[f|F]op[0-9][0-9][0-9].*| - ^\t*HSEARCH.*| - ^\t*Initial\sCheckpoint$| - ^\t*Iteration\s\d*:\sCheckpointing\.$| - ^\t*Joining:\s.*| - ^\t*Kid[1|2]\sabort\.\.\.complete$| - ^\t*Kid[1|2]\scommit\.\.\.complete$| - ^\t*[l|L]ock[0-9][0-9][0-9].*| - ^\t*[l|L]og[0-9][0-9][0-9].*| 
- ^\t*[m|M]emp[0-9][0-9][0-9].*| - ^\t*[m|M]utex[0-9][0-9][0-9].*| - ^\t*NDBM.*| - ^\t*opening\ssecondaries\.$| - ^\t*op_recover_rec:\sRunning\srecovery.*| - ^\t*[r|R]ecd[0-9][0-9][0-9].*| - ^\t*[r|R]ep[0-9][0-9][0-9].*| - ^\t*[r|R]ep_test.*| - ^\t*[r|R]pc[0-9][0-9][0-9].*| - ^\t*[r|R]src[0-9][0-9][0-9].*| - ^\t*Run_rpcmethod.*| - ^\t*Running\srecovery\son\s.*| - ^\t*[s|S]ec[0-9][0-9][0-9].*| - ^\t*[s|S]i[0-9][0-9][0-9].*| - ^\t*Sijoin.*| - ^\t*sdb[0-9][0-9][0-9].*| - ^\t*Skipping\s.*| - ^\t*Subdb[0-9][0-9][0-9].*| - ^\t*Subdbtest[0-9][0-9][0-9].*| - ^\t*Syncing$| - ^\t*[t|T]est[0-9][0-9][0-9].*| - ^\t*[t|T]xn[0-9][0-9][0-9].*| - ^\t*Txnscript.*| - ^\t*Using\s.*?\senvironment\.$| - ^\t*Verification\sof.*| - ^\t*with\stransactions$} - - set failed 0 - set f [open $file r] - while { [gets $f line] >= 0 } { - if { [regexp $pattern $line] == 0 } { - puts -nonewline "UNEXPECTED OUTPUT: " - puts $line - set failed 1 - } - } - close $f - return $failed -} - -proc r { args } { - global test_names - global has_crypto - global rand_init - global one_test - - source ./include.tcl - - set exflgs [eval extractflags $args] - set args [lindex $exflgs 0] - set flags [lindex $exflgs 1] - - set display 1 - set run 1 - set saveflags "--" - foreach f $flags { - switch $f { - n { - set display 1 - set run 0 - set saveflags "-n $saveflags" - } - } - } - - if {[catch { - set sub [ lindex $args 0 ] - switch $sub { - dead - - env - - lock - - log - - memp - - mutex - - rsrc - - sdbtest - - txn { - if { $display } { - run_subsystem $sub 1 0 - } - if { $run } { - run_subsystem $sub - } - } - byte { - if { $one_test == "ALL" } { - run_test byteorder $display $run - } - } - archive - - dbm - - hsearch - - ndbm - - shelltest { - if { $one_test == "ALL" } { - if { $display } { puts "r $sub" } - if { $run } { - check_handles - $sub - } - } - } - bigfile - - elect - - fop { - foreach test $test_names($sub) { - eval run_test $test $display $run - } - } - join { - eval r $saveflags join1 - eval r 
$saveflags join2 - eval r $saveflags join3 - eval r $saveflags join4 - eval r $saveflags join5 - eval r $saveflags join6 - } - join1 { - if { $display } { puts jointest } - if { $run } { - check_handles - jointest - } - } - joinbench { - puts "[timestamp]" - eval r $saveflags join1 - eval r $saveflags join2 - puts "[timestamp]" - } - join2 { - if { $display } { puts "jointest 512" } - if { $run } { - check_handles - jointest 512 - } - } - join3 { - if { $display } { - puts "jointest 8192 0 -join_item" - } - if { $run } { - check_handles - jointest 8192 0 -join_item - } - } - join4 { - if { $display } { puts "jointest 8192 2" } - if { $run } { - check_handles - jointest 8192 2 - } - } - join5 { - if { $display } { puts "jointest 8192 3" } - if { $run } { - check_handles - jointest 8192 3 - } - } - join6 { - if { $display } { puts "jointest 512 3" } - if { $run } { - check_handles - jointest 512 3 - } - } - recd { - check_handles - run_recds $run $display [lrange $args 1 end] - } - rep { - foreach test $test_names(rep) { - run_test $test $display $run - } - # We seed the random number generator here - # instead of in run_repmethod so that we - # aren't always reusing the first few - # responses from random_int. 
- # - berkdb srand $rand_init - foreach sub { test sdb } { - foreach test $test_names($sub) { - eval run_test run_repmethod \ - $display $run $test - } - } - } - rpc { - if { $one_test == "ALL" } { - if { $display } { puts "r $sub" } - global BAT EXE rpc_svc svc_list - global rpc_svc svc_list is_je_test - set old_rpc_src $rpc_svc - foreach rpc_svc $svc_list { - if { $rpc_svc == "berkeley_dbje_svc" } { - set old_util_path $util_path - set util_path $je_root/dist - set is_je_test 1 - } - - if { !$run || \ - ![file exist $util_path/$rpc_svc$BAT] || \ - ![file exist $util_path/$rpc_svc$EXE] } { - continue - } - - run_subsystem rpc - if { [catch {run_rpcmethod -txn} ret] != 0 } { - puts $ret - } - - if { $is_je_test } { - check_handles - eval run_rpcmethod -btree - verify_dir $testdir "" 1 - } else { - run_test run_rpcmethod $display $run - } - - if { $is_je_test } { - set util_path $old_util_path - set is_je_test 0 - } - - } - set rpc_svc $old_rpc_src - } - } - sec { - # Skip secure mode tests if release - # does not support encryption. 
- if { $has_crypto == 0 } { - return - } - if { $display } { - run_subsystem $sub 1 0 - } - if { $run } { - run_subsystem $sub 0 1 - } - foreach test $test_names(test) { - eval run_test run_secmethod \ - $display $run $test - eval run_test run_secenv \ - $display $run $test - } - } - sdb { - if { $one_test == "ALL" } { - if { $display } { - run_subsystem sdbtest 1 0 - } - if { $run } { - run_subsystem sdbtest 0 1 - } - } - foreach test $test_names(sdb) { - eval run_test $test $display $run - } - } - sindex { - if { $one_test == "ALL" } { - if { $display } { - sindex 1 0 - sijoin 1 0 - } - if { $run } { - sindex 0 1 - sijoin 0 1 - } - } - } - btree - - rbtree - - hash - - iqueue - - iqueueext - - queue - - queueext - - recno - - frecno - - rrecno { - foreach test $test_names(test) { - eval run_method [lindex $args 0] $test \ - $display $run [lrange $args 1 end] - } - } - - default { - error \ - "FAIL:[timestamp] r: $args: unknown command" - } - } - flush stdout - flush stderr - } res] != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp] r: $args: $theError" - } else { - error $theError; - } - } -} - -proc run_subsystem { sub { display 0 } { run 1} } { - global test_names - - if { [info exists test_names($sub)] != 1 } { - puts stderr "Subsystem $sub has no tests specified in\ - testparams.tcl; skipping." 
- return - } - foreach test $test_names($sub) { - if { $display } { - puts "eval $test" - } - if { $run } { - check_handles - if {[catch {eval $test} ret] != 0 } { - puts "FAIL: run_subsystem: $sub $test: \ - $ret" - } - } - } -} - -proc run_test { test {display 0} {run 1} args } { - source ./include.tcl - foreach method "hash queue queueext recno rbtree frecno rrecno btree" { - if { $display } { - puts "eval $test -$method $args; verify_dir $testdir \"\" 1" - } - if { $run } { - check_handles - eval $test -$method $args - verify_dir $testdir "" 1 - } - } -} - -proc run_method { method test {display 0} {run 1} \ - { outfile stdout } args } { - global __debug_on - global __debug_print - global __debug_test - global test_names - global parms - source ./include.tcl - - if {[catch { - if { $display } { - puts -nonewline $outfile "eval $test $method" - puts -nonewline $outfile " $parms($test) $args" - puts $outfile " ; verify_dir $testdir \"\" 1" - } - if { $run } { - check_handles $outfile - puts $outfile "[timestamp]" - eval $test $method $parms($test) $args - if { $__debug_print != 0 } { - puts $outfile "" - } - # verify all databases the test leaves behind - verify_dir $testdir "" 1 - if { $__debug_on != 0 } { - debug $__debug_test - } - } - flush stdout - flush stderr - } res] != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_method: $method $test: $theError" - } else { - error $theError; - } - } -} - -proc run_rpcmethod { method {largs ""} } { - global __debug_on - global __debug_print - global __debug_test - global rpc_tests - global parms - global is_envmethod - global rpc_svc - source ./include.tcl - - puts "run_rpcmethod: $method $largs" - - set save_largs $largs - set dpid [rpc_server_start] - puts "\tRun_rpcmethod.a: started server, pid $dpid" - remote_cleanup $rpc_server $rpc_testdir $testdir - - set 
home [file tail $rpc_testdir] - - set is_envmethod 1 - set use_txn 0 - if { [string first "txn" $method] != -1 } { - set use_txn 1 - } - if { $use_txn == 1 } { - set ntxns 32 - set i 1 - check_handles - remote_cleanup $rpc_server $rpc_testdir $testdir - set env [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000} -txn] - error_check_good env_open [is_valid_env $env] TRUE - - set stat [catch {eval txn001_suba $ntxns $env} res] - if { $stat == 0 } { - set stat [catch {eval txn001_subb $ntxns $env} res] - } - set stat [catch {eval txn003} res] - error_check_good envclose [$env close] 0 - } else { - foreach test $rpc_tests($rpc_svc) { - set stat [catch { - check_handles - remote_cleanup $rpc_server $rpc_testdir $testdir - # - # Set server cachesize to 128Mb. Otherwise - # some tests won't fit (like test084 -btree). - # - set env [eval {berkdb_env -create -mode 0644 \ - -home $home -server $rpc_server \ - -client_timeout 10000 \ - -cachesize {0 134217728 1}}] - error_check_good env_open \ - [is_valid_env $env] TRUE - set largs $save_largs - append largs " -env $env " - - puts "[timestamp]" - eval $test $method $parms($test) $largs - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - error_check_good envclose [$env close] 0 - set env "" - } res] - - if { $stat != 0} { - global errorInfo; - - puts "$res" - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - puts "FAIL:[timestamp]\ - run_rpcmethod: $method $test: $errorInfo" - } else { - puts $theError; - } - - catch { $env close } ignore - set env "" - tclkill $dpid - set dpid [rpc_server_start] - } - } - } - set is_envmethod 0 - tclkill $dpid -} - -proc run_rpcnoserver { method {largs ""} } { - global __debug_on - global __debug_print - global __debug_test - global test_names - global parms - global 
is_envmethod - source ./include.tcl - - puts "run_rpcnoserver: $method $largs" - - set save_largs $largs - remote_cleanup $rpc_server $rpc_testdir $testdir - set home [file tail $rpc_testdir] - - set is_envmethod 1 - set use_txn 0 - if { [string first "txn" $method] != -1 } { - set use_txn 1 - } - if { $use_txn == 1 } { - set ntxns 32 - set i 1 - check_handles - remote_cleanup $rpc_server $rpc_testdir $testdir - set env [eval {berkdb_env -create -mode 0644 -home $home \ - -server $rpc_server -client_timeout 10000} -txn] - error_check_good env_open [is_valid_env $env] TRUE - - set stat [catch {eval txn001_suba $ntxns $env} res] - if { $stat == 0 } { - set stat [catch {eval txn001_subb $ntxns $env} res] - } - error_check_good envclose [$env close] 0 - } else { - set stat [catch { - foreach test $test_names { - check_handles - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in \ - testparams.tcl; skipping." - continue - } - remote_cleanup $rpc_server $rpc_testdir $testdir - # - # Set server cachesize to 1Mb. Otherwise some - # tests won't fit (like test084 -btree). - # - set env [eval {berkdb_env -create -mode 0644 \ - -home $home -server $rpc_server \ - -client_timeout 10000 \ - -cachesize {0 1048576 1} }] - error_check_good env_open \ - [is_valid_env $env] TRUE - append largs " -env $env " - - puts "[timestamp]" - eval $test $method $parms($test) $largs - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - set largs $save_largs - error_check_good envclose [$env close] 0 - } - } res] - } - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_rpcnoserver: $method $i: $theError" - } else { - error $theError; - } - set is_envmethod 0 - } - -} - -# -# Run method tests in secure mode. 
-# -proc run_secmethod { method test {display 0} {run 1} \ - { outfile stdout } args } { - global passwd - global has_crypto - - # Skip secure mode tests if release does not support encryption. - if { $has_crypto == 0 } { - return - } - - set largs $args - append largs " -encryptaes $passwd " - eval run_method $method $test $display $run $outfile $largs -} - -# -# Run method tests each in its own, new secure environment. -# -proc run_secenv { method test {largs ""} } { - global __debug_on - global __debug_print - global __debug_test - global is_envmethod - global has_crypto - global test_names - global parms - global passwd - source ./include.tcl - - # Skip secure mode tests if release does not support encryption. - if { $has_crypto == 0 } { - return - } - - puts "run_secenv: $method $test $largs" - - set save_largs $largs - env_cleanup $testdir - set is_envmethod 1 - set stat [catch { - check_handles - set env [eval {berkdb_env -create -mode 0644 -home $testdir \ - -encryptaes $passwd -cachesize {0 1048576 1}}] - error_check_good env_open [is_valid_env $env] TRUE - append largs " -env $env " - - puts "[timestamp]" - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in\ - testparams.tcl; skipping." - continue - } - - # - # Run each test multiple times in the secure env. 
- # Once with a secure env + clear database - # Once with a secure env + secure database - # - eval $test $method $parms($test) $largs - append largs " -encrypt " - eval $test $method $parms($test) $largs - - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - set largs $save_largs - error_check_good envclose [$env close] 0 - error_check_good envremove [berkdb envremove \ - -home $testdir -encryptaes $passwd] 0 - } res] - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_secenv: $method $test: $theError" - } else { - error $theError; - } - set is_envmethod 0 - } - -} - -# -# Run replication method tests in master and client env. -# -proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \ - {do_sec 0} {do_oob 0} {largs "" } } { - source ./include.tcl - global __debug_on - global __debug_print - global __debug_test - global is_envmethod - global parms - global passwd - global has_crypto - - puts "run_reptest: $method $test $droppct $nclients $do_del $do_sec $do_oob $largs" - - env_cleanup $testdir - set is_envmethod 1 - set stat [catch { - if { $do_sec && $has_crypto } { - set envargs "-encryptaes $passwd" - append largs " -encrypt " - } else { - set envargs "" - } - check_handles - # - # This will set up the master and client envs - # and will return us the args to pass to the - # test. - - set largs [repl_envsetup \ - $envargs $largs $test $nclients $droppct $do_oob] - - puts "[timestamp]" - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in\ - testparams.tcl; skipping." 
- continue - } - puts -nonewline \ - "Repl: $test: dropping $droppct%, $nclients clients " - if { $do_del } { - puts -nonewline " with delete verification;" - } else { - puts -nonewline " no delete verification;" - } - if { $do_sec } { - puts -nonewline " with security;" - } else { - puts -nonewline " no security;" - } - if { $do_oob } { - puts -nonewline " with out-of-order msgs;" - } else { - puts -nonewline " no out-of-order msgs;" - } - puts "" - - eval $test $method $parms($test) $largs - - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - repl_envprocq $test $nclients $do_oob - repl_envver0 $test $method $nclients - if { $do_del } { - repl_verdel $test $method $nclients - } - repl_envclose $test $envargs - } res] - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_reptest: $method $test: $theError" - } else { - error $theError; - } - } - set is_envmethod 0 -} - -# -# Run replication method tests in master and client env. -# -proc run_repmethod { method test {numcl 0} {display 0} {run 1} \ - {outfile stdout} {largs ""} } { - source ./include.tcl - global __debug_on - global __debug_print - global __debug_test - global is_envmethod - global test_names - global parms - global has_crypto - global passwd - - set save_largs $largs - env_cleanup $testdir - - # Use an array for number of clients because we really don't - # want to evenly-weight all numbers of clients. Favor smaller - # numbers but test more clients occasionally. 
- set drop_list { 0 0 0 0 0 1 1 5 5 10 20 } - set drop_len [expr [llength $drop_list] - 1] - set client_list { 1 1 2 1 1 1 2 2 3 1 } - set cl_len [expr [llength $client_list] - 1] - - if { $numcl == 0 } { - set clindex [berkdb random_int 0 $cl_len] - set nclients [lindex $client_list $clindex] - } else { - set nclients $numcl - } - set drindex [berkdb random_int 0 $drop_len] - set droppct [lindex $drop_list $drindex] - set do_sec [berkdb random_int 0 1] - set do_oob [berkdb random_int 0 1] - set do_del [berkdb random_int 0 1] - - if { $display == 1 } { - puts $outfile "eval run_reptest $method $test $droppct \ - $nclients $do_del $do_sec $do_oob $largs" - } - if { $run == 1 } { - run_reptest $method $test $droppct $nclients $do_del \ - $do_sec $do_oob $largs - } -} - -# -# Run method tests, each in its own, new environment. (As opposed to -# run_envmethod1 which runs all the tests in a single environment.) -# -proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \ - { largs "" } } { - global __debug_on - global __debug_print - global __debug_test - global is_envmethod - global test_names - global parms - source ./include.tcl - - set save_largs $largs - set envargs "" - env_cleanup $testdir - - if { $display == 1 } { - puts $outfile "eval run_envmethod $method \ - $test 0 1 stdout $largs" - } - - # To run a normal test using system memory, call run_envmethod - # with the flag -shm. - set sindex [lsearch -exact $largs "-shm"] - if { $sindex >= 0 } { - if { [mem_chk " -system_mem -shm_key 1 "] == 1 } { - break - } else { - append envargs " -system_mem -shm_key 1 " - set largs [lreplace $largs $sindex $sindex] - } - } - - # Test for -thread option and pass to berkdb_env open. Leave in - # $largs because -thread can also be passed to an individual - # test as an arg. Double the number of lockers because a threaded - # env requires more than an ordinary env. 
- if { [lsearch -exact $largs "-thread"] != -1 } { - append envargs " -thread -lock_max_lockers 2000 " - } - - # Test for -alloc option and pass to berkdb_env open only. - # Remove from largs because -alloc is not an allowed test arg. - set aindex [lsearch -exact $largs "-alloc"] - if { $aindex >= 0 } { - append envargs " -alloc " - set largs [lreplace $largs $aindex $aindex] - } - - if { $run == 1 } { - set is_envmethod 1 - set stat [catch { - check_handles - set env [eval {berkdb_env -create -txn \ - -mode 0644 -home $testdir} $envargs] - error_check_good env_open [is_valid_env $env] TRUE - append largs " -env $env " - - puts "[timestamp]" - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in\ - testparams.tcl; skipping." - continue - } - eval $test $method $parms($test) $largs - - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - set largs $save_largs - error_check_good envclose [$env close] 0 - error_check_good envremove [berkdb envremove \ - -home $testdir] 0 - } res] - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_envmethod: $method $test: $theError" - } else { - error $theError; - } - } - set is_envmethod 0 - } -} - -proc run_recd { method test {run 1} {display 0} args } { - global __debug_on - global __debug_print - global __debug_test - global parms - global test_names - global log_log_record_types - global gen_upgrade_log - global upgrade_be - global upgrade_dir - global upgrade_method - global upgrade_name - source ./include.tcl - - if { $run == 1 } { - puts "run_recd: $method $test $parms($test) $args" - } - if {[catch { - if { $display } { - puts "eval $test $method $parms($test) $args" - } - if { $run } { - check_handles - set upgrade_method $method - set upgrade_name $test - puts 
"[timestamp]" - # By redirecting stdout to stdout, we make exec - # print output rather than simply returning it. - # By redirecting stderr to stdout too, we make - # sure everything winds up in the ALL.OUT file. - set ret [catch { exec $tclsh_path << \ - "source $test_path/test.tcl; \ - set log_log_record_types $log_log_record_types;\ - set gen_upgrade_log $gen_upgrade_log;\ - set upgrade_be $upgrade_be; \ - set upgrade_dir $upgrade_dir; \ - set upgrade_method $upgrade_method; \ - set upgrade_name $upgrade_name; \ - eval $test $method $parms($test) $args" \ - >&@ stdout - } res] - - # Don't die if the test failed; we want - # to just proceed. - if { $ret != 0 } { - puts "FAIL:[timestamp] $res" - } - - if { $__debug_print != 0 } { - puts "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - flush stdout - flush stderr - } - } res] != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_recd: $method: $theError" - } else { - error $theError; - } - } -} - -proc run_recds { {run 1} {display 0} args } { - source ./include.tcl - global log_log_record_types - global test_names - global gen_upgrade_log - global encrypt - - set log_log_record_types 1 - logtrack_init - - foreach method \ - "btree rbtree hash queue queueext recno frecno rrecno" { - check_handles -#set test_names(recd) "recd005 recd017" - foreach test $test_names(recd) { - # Skip recd017 for non-crypto upgrade testing. - # Run only recd017 for crypto upgrade testing. - if { $gen_upgrade_log == 1 && $test == "recd017" && \ - $encrypt == 0 } { - puts "Skipping recd017 for non-crypto run." - continue - } - if { $gen_upgrade_log == 1 && $test != "recd017" && \ - $encrypt == 1 } { - puts "Skipping $test for crypto run." 
- continue - } - if { [catch {eval \ - run_recd $method $test $run $display \ - $args} ret ] != 0 } { - puts $ret - } - if { $gen_upgrade_log == 1 } { - save_upgrade_files $testdir - } - } - } - # We can skip logtrack_summary during the crypto upgrade run - - # it doesn't introduce any new log types. - if { $run } { - if { $gen_upgrade_log == 0 || $encrypt == 0 } { - logtrack_summary - } - } - set log_log_record_types 0 -} - -proc run_all { { testname ALL } args } { - global test_names - global one_test - global has_crypto - source ./include.tcl - - fileremove -f ALL.OUT - - set one_test $testname - if { $one_test != "ALL" } { - # Source testparams again to adjust test_names. - source $test_path/testparams.tcl - } - - set exflgs [eval extractflags $args] - set flags [lindex $exflgs 1] - set display 1 - set run 1 - set am_only 0 - set parallel 0 - set nparalleltests 0 - set rflags {--} - foreach f $flags { - switch $f { - m { - set am_only 1 - } - n { - set display 1 - set run 0 - set rflags [linsert $rflags 0 "-n"] - } - } - } - - set o [open ALL.OUT a] - if { $run == 1 } { - puts -nonewline "Test suite run started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts [berkdb version -string] - - puts -nonewline $o "Test suite run started at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - puts $o [berkdb version -string] - } - close $o - # - # First run standard tests. Send in a -A to let run_std know - # that it is part of the "run_all" run, so that it doesn't - # print out start/end times. - # - lappend args -A - eval {run_std} $one_test $args - - set test_pagesizes [get_test_pagesizes] - set args [lindex $exflgs 0] - set save_args $args - - foreach pgsz $test_pagesizes { - set args $save_args - append args " -pagesize $pgsz -chksum" - if { $am_only == 0 } { - # Run recovery tests. - # - # XXX These don't actually work at multiple pagesizes; - # disable them for now. 
- # - # XXX These too are broken into separate tclsh - # instantiations so we don't require so much - # memory, but I think it's cleaner - # and more useful to do it down inside proc r than here, - # since "r recd" gets done a lot and needs to work. - # - # XXX See comment in run_std for why this only directs - # stdout and not stderr. Don't worry--the right stuff - # happens. - #puts "Running recovery tests with pagesize $pgsz" - #if [catch {exec $tclsh_path \ - # << "source $test_path/test.tcl; \ - # r $rflags recd $args" \ - # 2>@ stderr >> ALL.OUT } res] { - # set o [open ALL.OUT a] - # puts $o "FAIL: recd test:" - # puts $o $res - # close $o - #} - } - - # Access method tests. - # Run subdb tests with varying pagesizes too. - # XXX - # Broken up into separate tclsh instantiations so - # we don't require so much memory. - foreach method \ - "btree rbtree hash queue queueext recno frecno rrecno" { - puts "Running $method tests with pagesize $pgsz" - foreach sub {test sdb} { - foreach test $test_names($sub) { - if { $run == 0 } { - set o [open ALL.OUT a] - eval {run_method -$method \ - $test $display $run $o} \ - $args - close $o - } - if { $run } { - if [catch {exec $tclsh_path << \ - "global one_test; \ - set one_test $one_test; \ - source $test_path/test.tcl; \ - eval {run_method -$method \ - $test $display $run \ - stdout} $args" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: \ - -$method $test: $res" - close $o - } - } - } - } - } - } - set args $save_args - # - # Run access method tests at default page size in one env. 
- # - foreach method "btree rbtree hash queue queueext recno frecno rrecno" { - puts "Running $method tests in a txn env" - foreach sub {test sdb} { - foreach test $test_names($sub) { - if { $run == 0 } { - set o [open ALL.OUT a] - run_envmethod -$method $test $display \ - $run $o $args - close $o - } - if { $run } { - if [catch {exec $tclsh_path << \ - "global one_test; \ - set one_test $one_test; \ - source $test_path/test.tcl; \ - run_envmethod -$method $test \ - $display $run stdout $args" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: run_envmethod \ - $method $test: $res" - close $o - } - } - } - } - } - # - # Run access method tests at default page size in thread-enabled env. - # We're not truly running threaded tests, just testing the interface. - # - foreach method "btree rbtree hash queue queueext recno frecno rrecno" { - puts "Running $method tests in a threaded txn env" - foreach sub {test sdb} { - foreach test $test_names($sub) { - if { $run == 0 } { - set o [open ALL.OUT a] - eval {run_envmethod -$method $test \ - $display $run $o -thread} - close $o - } - if { $run } { - if [catch {exec $tclsh_path << \ - "global one_test; \ - set one_test $one_test; \ - source $test_path/test.tcl; \ - eval {run_envmethod -$method $test \ - $display $run stdout -thread}" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: run_envmethod \ - $method $test -thread: $res" - close $o - } - } - } - } - } - # - # Run access method tests at default page size with -alloc enabled. 
- # - foreach method "btree rbtree hash queue queueext recno frecno rrecno" { - puts "Running $method tests in an env with -alloc" - foreach sub {test sdb} { - foreach test $test_names($sub) { - if { $run == 0 } { - set o [open ALL.OUT a] - eval {run_envmethod -$method $test \ - $display $run $o -alloc} - close $o - } - if { $run } { - if [catch {exec $tclsh_path << \ - "global one_test; \ - set one_test $one_test; \ - source $test_path/test.tcl; \ - eval {run_envmethod -$method $test \ - $display $run stdout -alloc}" \ - >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: run_envmethod \ - $method $test -alloc: $res" - close $o - } - } - } - } - } - # - # Run tests using proc r. The replication tests have been - # moved from run_std to run_all. - # - set test_list [list {"replication" "rep"}] - # - # If release supports encryption, run security tests. - # - if { $has_crypto == 1 } { - lappend test_list {"security" "sec"} - } - # - # If configured for RPC, then run rpc tests too. - # - if { [file exists ./berkeley_db_svc] || - [file exists ./berkeley_db_cxxsvc] || - [file exists ./berkeley_db_javasvc] } { - lappend test_list {"RPC" "rpc"} - } - - foreach pair $test_list { - set msg [lindex $pair 0] - set cmd [lindex $pair 1] - puts "Running $msg tests" - if [catch {exec $tclsh_path << \ - "global one_test; set one_test $one_test; \ - source $test_path/test.tcl; \ - r $rflags $cmd $args" >>& ALL.OUT } res] { - set o [open ALL.OUT a] - puts $o "FAIL: $cmd test: $res" - close $o - } - } - - # If not actually running, no need to check for failure. 
- if { $run == 0 } { - return - } - - set failed 0 - set o [open ALL.OUT r] - while { [gets $o line] >= 0 } { - if { [regexp {^FAIL} $line] != 0 } { - set failed 1 - } - } - close $o - set o [open ALL.OUT a] - if { $failed == 0 } { - puts "Regression Tests Succeeded" - puts $o "Regression Tests Succeeded" - } else { - puts "Regression Tests Failed; see ALL.OUT for log" - puts $o "Regression Tests Failed" - } - - puts -nonewline "Test suite run completed at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts -nonewline $o "Test suite run completed at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - close $o -} - -# -# Run method tests in one environment. (As opposed to run_envmethod -# which runs each test in its own, new environment.) -# -proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } { - global __debug_on - global __debug_print - global __debug_test - global is_envmethod - global test_names - global parms - source ./include.tcl - - if { $run == 1 } { - puts "run_envmethod1: $method $args" - } - - set is_envmethod 1 - if { $run == 1 } { - check_handles - env_cleanup $testdir - error_check_good envremove [berkdb envremove -home $testdir] 0 - set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \ - {-mode 0644 -home $testdir}] - error_check_good env_open [is_valid_env $env] TRUE - append largs " -env $env " - } - - if { $display } { - # The envmethod1 tests can't be split up, since they share - # an env. - puts $outfile "eval run_envmethod1 $method $args" - } - - set stat [catch { - foreach test $test_names(test) { - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in\ - testparams.tcl; skipping." 
- continue - } - if { $run } { - puts $outfile "[timestamp]" - eval $test $method $parms($test) $largs - if { $__debug_print != 0 } { - puts $outfile "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - } - flush stdout - flush stderr - } - } res] - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_envmethod: $method $test: $theError" - } else { - error $theError; - } - } - set stat [catch { - foreach test $test_names(test) { - if { [info exists parms($test)] != 1 } { - puts stderr "$test disabled in\ - testparams.tcl; skipping." - continue - } - if { $run } { - puts $outfile "[timestamp]" - eval $test $method $parms($test) $largs - if { $__debug_print != 0 } { - puts $outfile "" - } - if { $__debug_on != 0 } { - debug $__debug_test - } - } - flush stdout - flush stderr - } - } res] - if { $stat != 0} { - global errorInfo; - - set fnl [string first "\n" $errorInfo] - set theError [string range $errorInfo 0 [expr $fnl - 1]] - if {[string first FAIL $errorInfo] == -1} { - error "FAIL:[timestamp]\ - run_envmethod1: $method $test: $theError" - } else { - error $theError; - } - } - if { $run == 1 } { - error_check_good envclose [$env close] 0 - check_handles $outfile - } - set is_envmethod 0 - -} - -# Run the secondary index tests. -proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } { - global test_names - global testdir - global verbose_check_secondaries - set verbose_check_secondaries $verbose - # Standard number of secondary indices to create if a single-element - # list of methods is passed into the secondary index tests. - global nsecondaries - set nsecondaries 2 - - # Run basic tests with a single secondary index and a small number - # of keys, then again with a larger number of keys. 
(Note that - # we can't go above 5000, since we use two items from our - # 10K-word list for each key/data pair.) - foreach n { 200 5000 } { - foreach pm { btree hash recno frecno queue queueext } { - foreach sm { dbtree dhash ddbtree ddhash btree hash } { - foreach test $test_names(si) { - if { $display } { - puts -nonewline $outfile \ - "eval $test {\[list\ - $pm $sm $sm\]} $n ;" - puts $outfile " verify_dir \ - $testdir \"\" 1" - } - if { $run } { - check_handles $outfile - eval $test \ - {[list $pm $sm $sm]} $n - verify_dir $testdir "" 1 - } - } - } - } - } - - # Run tests with 20 secondaries. - foreach pm { btree hash } { - set methlist [list $pm] - for { set j 1 } { $j <= 20 } {incr j} { - # XXX this should incorporate hash after #3726 - if { $j % 2 == 0 } { - lappend methlist "dbtree" - } else { - lappend methlist "ddbtree" - } - } - foreach test $test_names(si) { - if { $display } { - puts "eval $test {\[list $methlist\]} 500" - } - if { $run } { - eval $test {$methlist} 500 - } - } - } -} - -# Run secondary index join test. (There's no point in running -# this with both lengths, the primary is unhappy for now with fixed- -# length records (XXX), and we need unsorted dups in the secondaries.) 
-proc sijoin { {display 0} {run 1} {outfile stdout} } { - foreach pm { btree hash recno } { - if { $display } { - foreach sm { btree hash } { - puts $outfile "eval sijointest\ - {\[list $pm $sm $sm\]} 1000" - } - puts $outfile "eval sijointest\ - {\[list $pm btree hash\]} 1000" - puts $outfile "eval sijointest\ - {\[list $pm hash btree\]} 1000" - } - if { $run } { - foreach sm { btree hash } { - eval sijointest {[list $pm $sm $sm]} 1000 - } - eval sijointest {[list $pm btree hash]} 1000 - eval sijointest {[list $pm hash btree]} 1000 - } - } -} - -proc run { proc_suffix method {start 1} {stop 999} } { - global test_names - - switch -exact -- $proc_suffix { - envmethod - - method - - recd - - repmethod - - reptest - - secenv - - secmethod { - # Run_recd runs the recd tests, all others - # run the "testxxx" tests. - if { $proc_suffix == "recd" } { - set testtype recd - } else { - set testtype test - } - - for { set i $start } { $i <= $stop } { incr i } { - set name [format "%s%03d" $testtype $i] - # If a test number is missing, silently skip - # to next test; sparse numbering is allowed. - if { [lsearch -exact $test_names($testtype) \ - $name] == -1 } { - continue - } - run_$proc_suffix $method $name - } - } - default { - puts "$proc_suffix is not set up with to be used with run" - } - } -} - - -# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one -# of these is the default pagesize. We don't want to run all the AM tests -# twice, so figure out what the default page size is, then return the -# other two. -proc get_test_pagesizes { } { - # Create an in-memory database. 
- set db [berkdb_open -create -btree] - error_check_good gtp_create [is_valid_db $db] TRUE - set statret [$db stat] - set pgsz 0 - foreach pair $statret { - set fld [lindex $pair 0] - if { [string compare $fld {Page size}] == 0 } { - set pgsz [lindex $pair 1] - } - } - - error_check_good gtp_close [$db close] 0 - - error_check_bad gtp_pgsz $pgsz 0 - switch $pgsz { - 512 { return {8192 65536} } - 8192 { return {512 65536} } - 65536 { return {512 8192} } - default { return {512 8192 65536} } - } - error_check_good NOTREACHED 0 1 -} diff --git a/storage/bdb/test/test001.tcl b/storage/bdb/test/test001.tcl deleted file mode 100644 index 2d7130fcbbc..00000000000 --- a/storage/bdb/test/test001.tcl +++ /dev/null @@ -1,222 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test001.tcl,v 11.38 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test001 -# TEST Small keys/data -# TEST Put/get per key -# TEST Dump file -# TEST Close, reopen -# TEST Dump file -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. -proc test001 { method {nentries 10000} \ - {start 0} {skip 0} {tnum "001"} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - # Create the database and open the dictionary - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - # If we are not using an external env, then test setting - # the database cache size and using multiple caches. 
- set txnenv 0 - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - append args " -cachesize {0 1048576 3} " - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test$tnum: $method ($args) $nentries equal key/data pairs" - set did [open $dict] - - # The "start" variable determines the record number to start - # with, if we're using record numbers. The "skip" variable - # determines the dictionary entry to start with. - # In normal use, skip will match start. - - puts "\tTest$tnum: Starting at $start with dictionary entry $skip" - if { $skip != 0 } { - for { set count 0 } { $count < $skip } { incr count } { - gets $did str - } - } - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set temp $testdir/temp - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args $omethod $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - set checkfunc test001_recno.check - append gflags " -recno" - } else { - set checkfunc test001.check - } - puts "\tTest$tnum.a: put/get loop" - # Here is the loop where we put and get each key/data pair - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1 + $start] - if { 0xffffffff > 0 && $key > 0xffffffff } { - set key [expr $key - 0x100000000] - } - if { $key == 0 || $key - 0xffffffff == 1 } { - incr key - incr count - } - set kvals($key) [pad_data $method $str] - } else { - set key $str - set str [reverse $str] - } - if { $txnenv == 1 } { - set t [$env txn] - 
error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval \ - {$db put} $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - if { $count % 50 == 0 } { - error_check_good txn_checkpoint($count) \ - [$env txn_checkpoint] 0 - } - } - - set ret [eval {$db get} $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - - # Test DB_GET_BOTH for success - set ret [$db get -get_both $key [pad_data $method $str]] - error_check_good \ - getboth $ret [list [list $key [pad_data $method $str]]] - - # Test DB_GET_BOTH for failure - set ret [$db get -get_both $key [pad_data $method BAD$str]] - error_check_good getbothBAD [llength $ret] 0 - - incr count - } - close $did - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # Now we will get each key from the DB and compare the results - # to the original. - - puts "\tTest$tnum.b: dump file" - dump_file $db $txn $t1 $checkfunc - # - # dump_file should just have been "get" calls, so - # aborting a get should really be a no-op. Abort - # just for the fun of it. 
- if { $txnenv == 1 } { - error_check_good txn [$t abort] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for { set i 1 } { $i <= $nentries } { incr i } { - set j [expr $i + $start] - if { 0xffffffff > 0 && $j > 0xffffffff } { - set j [expr $j - 0x100000000] - } - if { $j == 0 } { - incr i - incr j - } - puts $oid $j - } - close $oid - } else { - filehead [expr $nentries + $start] $dict $t2 [expr $start + 1] - } - filesort $t2 $temp - file rename -force $temp $t2 - filesort $t1 $t3 - - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tTest$tnum.c: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction "-first" "-next" - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Test$tnum:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. 
- puts "\tTest$tnum.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction "-last" "-prev" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 -} - -# Check function for test001; keys and data are identical -proc test001.check { key data } { - error_check_good "key/data mismatch" $data [reverse $key] -} - -proc test001_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/test002.tcl b/storage/bdb/test/test002.tcl deleted file mode 100644 index 265f0640ff3..00000000000 --- a/storage/bdb/test/test002.tcl +++ /dev/null @@ -1,161 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test002.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test002 -# TEST Small keys/medium data -# TEST Put/get per key -# TEST Dump file -# TEST Close, reopen -# TEST Dump file -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and a fixed, medium length data string; -# TEST retrieve each. After all are entered, retrieve all; compare output -# TEST to original. Close file, reopen, do retrieve and re-verify. - -proc test002 { method {nentries 10000} args } { - global datastr - global pad_datastr - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test002.db - set env NULL - } else { - set testfile test002.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - # Create the database and open the dictionary - puts "Test002: $method ($args) $nentries key pairs" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - - if { [is_record_based $method] == 1 } { - append gflags "-recno" - } - set pad_datastr [pad_data $method $datastr] - puts "\tTest002.a: put/get loop" - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set ret [eval {$db get} $gflags {$key}] - - error_check_good get $ret [list [list $key [pad_data $method $datastr]]] - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest002.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test002.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - filesort $t2 $t3 - file rename -force $t3 $t2 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - } - filesort $t1 $t3 - - error_check_good Test002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again. - puts "\tTest002.c: close, open, and dump file" - open_and_dump_file $testfile $env $t1 test002.check \ - dump_file_direction "-first" "-next" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - error_check_good Test002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. - puts "\tTest002.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 test002.check \ - dump_file_direction "-last" "-prev" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - error_check_good Test002:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 -} - -# Check function for test002; data should be fixed are identical -proc test002.check { key data } { - global pad_datastr - error_check_good "data mismatch for key $key" $data $pad_datastr -} diff --git a/storage/bdb/test/test003.tcl b/storage/bdb/test/test003.tcl deleted file mode 100644 index 4e9c0125d3c..00000000000 --- a/storage/bdb/test/test003.tcl +++ /dev/null @@ -1,205 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test003.tcl,v 11.28 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test003 -# TEST Small keys/large data -# TEST Put/get per key -# TEST Dump file -# TEST Close, reopen -# TEST Dump file -# TEST -# TEST Take the source files and dbtest executable and enter their names -# TEST as the key with their contents as data. After all are entered, -# TEST retrieve all; compare output to original. Close file, reopen, do -# TEST retrieve and re-verify. -proc test003 { method args} { - global names - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if {[is_fixed_length $method] == 1} { - puts "Test003 skipping for method $method" - return - } - puts "Test003: $method ($args) filename=key filecontents=data pairs" - - # Create the database and open the dictionary - set limit 0 - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test003.db - set env NULL - } else { - set testfile test003.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - set limit 100 - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args $omethod $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set pflags "" - set gflags "" - set txn "" - if { [is_record_based $method] == 1 } { - set checkfunc test003_recno.check - append gflags "-recno" - } else { - set checkfunc test003.check - } - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list] - set len [llength $file_list] - puts "\tTest003.a: put/get loop $len entries" - set count 0 - foreach f $file_list { - if { [string compare [file type $f] "file"] != 0 } { - continue - } - - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set names([expr $count + 1]) $f - } else { - set key $f - } - - # Should really catch errors - set fid [open $f r] - fconfigure $fid -translation binary - set data [read $fid] - close $fid - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Should really catch errors - set fid [open $t4 w] - fconfigure $fid -translation binary - if [catch {eval {$db get} $gflags {$key}} data] { - puts -nonewline $fid $data - } else { - # Data looks like {{key data}} - set key [lindex [lindex $data 0] 0] - set data [lindex [lindex $data 0] 1] - puts -nonewline $fid [pad_data $method $data] - } - close $fid - - error_check_good \ - Test003:diff($f,$t4) [filecmp $f $t4] 0 - - incr 
count - } - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest003.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_bin_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the entries in the - # current directory - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $count} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set oid [open $t2.tmp w] - foreach f $file_list { - if { [string compare [file type $f] "file"] != 0 } { - continue - } - puts $oid $f - } - close $oid - filesort $t2.tmp $t2 - fileremove $t2.tmp - filesort $t1 $t3 - } - - error_check_good \ - Test003:diff($t3,$t2) [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again. - puts "\tTest003.c: close, open, and dump file" - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_bin_file_direction "-first" "-next" - - if { [is_record_based $method] == 1 } { - filesort $t1 $t3 -n - } - - error_check_good \ - Test003:diff($t3,$t2) [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. 
- puts "\tTest003.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_bin_file_direction "-last" "-prev" - - if { [is_record_based $method] == 1 } { - filesort $t1 $t3 -n - } - - error_check_good \ - Test003:diff($t3,$t2) [filecmp $t3 $t2] 0 -} - -# Check function for test003; key should be file name; data should be contents -proc test003.check { binfile tmpfile } { - source ./include.tcl - - error_check_good Test003:datamismatch($binfile,$tmpfile) \ - [filecmp $binfile $tmpfile] 0 -} -proc test003_recno.check { binfile tmpfile } { - global names - source ./include.tcl - - set fname $names($binfile) - error_check_good key"$binfile"_exists [info exists names($binfile)] 1 - error_check_good Test003:datamismatch($fname,$tmpfile) \ - [filecmp $fname $tmpfile] 0 -} diff --git a/storage/bdb/test/test004.tcl b/storage/bdb/test/test004.tcl deleted file mode 100644 index 598e3eb2299..00000000000 --- a/storage/bdb/test/test004.tcl +++ /dev/null @@ -1,169 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test004.tcl,v 11.24 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test004 -# TEST Small keys/medium data -# TEST Put/get per key -# TEST Sequential (cursor) get/delete -# TEST -# TEST Check that cursor operations work. Create a database. -# TEST Read through the database sequentially using cursors and -# TEST delete each element. -proc test004 { method {nentries 10000} {reopen "004"} {build_only 0} args} { - source ./include.tcl - - set do_renumber [is_rrecno $method] - set args [convert_args $method $args] - set omethod [convert_method $method] - - set tnum test$reopen - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/$tnum.db - set env NULL - } else { - set testfile $tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - - puts -nonewline "$tnum:\ - $method ($args) $nentries delete small key; medium data pairs" - if {$reopen == "005"} { - puts "(with close)" - } else { - puts "" - } - - # Create the database and open the dictionary - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - # Here is the loop where we put and get each key/data pair - set kvals "" - puts "\tTest$reopen.a: put/get loop" - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - lappend kvals $str - } else { - set key $str - } - - set datastr [ make_data_str $str ] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags \ - {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set ret [eval {$db get} $gflags {$key}] - error_check_good "$tnum:put" $ret \ - [list [list $key [pad_data $method $datastr]]] - incr count - } - close $did - if { $build_only == 1 } { - return $db - } - if { $reopen == "005" } { - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $args {$testfile}] - error_check_good dbopen 
[is_valid_db $db] TRUE - } - puts "\tTest$reopen.b: get/delete loop" - # Now we will get each key from the DB and compare the results - # to the original, then delete it. - set outf [open $t1 w] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set c [eval {$db cursor} $txn] - - set count 0 - for {set d [$c get -first] } { [llength $d] != 0 } { - set d [$c get -next] } { - set k [lindex [lindex $d 0] 0] - set d2 [lindex [lindex $d 0] 1] - if { [is_record_based $method] == 1 } { - set datastr \ - [make_data_str [lindex $kvals [expr $k - 1]]] - } else { - set datastr [make_data_str $k] - } - error_check_good $tnum:$k $d2 [pad_data $method $datastr] - puts $outf $k - $c del - if { [is_record_based $method] == 1 && \ - $do_renumber == 1 } { - set kvals [lreplace $kvals 0 0] - } - incr count - } - close $outf - error_check_good curs_close [$c close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now compare the keys to see if they match the dictionary - if { [is_record_based $method] == 1 } { - error_check_good test$reopen:keys_deleted $count $nentries - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - error_check_good Test$reopen:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - } - - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test005.tcl b/storage/bdb/test/test005.tcl deleted file mode 100644 index e3972b5b127..00000000000 --- a/storage/bdb/test/test005.tcl +++ /dev/null @@ -1,19 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test005.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test005 -# TEST Small keys/medium data -# TEST Put/get per key -# TEST Close, reopen -# TEST Sequential (cursor) get/delete -# TEST -# TEST Check that cursor operations work. Create a database; close -# TEST it and reopen it. 
Then read through the database sequentially -# TEST using cursors and delete each element. -proc test005 { method {nentries 10000} args } { - eval {test004 $method $nentries "005" 0} $args -} diff --git a/storage/bdb/test/test006.tcl b/storage/bdb/test/test006.tcl deleted file mode 100644 index 4107bbf090c..00000000000 --- a/storage/bdb/test/test006.tcl +++ /dev/null @@ -1,195 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test006.tcl,v 11.26 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test006 -# TEST Small keys/medium data -# TEST Put/get per key -# TEST Keyed delete and verify -# TEST -# TEST Keyed delete test. -# TEST Create database. -# TEST Go through database, deleting all entries by key. -# TEST Then do the same for unsorted and sorted dups. -proc test006 { method {nentries 10000} {reopen 0} {tnum "006"} \ - {ndups 5} args } { - - test006_body $method $nentries $reopen $tnum 1 "" "" $args - - # For methods supporting dups, run the test with sorted and - # with unsorted dups. - if { [is_btree $method] == 1 || [is_hash $method] == 1 } { - foreach {sort flags} {unsorted -dup sorted "-dup -dupsort"} { - test006_body $method $nentries $reopen \ - $tnum $ndups $sort $flags $args - } - } -} - -proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \ - {ndups 5} sort flags {largs ""} } { - global is_je_test - source ./include.tcl - - set do_renumber [is_rrecno $method] - set largs [convert_args $method $largs] - set omethod [convert_method $method] - - set tname Test$tnum - set dbname test$tnum - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $largs "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set basename $testdir/$dbname - set env NULL - } else { - set basename $dbname - incr eindex - set env [lindex $largs $eindex] - if { $is_je_test && $sort == "unsorted" } { - puts "Test$tnum skipping $sort duplicates for JE" - return - } - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append largs " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts -nonewline "$tname: $method ($flags $largs) " - puts -nonewline "$nentries equal small key; medium data pairs" - if {$reopen == 1} { - puts " (with close)" - } else { - puts "" - } - - set pflags "" - set gflags "" - set txn "" - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - cleanup $testdir $env - - # Here is the loop where we put and get each key/data pair. - - set count 0 - set testfile $basename$sort.db - set db [eval {berkdb_open -create \ - -mode 0644} $largs $flags {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\t$tname.a: put/get loop" - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1 ] - } else { - set key $str - } - - set str [make_data_str $str] - for { set j 1 } { $j <= $ndups } {incr j} { - set datastr $j$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn \ - [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags \ - {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn \ - [$t commit] 0 - } - } - incr count - } - close $did - - # Close and reopen database, if testing reopen. 
- - if { $reopen == 1 } { - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $largs {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - } - - # Now we will get each key from the DB and compare the results - # to the original, then delete it. - - puts "\t$tname.b: get/delete loop" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - set i 1 - for { set ret [$dbc get -first] } \ - { [string length $ret] != 0 } \ - { set ret [$dbc get -next] } { - set key [lindex [lindex $ret 0] 0] - set data [lindex [lindex $ret 0] 1] - if { $i == 1 } { - set curkey $key - } - error_check_good seq_get:key $key $curkey - - if { $i == $ndups } { - set i 1 - } else { - incr i - } - - # Now delete the key - set ret [$dbc del] - error_check_good db_del:$key $ret 0 - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\t$tname.c: verify empty file" - # Double check that file is now empty - set db [eval {berkdb_open} $largs $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - set ret [$dbc get -first] - error_check_good get_on_empty [string length $ret] 0 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } -error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test007.tcl b/storage/bdb/test/test007.tcl deleted file mode 100644 index 6cb57495add..00000000000 --- a/storage/bdb/test/test007.tcl +++ /dev/null @@ -1,19 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test007.tcl,v 11.12 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test007 -# TEST Small keys/medium data -# TEST Put/get per key -# TEST Close, reopen -# TEST Keyed delete -# TEST -# TEST Check that delete operations work. Create a database; close -# TEST database and reopen it. Then issues delete by key for each -# TEST entry. (Test006 plus reopen) -proc test007 { method {nentries 10000} {tnum "007"} {ndups 5} args} { - eval {test006 $method $nentries 1 $tnum $ndups} $args -} diff --git a/storage/bdb/test/test008.tcl b/storage/bdb/test/test008.tcl deleted file mode 100644 index d798d3fb22d..00000000000 --- a/storage/bdb/test/test008.tcl +++ /dev/null @@ -1,200 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test008.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test008 -# TEST Small keys/large data -# TEST Put/get per key -# TEST Loop through keys by steps (which change) -# TEST ... delete each key at step -# TEST ... add each key back -# TEST ... change step -# TEST Confirm that overflow pages are getting reused -# TEST -# TEST Take the source files and dbtest executable and enter their names as -# TEST the key with their contents as data. After all are entered, begin -# TEST looping through the entries; deleting some pairs and then readding them. 
-proc test008 { method {reopen "008"} {debug 0} args} { - source ./include.tcl - - set tnum test$reopen - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 } { - puts "Test$reopen skipping for method $method" - return - } - - puts -nonewline "$tnum: $method filename=key filecontents=data pairs" - if {$reopen == "009"} { - puts "(with close)" - } else { - puts "" - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/$tnum.db - set env NULL - } else { - set testfile $tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - cleanup $testdir $env - - set db [eval {berkdb_open -create -mode 0644} \ - $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list] - - set count 0 - puts "\tTest$reopen.a: Initial put/get loop" - foreach f $file_list { - set names($count) $f - set key $f - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - put_file $db $txn $pflags $f - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - get_file $db $txn $gflags $f $t4 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good Test$reopen:diff($f,$t4) \ - [filecmp $f $t4] 0 - - incr count - } - - if {$reopen == 
"009"} { - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - } - - # Now we will get step through keys again (by increments) and - # delete all the entries, then re-insert them. - - puts "\tTest$reopen.b: Delete re-add loop" - foreach i "1 2 4 8 16" { - for {set ndx 0} {$ndx < $count} { incr ndx $i} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db del} $txn {$names($ndx)}] - error_check_good db_del:$names($ndx) $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - for {set ndx 0} {$ndx < $count} { incr ndx $i} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - put_file $db $txn $pflags $names($ndx) - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - } - - if {$reopen == "009"} { - error_check_good db_close [$db close] 0 - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - } - - # Now, reopen the file and make sure the key/data pairs look right. - puts "\tTest$reopen.c: Dump contents forward" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_bin_file $db $txn $t1 test008.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set oid [open $t2.tmp w] - foreach f $file_list { - puts $oid $f - } - close $oid - filesort $t2.tmp $t2 - fileremove $t2.tmp - filesort $t1 $t3 - - error_check_good Test$reopen:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. 
- puts "\tTest$reopen.d: Dump contents backward" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev" - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - filesort $t1 $t3 - - error_check_good Test$reopen:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - error_check_good close:$db [$db close] 0 -} - -proc test008.check { binfile tmpfile } { - global tnum - source ./include.tcl - - error_check_good diff($binfile,$tmpfile) \ - [filecmp $binfile $tmpfile] 0 -} diff --git a/storage/bdb/test/test009.tcl b/storage/bdb/test/test009.tcl deleted file mode 100644 index 258b01277ec..00000000000 --- a/storage/bdb/test/test009.tcl +++ /dev/null @@ -1,18 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test009.tcl,v 11.11 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test009 -# TEST Small keys/large data -# TEST Same as test008; close and reopen database -# TEST -# TEST Check that we reuse overflow pages. Create database with lots of -# TEST big key/data pairs. Go through and delete and add keys back -# TEST randomly. Then close the DB and make sure that we have everything -# TEST we think we should. -proc test009 { method args} { - eval {test008 $method "009" 0} $args -} diff --git a/storage/bdb/test/test010.tcl b/storage/bdb/test/test010.tcl deleted file mode 100644 index bbf6fba679f..00000000000 --- a/storage/bdb/test/test010.tcl +++ /dev/null @@ -1,176 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test010.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test010 -# TEST Duplicate test -# TEST Small key/data pairs. -# TEST -# TEST Use the first 10,000 entries from the dictionary. 
-# TEST Insert each with self as key and data; add duplicate records for each. -# TEST After all are entered, retrieve all; verify output. -# TEST Close file, reopen, do retrieve and re-verify. -# TEST This does not work for recno -proc test010 { method {nentries 10000} {ndups 5} {tnum "010"} args } { - source ./include.tcl - - set omethod $method - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - puts "Test$tnum: $method ($args) $nentries \ - small $ndups dup key/data pairs" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644 -dup} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - for { set i 1 } { $i <= $ndups } { incr i } { - set datastr $i:$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Now retrieve all the keys matching this key - set x 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - for {set ret [$dbc get "-set" $str]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get "-next"] } { - if {[llength $ret] == 0} { - break - } - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - error_check_good "Test$tnum:get" $d $str - set id [ id_of $datastr ] - error_check_good "Test$tnum:dup#" $id $x - incr x - } - error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.a: Checking file for correct duplicates" - set dlist "" - for { set i 1 } { $i <= $ndups } {incr i} { - lappend dlist $i - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now compare the keys to see if they match the dictionary entries - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - error_check_good db_close [$db close] 0 - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tTest$tnum.b: Checking file for correct duplicates after close" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now compare the keys to see if they match the dictionary entries - filesort $t1 $t3 - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test011.tcl b/storage/bdb/test/test011.tcl deleted file mode 100644 index ad8439011c8..00000000000 --- a/storage/bdb/test/test011.tcl +++ /dev/null @@ -1,470 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test011.tcl,v 11.30 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test011 -# TEST Duplicate test -# TEST Small key/data pairs. -# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER. -# TEST To test off-page duplicates, run with small pagesize. -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; add duplicate records for each. 
-# TEST Then do some key_first/key_last add_before, add_after operations. -# TEST This does not work for recno -# TEST -# TEST To test if dups work when they fall off the main page, run this with -# TEST a very tiny page size. -proc test011 { method {nentries 10000} {ndups 5} {tnum "011"} args } { - global dlist - global rand_init - source ./include.tcl - - set dlist "" - - if { [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - if { [is_record_based $method] == 1 } { - test011_recno $method $nentries $tnum $args - return - } - if {$ndups < 5} { - set ndups 5 - } - - set args [convert_args $method $args] - set omethod [convert_method $method] - - berkdb srand $rand_init - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - - puts -nonewline "Test$tnum: $method $nentries small $ndups dup " - puts "key/data pairs, cursor ops" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set db [eval {berkdb_open -create \ - -mode 0644} [concat $args "-dup"] {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - # We will add dups with values 1, 3, ... $ndups. 
Then we'll add - # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using - # add before and add after. - puts "\tTest$tnum.a: put and get duplicate keys." - set i "" - for { set i 1 } { $i <= $ndups } { incr i 2 } { - lappend dlist $i - } - set maxodd $i - while { [gets $did str] != -1 && $count < $nentries } { - for { set i 1 } { $i <= $ndups } { incr i 2 } { - set datastr $i:$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags {$str $datastr}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Now retrieve all the keys matching this key - set x 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - for {set ret [$dbc get "-set" $str ]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get "-next"] } { - if {[llength $ret] == 0} { - break - } - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - - error_check_good Test$tnum:put $d $str - set id [ id_of $datastr ] - error_check_good Test$tnum:dup# $id $x - incr x 2 - } - error_check_good Test$tnum:numdups $x $maxodd - error_check_good curs_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest$tnum.b: \ - traverse entire file checking duplicates before close." 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now compare the keys to see if they match the dictionary entries - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tTest$tnum.c: \ - traverse entire file checking duplicates after close." - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now compare the keys to see if they match the dictionary entries - filesort $t1 $t3 - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tTest$tnum.d: Testing key_first functionality" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - add_dup $db $txn $nentries "-keyfirst" 0 0 - set dlist [linsert $dlist 0 0] - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - puts "\tTest$tnum.e: Testing key_last functionality" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0 - lappend dlist [expr $maxodd - 1] - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - puts "\tTest$tnum.f: Testing add_before functionality" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - add_dup $db $txn $nentries "-before" 2 3 - set dlist [linsert $dlist 2 
2] - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - puts "\tTest$tnum.g: Testing add_after functionality" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - add_dup $db $txn $nentries "-after" 4 4 - set dlist [linsert $dlist 4 4] - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good db_close [$db close] 0 -} - -proc add_dup {db txn nentries flag dataval iter} { - source ./include.tcl - - set dbc [eval {$db cursor} $txn] - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - set datastr $dataval:$str - set ret [$dbc get "-set" $str] - error_check_bad "cget(SET)" [is_substr $ret Error] 1 - for { set i 1 } { $i < $iter } { incr i } { - set ret [$dbc get "-next"] - error_check_bad "cget(NEXT)" [is_substr $ret Error] 1 - } - - if { [string compare $flag "-before"] == 0 || - [string compare $flag "-after"] == 0 } { - set ret [$dbc put $flag $datastr] - } else { - set ret [$dbc put $flag $str $datastr] - } - error_check_good "$dbc put $flag" $ret 0 - incr count - } - close $did - $dbc close -} - -proc test011_recno { method {nentries 10000} {tnum "011"} largs } { - global dlist - source ./include.tcl - - set largs [convert_args $method $largs] - set omethod [convert_method $method] - set renum [is_rrecno $method] - - puts "Test$tnum: \ - $method ($largs) $nentries test cursor insert functionality" - - # Create the database and open the dictionary - set eindex [lsearch -exact $largs "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- set txnenv 0 - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $largs $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append largs " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - if {$renum == 1} { - append largs " -renumber" - } - set db [eval {berkdb_open \ - -create -mode 0644} $largs {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # The basic structure of the test is that we pick a random key - # in the database and then add items before, after, ?? it. The - # trickiness is that with RECNO, these are not duplicates, they - # are creating new keys. Therefore, every time we do this, the - # keys assigned to other values change. For this reason, we'll - # keep the database in tcl as a list and insert properly into - # it to verify that the right thing is happening. If we do not - # have renumber set, then the BEFORE and AFTER calls should fail. 
- - # Seed the database with an initial record - gets $did str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {1 [chop_data $method $str]}] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good put $ret 0 - set count 1 - - set dlist "NULL $str" - - # Open a cursor - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - puts "\tTest$tnum.a: put and get entries" - while { [gets $did str] != -1 && $count < $nentries } { - # Pick a random key - set key [berkdb random_int 1 $count] - set ret [$dbc get -set $key] - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_good cget:SET:key $k $key - error_check_good \ - cget:SET $d [pad_data $method [lindex $dlist $key]] - - # Current - set ret [$dbc put -current [chop_data $method $str]] - error_check_good cput:$key $ret 0 - set dlist [lreplace $dlist $key $key [pad_data $method $str]] - - # Before - if { [gets $did str] == -1 } { - continue; - } - - if { $renum == 1 } { - set ret [$dbc put \ - -before [chop_data $method $str]] - error_check_good cput:$key:BEFORE $ret $key - set dlist [linsert $dlist $key $str] - incr count - - # After - if { [gets $did str] == -1 } { - continue; - } - set ret [$dbc put \ - -after [chop_data $method $str]] - error_check_good cput:$key:AFTER $ret [expr $key + 1] - set dlist [linsert $dlist [expr $key + 1] $str] - incr count - } - - # Now verify that the keys are in the right place - set i 0 - for {set ret [$dbc get "-set" $key]} \ - {[string length $ret] != 0 && $i < 3} \ - {set ret [$dbc get "-next"] } { - set check_key [expr $key + $i] - - set k [lindex [lindex $ret 0] 0] - error_check_good cget:$key:loop $k $check_key - - set d [lindex [lindex $ret 0] 1] - error_check_good cget:data $d \ - [pad_data $method [lindex $dlist $check_key]] 
- incr i - } - } - close $did - error_check_good cclose [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Create check key file. - set oid [open $t2 w] - for {set i 1} {$i <= $count} {incr i} { - puts $oid $i - } - close $oid - - puts "\tTest$tnum.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test011_check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good Test$tnum:diff($t2,$t1) \ - [filecmp $t2 $t1] 0 - - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.c: close, open, and dump file" - open_and_dump_file $testfile $env $t1 test011_check \ - dump_file_direction "-first" "-next" - error_check_good Test$tnum:diff($t2,$t1) \ - [filecmp $t2 $t1] 0 - - puts "\tTest$tnum.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 test011_check \ - dump_file_direction "-last" "-prev" - - filesort $t1 $t3 -n - error_check_good Test$tnum:diff($t2,$t3) \ - [filecmp $t2 $t3] 0 -} - -proc test011_check { key data } { - global dlist - - error_check_good "get key $key" $data [lindex $dlist $key] -} diff --git a/storage/bdb/test/test012.tcl b/storage/bdb/test/test012.tcl deleted file mode 100644 index 42225d982cd..00000000000 --- a/storage/bdb/test/test012.tcl +++ /dev/null @@ -1,139 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test012.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test012 -# TEST Large keys/small data -# TEST Same as test003 except use big keys (source files and -# TEST executables) and small data (the file/executable names). -# TEST -# TEST Take the source files and dbtest executable and enter their contents -# TEST as the key with their names as data. After all are entered, retrieve -# TEST all; compare output to original. 
Close file, reopen, do retrieve and -# TEST re-verify. -proc test012 { method args} { - global names - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 } { - puts "Test012 skipping for method $method" - return - } - - puts "Test012: $method ($args) filename=data filecontents=key pairs" - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test012.db - set env NULL - } else { - set testfile test012.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list] - - puts "\tTest012.a: put/get loop" - set count 0 - foreach f $file_list { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - put_file_as_key $db $txn $pflags $f - - set kd [get_file_as_key $db $txn $gflags $f] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest012.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_binkey_file $db $txn $t1 test012.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the data to see if they match the .o and dbtest files - set oid [open $t2.tmp w] - foreach f $file_list { - puts $oid $f - } - close $oid - filesort $t2.tmp $t2 - fileremove $t2.tmp - filesort $t1 $t3 - - error_check_good Test012:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again. - puts "\tTest012.c: close, open, and dump file" - open_and_dump_file $testfile $env $t1 test012.check \ - dump_binkey_file_direction "-first" "-next" - - filesort $t1 $t3 - - error_check_good Test012:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. - puts "\tTest012.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 test012.check\ - dump_binkey_file_direction "-last" "-prev" - - filesort $t1 $t3 - - error_check_good Test012:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 -} - -# Check function for test012; key should be file name; data should be contents -proc test012.check { binfile tmpfile } { - source ./include.tcl - - error_check_good Test012:diff($binfile,$tmpfile) \ - [filecmp $binfile $tmpfile] 0 -} diff --git a/storage/bdb/test/test013.tcl b/storage/bdb/test/test013.tcl deleted file mode 100644 index e456965bfd9..00000000000 --- a/storage/bdb/test/test013.tcl +++ /dev/null @@ -1,240 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test013.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test013 -# TEST Partial put test -# TEST Overwrite entire records using partial puts. -# TEST Make surethat NOOVERWRITE flag works. 
-# TEST -# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs). -# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error). -# TEST 3. Actually overwrite each one with its datum reversed. -# TEST -# TEST No partial testing here. -proc test013 { method {nentries 10000} args } { - global errorCode - global errorInfo - global fixed_len - - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test013.db - set env NULL - } else { - set testfile test013.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test013: $method ($args) $nentries equal key/data pairs, put test" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - set checkfunc test013_recno.check - append gflags " -recno" - global kvals - } else { - set checkfunc test013.check - } - puts "\tTest013.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags $txn {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now we will try to overwrite each datum, but set the - # NOOVERWRITE flag. - puts "\tTest013.b: overwrite values with NOOVERWRITE flag." 
- set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags \ - {-nooverwrite $key [chop_data $method $str]}] - error_check_good put [is_substr $ret "DB_KEYEXIST"] 1 - - # Value should be unchanged. - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now we will replace each item with its datum capitalized. - puts "\tTest013.c: overwrite values with capitalized datum" - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - set rstr [string toupper $str] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} \ - $txn $pflags {$key [chop_data $method $rstr]}] - error_check_good put $r 0 - - # Value should be changed. 
- set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $rstr]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now make sure that everything looks OK - puts "\tTest013.d: check entire file contents" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {incr i} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good \ - Test013:diff($t3,$t2) [filecmp $t3 $t2] 0 - - puts "\tTest013.e: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction "-first" "-next" - - if { [is_record_based $method] == 0 } { - filesort $t1 $t3 - } - - error_check_good \ - Test013:diff($t3,$t2) [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. 
- puts "\tTest013.f: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction "-last" "-prev" - - if { [is_record_based $method] == 0 } { - filesort $t1 $t3 - } - - error_check_good \ - Test013:diff($t3,$t2) [filecmp $t3 $t2] 0 -} - -# Check function for test013; keys and data are identical -proc test013.check { key data } { - error_check_good \ - "key/data mismatch for $key" $data [string toupper $key] -} - -proc test013_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good \ - "data mismatch for $key" $data [string toupper $kvals($key)] -} diff --git a/storage/bdb/test/test014.tcl b/storage/bdb/test/test014.tcl deleted file mode 100644 index 708d5dc09e5..00000000000 --- a/storage/bdb/test/test014.tcl +++ /dev/null @@ -1,253 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test014.tcl,v 11.26 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test014 -# TEST Exercise partial puts on short data -# TEST Run 5 combinations of numbers of characters to replace, -# TEST and number of times to increase the size by. -# TEST -# TEST Partial put test, small data, replacing with same size. The data set -# TEST consists of the first nentries of the dictionary. We will insert them -# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then -# TEST we'll try to perform partial puts of some characters at the beginning, -# TEST some at the end, and some at the middle. 
-proc test014 { method {nentries 10000} args } { - set fixed 0 - set args [convert_args $method $args] - - if { [is_fixed_length $method] == 1 } { - set fixed 1 - } - - puts "Test014: $method ($args) $nentries equal key/data pairs, put test" - - # flagp indicates whether this is a postpend or a - # normal partial put - set flagp 0 - - eval {test014_body $method $flagp 1 1 $nentries} $args - eval {test014_body $method $flagp 1 4 $nentries} $args - eval {test014_body $method $flagp 2 4 $nentries} $args - eval {test014_body $method $flagp 1 128 $nentries} $args - eval {test014_body $method $flagp 2 16 $nentries} $args - if { $fixed == 0 } { - eval {test014_body $method $flagp 0 1 $nentries} $args - eval {test014_body $method $flagp 0 4 $nentries} $args - eval {test014_body $method $flagp 0 128 $nentries} $args - - # POST-PENDS : - # partial put data after the end of the existent record - # chars: number of empty spaces that will be padded with null - # increase: is the length of the str to be appended (after pad) - # - set flagp 1 - eval {test014_body $method $flagp 1 1 $nentries} $args - eval {test014_body $method $flagp 4 1 $nentries} $args - eval {test014_body $method $flagp 128 1 $nentries} $args - eval {test014_body $method $flagp 1 4 $nentries} $args - eval {test014_body $method $flagp 1 128 $nentries} $args - } - puts "Test014 complete." -} - -proc test014_body { method flagp chars increase {nentries 10000} args } { - source ./include.tcl - - set omethod [convert_method $method] - - if { [is_fixed_length $method] == 1 && $chars != $increase } { - puts "Test014: $method: skipping replace\ - $chars chars with string $increase times larger." - return - } - - if { $flagp == 1} { - puts "Test014: Postpending string of len $increase with \ - gap $chars." 
- } else { - puts "Test014: Replace $chars chars with string \ - $increase times larger" - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test014.db - set env NULL - } else { - set testfile test014.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set gflags "" - set pflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - puts "\tTest014.a: put/get loop" - # Here is the loop where we put and get each key/data pair - # We will do the initial put and then three Partial Puts - # for the beginning, middle and end of the string. 
- set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - if { $flagp == 1 } { - # this is for postpend only - global dvals - - # initial put - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $str}] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good dbput $ret 0 - - set offset [string length $str] - - # increase is the actual number of new bytes - # to be postpended (besides the null padding) - set data [repeat "P" $increase] - - # chars is the amount of padding in between - # the old data and the new - set len [expr $offset + $chars + $increase] - set dvals($key) [binary format \ - a[set offset]x[set chars]a[set increase] \ - $str $data] - set offset [expr $offset + $chars] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put -partial [list $offset 0]} \ - $txn {$key $data}] - error_check_good dbput:post $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } else { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - partial_put $method $db $txn \ - $gflags $key $str $chars $increase - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - incr count - } - close $did - - # Now make sure that everything looks OK - puts "\tTest014.b: check entire file contents" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test014.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based 
$method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good \ - Test014:diff($t3,$t2) [filecmp $t3 $t2] 0 - - puts "\tTest014.c: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_file $testfile $env \ - $t1 test014.check dump_file_direction "-first" "-next" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t2 $t3 - file rename -force $t3 $t2 - filesort $t1 $t3 - } - - error_check_good \ - Test014:diff($t3,$t2) [filecmp $t3 $t2] 0 - # Now, reopen the file and run the last test again in the - # reverse direction. - puts "\tTest014.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 \ - test014.check dump_file_direction "-last" "-prev" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t2 $t3 - file rename -force $t3 $t2 - filesort $t1 $t3 - } - - error_check_good \ - Test014:diff($t3,$t2) [filecmp $t3 $t2] 0 -} - -# Check function for test014; keys and data are identical -proc test014.check { key data } { - global dvals - - error_check_good key"$key"_exists [info exists dvals($key)] 1 - error_check_good "data mismatch for key $key" $data $dvals($key) -} diff --git a/storage/bdb/test/test015.tcl b/storage/bdb/test/test015.tcl deleted file mode 100644 index 9401228116f..00000000000 --- a/storage/bdb/test/test015.tcl +++ /dev/null @@ -1,276 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test015.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test015 -# TEST Partial put test -# TEST Partial put test where the key does not initially exist. 
-proc test015 { method {nentries 7500} { start 0 } args } { - global fixed_len testdir - - set low_range 50 - set mid_range 100 - set high_range 1000 - - if { [is_fixed_length $method] } { - set low_range [expr $fixed_len/2 - 2] - set mid_range [expr $fixed_len/2] - set high_range $fixed_len - } - - set t_table { - { 1 { 1 1 1 } } - { 2 { 1 1 5 } } - { 3 { 1 1 $low_range } } - { 4 { 1 $mid_range 1 } } - { 5 { $mid_range $high_range 5 } } - { 6 { 1 $mid_range $low_range } } - } - - puts "Test015: \ - $method ($args) $nentries equal key/data pairs, partial put test" - test015_init - if { $start == 0 } { - set start { 1 2 3 4 5 6 } - } - foreach entry $t_table { - set this [lindex $entry 0] - if { [lsearch $start $this] == -1 } { - continue - } - puts -nonewline "$this: " - eval [concat test015_body $method [lindex $entry 1] \ - $nentries $args] - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - set testdir [get_home $env] - } -puts "Verifying testdir $testdir" - - error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0 - } -} - -proc test015_init { } { - global rand_init - - berkdb srand $rand_init -} - -proc test015_body { method off_low off_hi rcount {nentries 10000} args } { - global dvals - global fixed_len - global testdir - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set checkfunc test015.check - - if { [is_fixed_length $method] && \ - [string compare $omethod "-recno"] == 0} { - # is fixed recno method - set checkfunc test015.check - } - - puts "Put $rcount strings random offsets between $off_low and $off_hi" - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test015.db - set env NULL - } else { - set testfile test015.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries > 5000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set retdir $testdir - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - puts "\tTest015.a: put/get loop for $nentries entries" - - # Here is the loop where we put and get each key/data pair - # Each put is a partial put of a record that does not exist. - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - if { [string length $str] > $fixed_len } { - continue - } - set key [expr $count + 1] - } else { - set key $str - } - - if { 0 } { - set data [replicate $str $rcount] - set off [ berkdb random_int $off_low $off_hi ] - set offn [expr $off + 1] - if { [is_fixed_length $method] && \ - [expr [string length $data] + $off] >= $fixed_len} { - set data [string range $data 0 [expr $fixed_len-$offn]] - } - set dvals($key) [partial_shift $data $off right] - } else { - set data [chop_data $method [replicate $str $rcount]] - - # This is a hack. In DB we will store the records with - # some padding, but these will get lost if we just return - # them in TCL. As a result, we're going to have to hack - # get to check for 0 padding and return a list consisting - # of the number of 0's and the actual data. 
- set off [ berkdb random_int $off_low $off_hi ] - - # There is no string concatenation function in Tcl - # (although there is one in TclX), so we have to resort - # to this hack. Ugh. - set slen [string length $data] - if {[is_fixed_length $method] && \ - $slen > $fixed_len - $off} { - set $slen [expr $fixed_len - $off] - } - set a "a" - set dvals($key) [pad_data \ - $method [eval "binary format x$off$a$slen" {$data}]] - } - if {[is_fixed_length $method] && \ - [string length $data] > ($fixed_len - $off)} { - set slen [expr $fixed_len - $off] - set data [eval "binary format a$slen" {$data}] - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn \ - {-partial [list $off [string length $data]] $key $data}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - incr count - } - close $did - - # Now make sure that everything looks OK - puts "\tTest015.b: check entire file contents" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - filesort $t2 $t3 - file rename -force $t3 $t2 - filesort $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Test015:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tTest015.c: close, open, and dump file" - # Now, reopen the file and run the last test again. 
- open_and_dump_file $testfile $env $t1 \ - $checkfunc dump_file_direction "-first" "-next" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Test015:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in the - # reverse direction. - puts "\tTest015.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 \ - $checkfunc dump_file_direction "-last" "-prev" - - if { [string compare $omethod "-recno"] != 0 } { - filesort $t1 $t3 - } - - error_check_good Test015:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - unset dvals -} - -# Check function for test015; keys and data are identical -proc test015.check { key data } { - global dvals - - error_check_good key"$key"_exists [info exists dvals($key)] 1 - binary scan $data "c[string length $data]" a - binary scan $dvals($key) "c[string length $dvals($key)]" b - error_check_good "mismatch on padding for key $key" $a $b -} - -proc test015.fixed.check { key data } { - global dvals - global fixed_len - - error_check_good key"$key"_exists [info exists dvals($key)] 1 - if { [string length $data] > $fixed_len } { - error_check_bad \ - "data length:[string length $data] \ - for fixed:$fixed_len" 1 1 - } - puts "$data : $dvals($key)" - error_check_good compare_data($data,$dvals($key) \ - $dvals($key) $data -} diff --git a/storage/bdb/test/test016.tcl b/storage/bdb/test/test016.tcl deleted file mode 100644 index 481c85ec766..00000000000 --- a/storage/bdb/test/test016.tcl +++ /dev/null @@ -1,207 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test016.tcl,v 11.25 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test016 -# TEST Partial put test -# TEST Partial put where the datum gets shorter as a result of the put. -# TEST -# TEST Partial put test where partial puts make the record smaller. 
-# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and a fixed, medium length data string; -# TEST retrieve each. After all are entered, go back and do partial puts, -# TEST replacing a random-length string with the key value. -# TEST Then verify. - -proc test016 { method {nentries 10000} args } { - global datastr - global dvals - global rand_init - source ./include.tcl - - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_fixed_length $method] == 1 } { - puts "Test016: skipping for method $method" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test016.db - set env NULL - } else { - set testfile test016.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test016: $method ($args) $nentries partial put shorten" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - # Here is the loop where we put and get each key/data pair - puts "\tTest016.a: put/get loop" - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $datastr]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Next we will do a partial put replacement, making the data - # shorter - puts "\tTest016.b: partial put loop" - set did [open $dict] - set count 0 - set len [string length $datastr] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - - set repl_len [berkdb random_int [string length $key] $len] - set repl_off [berkdb random_int 0 [expr $len - $repl_len] ] - set s1 [string range $datastr 0 [ expr $repl_off - 1] ] - set s2 [string toupper $key] - set s3 [string range $datastr [expr $repl_off + $repl_len] end ] - set dvals($key) [pad_data $method $s1$s2$s3] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t 
$env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {-partial \ - [list $repl_off $repl_len] $key [chop_data $method $s2]}] - error_check_good put $ret 0 - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - put $ret [list [list $key [pad_data $method $s1$s2$s3]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest016.c: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test016.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Test016:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again. - puts "\tTest016.d: close, open, and dump file" - open_and_dump_file $testfile $env $t1 test016.check \ - dump_file_direction "-first" "-next" - - if { [ is_record_based $method ] == 0 } { - filesort $t1 $t3 - } - error_check_good Test016:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. 
- puts "\tTest016.e: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 test016.check \ - dump_file_direction "-last" "-prev" - - if { [ is_record_based $method ] == 0 } { - filesort $t1 $t3 - } - error_check_good Test016:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 -} - -# Check function for test016; data should be whatever is set in dvals -proc test016.check { key data } { - global datastr - global dvals - - error_check_good key"$key"_exists [info exists dvals($key)] 1 - error_check_good "data mismatch for key $key" $data $dvals($key) -} diff --git a/storage/bdb/test/test017.tcl b/storage/bdb/test/test017.tcl deleted file mode 100644 index 6503b2cc140..00000000000 --- a/storage/bdb/test/test017.tcl +++ /dev/null @@ -1,316 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test017.tcl,v 11.29 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test017 -# TEST Basic offpage duplicate test. -# TEST -# TEST Run duplicates with small page size so that we test off page duplicates. -# TEST Then after we have an off-page database, test with overflow pages too. -proc test017 { method {contents 0} {ndups 19} {tnum "017"} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - incr pgindex - if { [lindex $args $pgindex] > 8192 } { - puts "Test$tnum: Skipping for large pagesizes" - return - } - } - - # Create the database and open the dictionary - set limit 0 - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - set limit 100 - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644 -dup} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - set file_list [get_file_list 1] - if { $txnenv == 1 } { - if { [llength $file_list] > $limit } { - set file_list [lrange $file_list 0 $limit] - } - set flen [llength $file_list] - reduce_dups flen ndups - } - puts "Test$tnum: $method ($args) Off page duplicate tests\ - with $ndups duplicates" - - set ovfl "" - # Here is the loop where we put and get each key/data pair - puts -nonewline "\tTest$tnum.a: Creating duplicates with " - if { $contents != 0 } { - puts "file contents as key/data" - } else { - puts "file name as key/data" - } - foreach f $file_list { - if { $contents != 0 } { - set fid [open $f r] - fconfigure $fid -translation binary - # - # Prepend file name to guarantee uniqueness - set filecont [read $fid] - set str $f:$filecont - close $fid - } else { - set str $f - } - for { set i 1 } { $i <= $ndups } { incr i } { - set datastr $i:$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # - # Save 10% files for overflow test - # - if { $contents == 0 && [expr $count % 10] == 0 } { - lappend ovfl $f - } - # Now retrieve all the keys matching this key - set ret [$db get $str] - error_check_bad $f:dbget_dups 
[llength $ret] 0 - error_check_good $f:dbget_dups1 [llength $ret] $ndups - set x 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - for {set ret [$dbc get "-set" $str]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get "-next"] } { - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - if {[string length $d] == 0} { - break - } - error_check_good "Test$tnum:get" $d $str - set id [ id_of $datastr ] - error_check_good "Test$tnum:$f:dup#" $id $x - incr x - } - error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest$tnum.b: Checking file for correct duplicates" - set dlist "" - for { set i 1 } { $i <= $ndups } {incr i} { - lappend dlist $i - } - set oid [open $t2.tmp w] - set o1id [open $t4.tmp w] - foreach f $file_list { - for {set i 1} {$i <= $ndups} {incr i} { - puts $o1id $f - } - puts $oid $f - } - close $oid - close $o1id - filesort $t2.tmp $t2 - filesort $t4.tmp $t4 - fileremove $t2.tmp - fileremove $t4.tmp - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - if {$contents == 0} { - filesort $t1 $t3 - - error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0 - - # Now compare the keys to see if they match the file names - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test017.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - filesort $t1 $t3 - 
- error_check_good Test$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0 - } - - error_check_good db_close [$db close] 0 - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tTest$tnum.c: Checking file for correct duplicates after close" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if {$contents == 0} { - # Now compare the keys to see if they match the filenames - filesort $t1 $t3 - error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.d: Verify off page duplicates and overflow status" - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set stat [$db stat] - if { [is_btree $method] } { - error_check_bad stat:offpage \ - [is_substr $stat "{{Internal pages} 0}"] 1 - } - if {$contents == 0} { - # This check doesn't work in hash, since overflow - # pages count extra pages in buckets as well as true - # P_OVERFLOW pages. - if { [is_hash $method] == 0 } { - error_check_good overflow \ - [is_substr $stat "{{Overflow pages} 0}"] 1 - } - } else { - if { [is_hash $method] } { - error_check_bad overflow \ - [is_substr $stat "{{Number of big pages} 0}"] 1 - } else { - error_check_bad overflow \ - [is_substr $stat "{{Overflow pages} 0}"] 1 - } - } - - # - # If doing overflow test, do that now. Else we are done. - # Add overflow pages by adding a large entry to a duplicate. 
- # - if { [llength $ovfl] == 0} { - error_check_good db_close [$db close] 0 - return - } - - puts "\tTest$tnum.e: Add overflow duplicate entries" - set ovfldup [expr $ndups + 1] - foreach f $ovfl { - # - # This is just like put_file, but prepends the dup number - # - set fid [open $f r] - fconfigure $fid -translation binary - set fdata [read $fid] - close $fid - set data $ovfldup:$fdata:$fdata:$fdata:$fdata - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags {$f $data}] - error_check_good ovfl_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - puts "\tTest$tnum.f: Verify overflow duplicate entries" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist $ovfldup - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - filesort $t1 $t3 - error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0 - - set stat [$db stat] - if { [is_hash [$db get_type]] } { - error_check_bad overflow1_hash [is_substr $stat \ - "{{Number of big pages} 0}"] 1 - } else { - error_check_bad \ - overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1 - } - error_check_good db_close [$db close] 0 -} - -# Check function; verify data contains key -proc test017.check { key data } { - error_check_good "data mismatch for key $key" $key [data_of $data] -} diff --git a/storage/bdb/test/test018.tcl b/storage/bdb/test/test018.tcl deleted file mode 100644 index bf2e3eb562e..00000000000 --- a/storage/bdb/test/test018.tcl +++ /dev/null @@ -1,21 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test018.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test018 -# TEST Offpage duplicate test -# TEST Key_{first,last,before,after} offpage duplicates. 
-# TEST Run duplicates with small page size so that we test off page -# TEST duplicates. -proc test018 { method {nentries 10000} args} { - puts "Test018: Off page duplicate tests" - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test018: Skipping for specific pagesizes" - return - } - eval {test011 $method $nentries 19 "018" -pagesize 512} $args -} diff --git a/storage/bdb/test/test019.tcl b/storage/bdb/test/test019.tcl deleted file mode 100644 index 68f6487be4f..00000000000 --- a/storage/bdb/test/test019.tcl +++ /dev/null @@ -1,136 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test019.tcl,v 11.24 2004/04/22 18:57:32 sue Exp $ -# -# TEST test019 -# TEST Partial get test. -proc test019 { method {nentries 10000} args } { - global fixed_len - global rand_init - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test019.db - set env NULL - } else { - set testfile test019.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test019: $method ($args) $nentries partial get test" - - cleanup $testdir $env - - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - berkdb srand $rand_init - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - puts "\tTest019.a: put/get loop" - for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \ - { incr i } { - - if { [is_record_based $method] == 1 } { - set key [expr $i + 1] - } else { - set key $str - } - set repl [berkdb random_int $fixed_len 100] - set data [chop_data $method [replicate $str $repl]] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {-nooverwrite $key $data}] - error_check_good dbput:$key $ret 0 - - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - dbget:$key $ret [list [list $key [pad_data $method $data]]] - set kvals($key) $repl - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - close $did - - puts "\tTest019.b: partial get loop" - set did [open $dict] - for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \ - { incr i } { - if { [is_record_based $method] == 1 } { - set key [expr $i + 1] - } else { - set key $str - } - set data [pad_data $method [replicate $str $kvals($key)]] - - set maxndx [expr [string length $data] - 1] - - if { $maxndx > 0 } { - set beg [berkdb random_int 0 [expr $maxndx - 1]] - set len [berkdb random_int 0 [expr $maxndx * 2]] - } else { - set beg 0 - set len 0 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db get} \ - $txn {-partial [list $beg $len]} $gflags {$key}] - if { $txnenv == 1 } { - 
error_check_good txn [$t commit] 0 - } - - # In order for tcl to handle this, we have to overwrite the - # last character with a NULL. That makes the length one less - # than we expect. - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_good dbget_key $k $key - - error_check_good dbget_data $d \ - [string range $data $beg [expr $beg + $len - 1]] - - } - error_check_good db_close [$db close] 0 - close $did -} diff --git a/storage/bdb/test/test020.tcl b/storage/bdb/test/test020.tcl deleted file mode 100644 index 19eda9c313c..00000000000 --- a/storage/bdb/test/test020.tcl +++ /dev/null @@ -1,137 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test020.tcl,v 11.19 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test020 -# TEST In-Memory database tests. -proc test020 { method {nentries 10000} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - if { [is_queueext $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test020 skipping for method $method" - return - } - # Create the database and open the dictionary - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # Check if we are using an env. - if { $eindex == -1 } { - set env NULL - } else { - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test020: $method ($args) $nentries equal key/data pairs" - - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - set checkfunc test020_recno.check - append gflags " -recno" - } else { - set checkfunc test020.check - } - puts "\tTest020.a: put/get loop" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest020.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - - error_check_good Test020:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 -} - -# Check function for test020; keys and data are identical -proc test020.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc test020_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "data mismatch: key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/test021.tcl b/storage/bdb/test/test021.tcl deleted file mode 100644 index 43a7a4bde14..00000000000 --- a/storage/bdb/test/test021.tcl +++ /dev/null @@ -1,162 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test021.tcl,v 11.17 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test021 -# TEST Btree range tests. -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self, reversed as key and self as data. -# TEST After all are entered, retrieve each using a cursor SET_RANGE, and -# TEST getting about 20 keys sequentially after it (in some cases we'll -# TEST run out towards the end of the file). 
-proc test021 { method {nentries 10000} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test021.db - set env NULL - } else { - set testfile test021.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - puts "Test021: $method ($args) $nentries equal key/data pairs" - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - set checkfunc test021_recno.check - append gflags " -recno" - } else { - set checkfunc test021.check - } - puts "\tTest021.a: put loop" - # Here is the loop where we put each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key [reverse $str] - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good db_put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - # Now we will get each key 
from the DB and retrieve about 20 - # records after it. - error_check_good db_close [$db close] 0 - - puts "\tTest021.b: test ranges" - set db [eval {berkdb_open -rdonly} $args $omethod $testfile ] - error_check_good dbopen [is_valid_db $db] TRUE - - # Open a cursor - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - set did [open $dict] - set i 0 - while { [gets $did str] != -1 && $i < $count } { - if { [is_record_based $method] == 1 } { - set key [expr $i + 1] - } else { - set key [reverse $str] - } - - set r [$dbc get -set_range $key] - error_check_bad dbc_get:$key [string length $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - $checkfunc $k $d - - for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } { - set r [$dbc get "-next"] - # no error checking because we may run off the end - # of the database - if { [llength $r] == 0 } { - continue; - } - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - $checkfunc $k $d - } - incr i - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - close $did -} - -# Check function for test021; keys and data are reversed -proc test021.check { key data } { - error_check_good "key/data mismatch for $key" $data [reverse $key] -} - -proc test021_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "data mismatch: key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/test022.tcl b/storage/bdb/test/test022.tcl deleted file mode 100644 index deded62e7a7..00000000000 --- a/storage/bdb/test/test022.tcl +++ /dev/null @@ -1,62 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. 
All rights reserved. -# -# $Id: test022.tcl,v 11.16 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test022 -# TEST Test of DB->getbyteswapped(). -proc test022 { method args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test022 ($args) $omethod: DB->getbyteswapped()" - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile1 "$testdir/test022a.db" - set testfile2 "$testdir/test022b.db" - set env NULL - } else { - set testfile1 "test022a.db" - set testfile2 "test022b.db" - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - # Create two databases, one in each byte order. - set db1 [eval {berkdb_open -create \ - -mode 0644} $omethod $args {-lorder 1234} $testfile1] - error_check_good db1_open [is_valid_db $db1] TRUE - - set db2 [eval {berkdb_open -create \ - -mode 0644} $omethod $args {-lorder 4321} $testfile2] - error_check_good db2_open [is_valid_db $db2] TRUE - - # Call DB->get_byteswapped on both of them. - set db1_order [$db1 is_byteswapped] - set db2_order [$db2 is_byteswapped] - - # Make sure that both answers are either 1 or 0, - # and that exactly one of them is 1. - error_check_good is_byteswapped_sensible_1 \ - [expr ($db1_order == 1 && $db2_order == 0) || \ - ($db1_order == 0 && $db2_order == 1)] 1 - - error_check_good db1_close [$db1 close] 0 - error_check_good db2_close [$db2 close] 0 - puts "\tTest022 complete." -} diff --git a/storage/bdb/test/test023.tcl b/storage/bdb/test/test023.tcl deleted file mode 100644 index c4a707288ff..00000000000 --- a/storage/bdb/test/test023.tcl +++ /dev/null @@ -1,219 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test023.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $ -# -# TEST test023 -# TEST Duplicate test -# TEST Exercise deletes and cursor operations within a duplicate set. -# TEST Add a key with duplicates (first time on-page, second time off-page) -# TEST Number the dups. -# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly. -proc test023 { method args } { - global alphabet - global dupnum - global dupstr - global errorInfo - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - puts "Test023: $method delete duplicates/check cursor operations" - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test023: skipping for method $omethod" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test023.db - set env NULL - } else { - set testfile test023.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644 -dup} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - foreach i { onpage offpage } { - if { $i == "onpage" } { - set dupstr DUP - } else { - set dupstr [repeat $alphabet 50] - } - puts "\tTest023.a: Insert key w/$i dups" - set key "duplicate_val_test" - for { set count 0 } { $count < 20 } { incr count } { - set ret \ - [eval {$db put} $txn $pflags {$key $count$dupstr}] - error_check_good db_put $ret 0 - } - - # Now let's get all the items and make sure they look OK. - puts "\tTest023.b: Check initial duplicates" - set dupnum 0 - dump_file $db $txn $t1 test023.check - - # Delete a couple of random items (FIRST, LAST one in middle) - # Make sure that current returns an error and that NEXT and - # PREV do the right things. - - set ret [$dbc get -set $key] - error_check_bad dbc_get:SET [llength $ret] 0 - - puts "\tTest023.c: Delete first and try gets" - # This should be the first duplicate - error_check_good \ - dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]] - - # Now delete it. 
- set ret [$dbc del] - error_check_good dbc_del:FIRST $ret 0 - - # Now current should fail - set ret [$dbc get -current] - error_check_good dbc_get:CURRENT $ret "" - - # Now Prev should fail - set ret [$dbc get -prev] - error_check_good dbc_get:prev0 [llength $ret] 0 - - # Now 10 nexts should work to get us in the middle - for { set j 1 } { $j <= 10 } { incr j } { - set ret [$dbc get -next] - error_check_good \ - dbc_get:next [llength [lindex $ret 0]] 2 - error_check_good \ - dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr - } - - puts "\tTest023.d: Delete middle and try gets" - # Now do the delete on the current key. - set ret [$dbc del] - error_check_good dbc_del:10 $ret 0 - - # Now current should fail - set ret [$dbc get -current] - error_check_good dbc_get:deleted $ret "" - - # Prev and Next should work - set ret [$dbc get -next] - error_check_good dbc_get:next [llength [lindex $ret 0]] 2 - error_check_good \ - dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr - - set ret [$dbc get -prev] - error_check_good dbc_get:next [llength [lindex $ret 0]] 2 - error_check_good \ - dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr - - # Now go to the last one - for { set j 11 } { $j <= 19 } { incr j } { - set ret [$dbc get -next] - error_check_good \ - dbc_get:next [llength [lindex $ret 0]] 2 - error_check_good \ - dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr - } - - puts "\tTest023.e: Delete last and try gets" - # Now do the delete on the current key. 
- set ret [$dbc del] - error_check_good dbc_del:LAST $ret 0 - - # Now current should fail - set ret [$dbc get -current] - error_check_good dbc_get:deleted $ret "" - - # Next should fail - set ret [$dbc get -next] - error_check_good dbc_get:next19 [llength $ret] 0 - - # Prev should work - set ret [$dbc get -prev] - error_check_good dbc_get:next [llength [lindex $ret 0]] 2 - error_check_good \ - dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr - - # Now overwrite the current one, then count the number - # of data items to make sure that we have the right number. - - puts "\tTest023.f: Count keys, overwrite current, count again" - # At this point we should have 17 keys the (initial 20 minus - # 3 deletes) - set dbc2 [eval {$db cursor} $txn] - error_check_good db_cursor:2 [is_substr $dbc2 $db] 1 - - set count_check 0 - for { set rec [$dbc2 get -first] } { - [llength $rec] != 0 } { set rec [$dbc2 get -next] } { - incr count_check - } - error_check_good numdups $count_check 17 - - set ret [$dbc put -current OVERWRITE] - error_check_good dbc_put:current $ret 0 - - set count_check 0 - for { set rec [$dbc2 get -first] } { - [llength $rec] != 0 } { set rec [$dbc2 get -next] } { - incr count_check - } - error_check_good numdups $count_check 17 - error_check_good dbc2_close [$dbc2 close] 0 - - # Done, delete all the keys for next iteration - set ret [eval {$db del} $txn {$key}] - error_check_good db_delete $ret 0 - - # database should be empty - - set ret [$dbc get -first] - error_check_good first_after_empty [llength $ret] 0 - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - -} - -# Check function for test023; keys and data are identical -proc test023.check { key data } { - global dupnum - global dupstr - error_check_good "bad key" $key duplicate_val_test - error_check_good "data mismatch for $key" $data $dupnum$dupstr - incr dupnum -} diff --git 
a/storage/bdb/test/test024.tcl b/storage/bdb/test/test024.tcl deleted file mode 100644 index 4ac1fceaeb2..00000000000 --- a/storage/bdb/test/test024.tcl +++ /dev/null @@ -1,268 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test024.tcl,v 11.21 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test024 -# TEST Record number retrieval test. -# TEST Test the Btree and Record number get-by-number functionality. -proc test024 { method {nentries 10000} args} { - source ./include.tcl - global rand_init - - set do_renumber [is_rrecno $method] - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test024: $method ($args)" - - if { [string compare $omethod "-hash"] == 0 } { - puts "Test024 skipping for method HASH" - return - } - - berkdb srand $rand_init - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test024.db - set env NULL - } else { - set testfile test024.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - - cleanup $testdir $env - - # Read the first nentries dictionary elements and reverse them. - # Keep a list of these (these will be the keys). 
- puts "\tTest024.a: initialization" - set keys "" - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - lappend keys [reverse $str] - incr count - } - close $did - - # Generate sorted order for the keys - set sorted_keys [lsort $keys] - # Create the database - if { [string compare $omethod "-btree"] == 0 } { - set db [eval {berkdb_open -create \ - -mode 0644 -recnum} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - } else { - set db [eval {berkdb_open -create \ - -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - } - - set pflags "" - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - set gflags " -recno" - } - - puts "\tTest024.b: put/get loop" - foreach k $keys { - if { [is_record_based $method] == 1 } { - set key [lsearch $sorted_keys $k] - incr key - } else { - set key $k - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $k]}] - error_check_good put $ret 0 - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $k]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest024.c: dump file" - - # Put sorted keys in file - set oid [open $t1 w] - foreach k $sorted_keys { - puts $oid [pad_data $method $k] - } - close $oid - - # Instead of using dump_file; get all the keys by keynum - set oid [open $t2 w] - if { [string compare $omethod "-btree"] == 0 } { - set do_renumber 1 - } - - set gflags " -recno" - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for { set k 1 } { $k <= $count } { incr k } { - set ret [eval {$db get} $txn $gflags {$k}] - puts $oid [lindex [lindex $ret 0] 1] - error_check_good recnum_get [lindex [lindex $ret 0] 1] \ - [pad_data $method [lindex $sorted_keys [expr $k - 1]]] - } - close $oid - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - error_check_good Test024.c:diff($t1,$t2) \ - [filecmp $t1 $t2] 0 - - # Now, reopen the file and run the last test again. - puts "\tTest024.d: close, open, and dump file" - set db [eval {berkdb_open -rdonly} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set oid [open $t2 w] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for { set k 1 } { $k <= $count } { incr k } { - set ret [eval {$db get} $txn $gflags {$k}] - puts $oid [lindex [lindex $ret 0] 1] - error_check_good recnum_get [lindex [lindex $ret 0] 1] \ - [pad_data $method [lindex $sorted_keys [expr $k - 1]]] - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $oid - error_check_good db_close [$db close] 0 - error_check_good Test024.d:diff($t1,$t2) \ - [filecmp $t1 $t2] 0 - - # Now, reopen the file and run the last test again in reverse direction. 
- puts "\tTest024.e: close, open, and dump file in reverse direction" - set db [eval {berkdb_open -rdonly} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - # Put sorted keys in file - set rsorted "" - foreach k $sorted_keys { - set rsorted [linsert $rsorted 0 $k] - } - set oid [open $t1 w] - foreach k $rsorted { - puts $oid [pad_data $method $k] - } - close $oid - - set oid [open $t2 w] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for { set k $count } { $k > 0 } { incr k -1 } { - set ret [eval {$db get} $txn $gflags {$k}] - puts $oid [lindex [lindex $ret 0] 1] - error_check_good recnum_get [lindex [lindex $ret 0] 1] \ - [pad_data $method [lindex $sorted_keys [expr $k - 1]]] - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $oid - error_check_good db_close [$db close] 0 - error_check_good Test024.e:diff($t1,$t2) \ - [filecmp $t1 $t2] 0 - - # Now try deleting elements and making sure they work - puts "\tTest024.f: delete test" - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - while { $count > 0 } { - set kndx [berkdb random_int 1 $count] - set kval [lindex $keys [expr $kndx - 1]] - set recno [expr [lsearch $sorted_keys $kval] + 1] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { [is_record_based $method] == 1 } { - set ret [eval {$db del} $txn {$recno}] - } else { - set ret [eval {$db del} $txn {$kval}] - } - error_check_good delete $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Remove the key from the key list - set ndx [expr $kndx - 1] - set keys [lreplace $keys $ndx $ndx] - - if { $do_renumber == 1 } { - set r [expr $recno - 1] - set sorted_keys [lreplace $sorted_keys $r $r] - } - - # Check that the keys after it have been renumbered - if { $txnenv == 1 } { - set t [$env txn] - 
error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { $do_renumber == 1 && $recno != $count } { - set r [expr $recno - 1] - set ret [eval {$db get} $txn $gflags {$recno}] - error_check_good get_after_del \ - [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r] - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Decrement count - incr count -1 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test025.tcl b/storage/bdb/test/test025.tcl deleted file mode 100644 index 8f3cb5c0cd3..00000000000 --- a/storage/bdb/test/test025.tcl +++ /dev/null @@ -1,146 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test025.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test025 -# TEST DB_APPEND flag test. -proc test025 { method {nentries 10000} {start 0 } {tnum "025"} args} { - global kvals - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - puts "Test$tnum: $method ($args)" - - if { [string compare $omethod "-btree"] == 0 } { - puts "Test$tnum skipping for method BTREE" - return - } - if { [string compare $omethod "-hash"] == 0 } { - puts "Test$tnum skipping for method HASH" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - puts "\tTest$tnum.a: put/get loop" - set gflags " -recno" - set pflags " -append" - set txn "" - set checkfunc test025_check - - # Here is the loop where we put and get each key/data pair - set count $start - set nentries [expr $start + $nentries] - if { $count != 0 } { - gets $did str - set k [expr $count + 1] - set kvals($k) [pad_data $method $str] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$k [chop_data $method $str]}] - error_check_good db_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - - while { [gets $did str] != -1 && $count < $nentries } { - set k [expr $count + 1] - set kvals($k) [pad_data $method $str] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}] - error_check_good db_put $ret $k - - set ret [eval {$db get} $txn $gflags {$k}] - error_check_good \ - get $ret [list [list $k [pad_data $method $str]]] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # The recno key will be count + 1, so when we hit - # UINT32_MAX - 1, reset to 0. - if { $count == [expr 0xfffffffe] } { - set count 0 - } else { - incr count - } - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.c: close, open, and dump file" - # Now, reopen the file and run the last test again. - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction -first -next - - # Now, reopen the file and run the last test again in the - # reverse direction. - puts "\tTest$tnum.d: close, open, and dump file in reverse direction" - open_and_dump_file $testfile $env $t1 $checkfunc \ - dump_file_direction -last -prev -} - -proc test025_check { key data } { - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good " key/data mismatch for |$key|" $data $kvals($key) -} diff --git a/storage/bdb/test/test026.tcl b/storage/bdb/test/test026.tcl deleted file mode 100644 index ce91e2b464d..00000000000 --- a/storage/bdb/test/test026.tcl +++ /dev/null @@ -1,155 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test026.tcl,v 11.23 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test026 -# TEST Small keys/medium data w/duplicates -# TEST Put/get per key. -# TEST Loop through keys -- delete each key -# TEST ... test that cursors delete duplicates correctly -# TEST -# TEST Keyed delete test through cursor. If ndups is small; this will -# TEST test on-page dups; if it's large, it will test off-page dups. 
-proc test026 { method {nentries 2000} {ndups 5} {tnum "026"} args} { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the defaults down a bit. - # If we are wanting a lot of dups, set that - # down a bit or repl testing takes very long. - # - if { $nentries == 2000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - cleanup $testdir $env - puts "Test$tnum: $method ($args) $nentries keys\ - with $ndups dups; cursor delete test" - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - - puts "\tTest$tnum.a: Put loop" - set db [eval {berkdb_open -create \ - -mode 0644} $args {$omethod -dup $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } { - set datastr [ make_data_str $str ] - for { set j 1 } { $j <= $ndups} {incr j} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $j$datastr]}] - error_check_good db_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - 
} - } - close $did - - error_check_good db_close [$db close] 0 - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Now we will sequentially traverse the database getting each - # item and deleting it. - set count 0 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - puts "\tTest$tnum.b: Get/delete loop" - set i 1 - for { set ret [$dbc get -first] } { - [string length $ret] != 0 } { - set ret [$dbc get -next] } { - - set key [lindex [lindex $ret 0] 0] - set data [lindex [lindex $ret 0] 1] - if { $i == 1 } { - set curkey $key - } - error_check_good seq_get:key $key $curkey - error_check_good \ - seq_get:data $data [pad_data $method $i[make_data_str $key]] - - if { $i == $ndups } { - set i 1 - } else { - incr i - } - - # Now delete the key - set ret [$dbc del] - error_check_good db_del:$key $ret 0 - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.c: Verify empty file" - # Double check that file is now empty - set db [eval {berkdb_open} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - set ret [$dbc get -first] - error_check_good get_on_empty [string length $ret] 0 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test027.tcl b/storage/bdb/test/test027.tcl deleted file mode 100644 index 7f6d78c3ad6..00000000000 --- a/storage/bdb/test/test027.tcl +++ /dev/null @@ -1,17 +0,0 @@ -# See the file 
LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test027.tcl,v 11.10 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test027 -# TEST Off-page duplicate test -# TEST Test026 with parameters to force off-page duplicates. -# TEST -# TEST Check that delete operations work. Create a database; close -# TEST database and reopen it. Then issues delete by key for each -# TEST entry. -proc test027 { method {nentries 100} args} { - eval {test026 $method $nentries 100 "027"} $args -} diff --git a/storage/bdb/test/test028.tcl b/storage/bdb/test/test028.tcl deleted file mode 100644 index 3884d83e965..00000000000 --- a/storage/bdb/test/test028.tcl +++ /dev/null @@ -1,222 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test028.tcl,v 11.22 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test028 -# TEST Cursor delete test -# TEST Test put operations after deleting through a cursor. -proc test028 { method args } { - global dupnum - global dupstr - global alphabet - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test028: $method put after cursor delete test" - - if { [is_rbtree $method] == 1 } { - puts "Test028 skipping for method $method" - return - } - if { [is_record_based $method] == 1 } { - set key 10 - } else { - append args " -dup" - set key "put_after_cursor_del" - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test028.db - set env NULL - } else { - set testfile test028.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set ndups 20 - set txn "" - set pflags "" - set gflags "" - - if { [is_record_based $method] == 1 } { - set gflags " -recno" - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - foreach i { offpage onpage } { - foreach b { bigitem smallitem } { - if { $i == "onpage" } { - if { $b == "bigitem" } { - set dupstr [repeat $alphabet 100] - } else { - set dupstr DUP - } - } else { - if { $b == "bigitem" } { - set dupstr [repeat $alphabet 100] - } else { - set dupstr [repeat $alphabet 50] - } - } - - if { $b == "bigitem" } { - set dupstr [repeat $dupstr 10] - } - puts "\tTest028: $i/$b" - - puts "\tTest028.a: Insert key with single data item" - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $dupstr]}] - error_check_good db_put $ret 0 - - # Now let's get the item and make sure its OK. 
- puts "\tTest028.b: Check initial entry" - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good db_get \ - $ret [list [list $key [pad_data $method $dupstr]]] - - # Now try a put with NOOVERWRITE SET (should be error) - puts "\tTest028.c: No_overwrite test" - set ret [eval {$db put} $txn \ - {-nooverwrite $key [chop_data $method $dupstr]}] - error_check_good \ - db_put [is_substr $ret "DB_KEYEXIST"] 1 - - # Now delete the item with a cursor - puts "\tTest028.d: Delete test" - set ret [$dbc get -set $key] - error_check_bad dbc_get:SET [llength $ret] 0 - - set ret [$dbc del] - error_check_good dbc_del $ret 0 - - puts "\tTest028.e: Reput the item" - set ret [eval {$db put} $txn \ - {-nooverwrite $key [chop_data $method $dupstr]}] - error_check_good db_put $ret 0 - - puts "\tTest028.f: Retrieve the item" - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good db_get $ret \ - [list [list $key [pad_data $method $dupstr]]] - - # Delete the key to set up for next test - set ret [eval {$db del} $txn {$key}] - error_check_good db_del $ret 0 - - # Now repeat the above set of tests with - # duplicates (if not RECNO). 
- if { [is_record_based $method] == 1 } { - continue; - } - - puts "\tTest028.g: Insert key with duplicates" - for { set count 0 } { $count < $ndups } { incr count } { - set ret [eval {$db put} $txn \ - {$key [chop_data $method $count$dupstr]}] - error_check_good db_put $ret 0 - } - - puts "\tTest028.h: Check dups" - set dupnum 0 - dump_file $db $txn $t1 test028.check - - # Try no_overwrite - puts "\tTest028.i: No_overwrite test" - set ret [eval {$db put} \ - $txn {-nooverwrite $key $dupstr}] - error_check_good \ - db_put [is_substr $ret "DB_KEYEXIST"] 1 - - # Now delete all the elements with a cursor - puts "\tTest028.j: Cursor Deletes" - set count 0 - for { set ret [$dbc get -set $key] } { - [string length $ret] != 0 } { - set ret [$dbc get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_good db_seq(key) $k $key - error_check_good db_seq(data) $d $count$dupstr - set ret [$dbc del] - error_check_good dbc_del $ret 0 - incr count - if { $count == [expr $ndups - 1] } { - puts "\tTest028.k:\ - Duplicate No_Overwrite test" - set ret [eval {$db put} $txn \ - {-nooverwrite $key $dupstr}] - error_check_good db_put [is_substr \ - $ret "DB_KEYEXIST"] 1 - } - } - - # Make sure all the items are gone - puts "\tTest028.l: Get after delete" - set ret [$dbc get -set $key] - error_check_good get_after_del [string length $ret] 0 - - puts "\tTest028.m: Reput the item" - set ret [eval {$db put} \ - $txn {-nooverwrite $key 0$dupstr}] - error_check_good db_put $ret 0 - for { set count 1 } { $count < $ndups } { incr count } { - set ret [eval {$db put} $txn \ - {$key $count$dupstr}] - error_check_good db_put $ret 0 - } - - puts "\tTest028.n: Retrieve the item" - set dupnum 0 - dump_file $db $txn $t1 test028.check - - # Clean out in prep for next test - set ret [eval {$db del} $txn {$key}] - error_check_good db_del $ret 0 - } - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - 
error_check_good db_close [$db close] 0 - -} - -# Check function for test028; keys and data are identical -proc test028.check { key data } { - global dupnum - global dupstr - error_check_good "Bad key" $key put_after_cursor_del - error_check_good "data mismatch for $key" $data $dupnum$dupstr - incr dupnum -} diff --git a/storage/bdb/test/test029.tcl b/storage/bdb/test/test029.tcl deleted file mode 100644 index 53622efeb73..00000000000 --- a/storage/bdb/test/test029.tcl +++ /dev/null @@ -1,247 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test029.tcl,v 11.24 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test029 -# TEST Test the Btree and Record number renumbering. -proc test029 { method {nentries 10000} args} { - source ./include.tcl - - set do_renumber [is_rrecno $method] - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test029: $method ($args)" - - if { [string compare $omethod "-hash"] == 0 } { - puts "Test029 skipping for method HASH" - return - } - if { [is_record_based $method] == 1 && $do_renumber != 1 } { - puts "Test029 skipping for method RECNO (w/out renumbering)" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test029.db - set env NULL - } else { - set testfile test029.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - # Do not set nentries down to 100 until we - # fix SR #5958. 
- set nentries 1000 - } - } - set testdir [get_home $env] - } - cleanup $testdir $env - - # Read the first nentries dictionary elements and reverse them. - # Keep a list of these (these will be the keys). - puts "\tTest029.a: initialization" - set keys "" - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - lappend keys [reverse $str] - incr count - } - close $did - - # Generate sorted order for the keys - set sorted_keys [lsort $keys] - - # Save the first and last keys - set last_key [lindex $sorted_keys end] - set last_keynum [llength $sorted_keys] - - set first_key [lindex $sorted_keys 0] - set first_keynum 1 - - # Create the database - if { [string compare $omethod "-btree"] == 0 } { - set db [eval {berkdb_open -create \ - -mode 0644 -recnum} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - } else { - set db [eval {berkdb_open -create \ - -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - } - - set pflags "" - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - puts "\tTest029.b: put/get loop" - foreach k $keys { - if { [is_record_based $method] == 1 } { - set key [lsearch $sorted_keys $k] - incr key - } else { - set key $k - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $k]}] - error_check_good dbput $ret 0 - - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good dbget [lindex [lindex $ret 0] 1] $k - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Now delete the first key in the database - puts "\tTest029.c: delete and verify renumber" - - # Delete the first key in the file - if { [is_record_based $method] == 1 } { - set key $first_keynum - } else { - set key $first_key - } - - if { $txnenv == 1 } { - set t [$env txn] - 
error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db del} $txn {$key}] - error_check_good db_del $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now we are ready to retrieve records based on - # record number - if { [string compare $omethod "-btree"] == 0 } { - append gflags " -recno" - } - - # First try to get the old last key (shouldn't exist) - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db get} $txn $gflags {$last_keynum}] - error_check_good get_after_del $ret [list] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Now try to get what we think should be the last key - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}] - error_check_good \ - getn_last_after_del [lindex [lindex $ret 0] 1] $last_key - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Create a cursor; we need it for the next test and we - # need it for recno here. - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # OK, now re-put the first key and make sure that we - # renumber the last key appropriately. 
- if { [string compare $omethod "-btree"] == 0 } { - set ret [eval {$db put} $txn \ - {$key [chop_data $method $first_key]}] - error_check_good db_put $ret 0 - } else { - # Recno - set ret [$dbc get -first] - set ret [eval {$dbc put} $pflags {-before $first_key}] - error_check_bad dbc_put:DB_BEFORE $ret 0 - } - - # Now check that the last record matches the last record number - set ret [eval {$db get} $txn $gflags {$last_keynum}] - error_check_good \ - getn_last_after_put [lindex [lindex $ret 0] 1] $last_key - - # Now delete the first key in the database using a cursor - puts "\tTest029.d: delete with cursor and verify renumber" - - set ret [$dbc get -first] - error_check_good dbc_first $ret [list [list $key $first_key]] - - # Now delete at the cursor - set ret [$dbc del] - error_check_good dbc_del $ret 0 - - # Now check the record numbers of the last keys again. - # First try to get the old last key (shouldn't exist) - set ret [eval {$db get} $txn $gflags {$last_keynum}] - error_check_good get_last_after_cursor_del:$ret $ret [list] - - # Now try to get what we think should be the last key - set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}] - error_check_good \ - getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key - - # Re-put the first key and make sure that we renumber the last - # key appropriately. We can't do a c_put -current, so do - # a db put instead. 
- if { [string compare $omethod "-btree"] == 0 } { - puts "\tTest029.e: put (non-cursor) and verify renumber" - set ret [eval {$db put} $txn \ - {$key [chop_data $method $first_key]}] - error_check_good db_put $ret 0 - } else { - puts "\tTest029.e: put with cursor and verify renumber" - set ret [eval {$dbc put} $pflags {-before $first_key}] - error_check_bad dbc_put:DB_BEFORE $ret 0 - } - - # Now check that the last record matches the last record number - set ret [eval {$db get} $txn $gflags {$last_keynum}] - error_check_good \ - get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test030.tcl b/storage/bdb/test/test030.tcl deleted file mode 100644 index 3ee9daa3f50..00000000000 --- a/storage/bdb/test/test030.tcl +++ /dev/null @@ -1,231 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test030.tcl,v 11.20 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test030 -# TEST Test DB_NEXT_DUP Functionality. -proc test030 { method {nentries 10000} args } { - global rand_init - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || - [is_rbtree $method] == 1 } { - puts "Test030 skipping for method $method" - return - } - berkdb srand $rand_init - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test030.db - set cntfile $testdir/cntfile.db - set env NULL - } else { - set testfile test030.db - set cntfile cntfile.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - - puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing" - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - set db [eval {berkdb_open -create \ - -mode 0644 -dup} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - # Use a second DB to keep track of how many duplicates - # we enter per key - - set cntdb [eval {berkdb_open -create \ - -mode 0644} $args {-btree $cntfile}] - error_check_good dbopen:cntfile [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - # We will add between 1 and 10 dups with values 1 ... dups - # We'll verify each addition. - - set did [open $dict] - puts "\tTest030.a: put and get duplicate keys." 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - - while { [gets $did str] != -1 && $count < $nentries } { - set ndup [berkdb random_int 1 10] - - for { set i 1 } { $i <= $ndup } { incr i 1 } { - set ctxn "" - if { $txnenv == 1 } { - set ct [$env txn] - error_check_good txn \ - [is_valid_txn $ct $env] TRUE - set ctxn "-txn $ct" - } - set ret [eval {$cntdb put} \ - $ctxn $pflags {$str [chop_data $method $ndup]}] - error_check_good put_cnt $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$ct commit] 0 - } - set datastr $i:$str - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - } - - # Now retrieve all the keys matching this key - set x 0 - for {set ret [$dbc get -set $str]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -nextdup] } { - incr x - - if { [llength $ret] == 0 } { - break - } - - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - error_check_good Test030:put $d $str - - set id [ id_of $datastr ] - error_check_good Test030:dup# $id $x - } - error_check_good Test030:numdups $x $ndup - incr count - } - close $did - - # Verify on sequential pass of entire file - puts "\tTest030.b: sequential check" - - # We can't just set lastkey to a null string, since that might - # be a key now! 
- set lastkey "THIS STRING WILL NEVER BE A KEY" - - for {set ret [$dbc get -first]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -next] } { - - # Outer loop should always get a new key - - set k [lindex [lindex $ret 0] 0] - error_check_bad outer_get_loop:key $k $lastkey - - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - set id [ id_of $datastr ] - - error_check_good outer_get_loop:data $d $k - error_check_good outer_get_loop:id $id 1 - - set lastkey $k - # Figure out how may dups we should have - if { $txnenv == 1 } { - set ct [$env txn] - error_check_good txn [is_valid_txn $ct $env] TRUE - set ctxn "-txn $ct" - } - set ret [eval {$cntdb get} $ctxn $pflags {$k}] - set ndup [lindex [lindex $ret 0] 1] - if { $txnenv == 1 } { - error_check_good txn [$ct commit] 0 - } - - set howmany 1 - for { set ret [$dbc get -nextdup] } \ - { [llength $ret] != 0 } \ - { set ret [$dbc get -nextdup] } { - incr howmany - - set k [lindex [lindex $ret 0] 0] - error_check_good inner_get_loop:key $k $lastkey - - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - set id [ id_of $datastr ] - - error_check_good inner_get_loop:data $d $k - error_check_good inner_get_loop:id $id $howmany - - } - error_check_good ndups_found $howmany $ndup - } - - # Verify on key lookup - puts "\tTest030.c: keyed check" - set cnt_dbc [$cntdb cursor] - for {set ret [$cnt_dbc get -first]} \ - {[llength $ret] != 0} \ - {set ret [$cnt_dbc get -next] } { - set k [lindex [lindex $ret 0] 0] - - set howmany [lindex [lindex $ret 0] 1] - error_check_bad cnt_seq:data [string length $howmany] 0 - - set i 0 - for {set ret [$dbc get -set $k]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -nextdup] } { - incr i - - set k [lindex [lindex $ret 0] 0] - - set datastr [lindex [lindex $ret 0] 1] - set d [data_of $datastr] - set id [ id_of $datastr ] - - error_check_good inner_get_loop:data $d $k - error_check_good inner_get_loop:id $id $i - } - error_check_good keyed_count $i $howmany - - 
} - error_check_good cnt_curs_close [$cnt_dbc close] 0 - error_check_good db_curs_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good cnt_file_close [$cntdb close] 0 - error_check_good db_file_close [$db close] 0 -} diff --git a/storage/bdb/test/test031.tcl b/storage/bdb/test/test031.tcl deleted file mode 100644 index 2b4ad0d9878..00000000000 --- a/storage/bdb/test/test031.tcl +++ /dev/null @@ -1,230 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test031.tcl,v 11.27 2004/01/28 03:36:30 bostic Exp $ -# -# TEST test031 -# TEST Duplicate sorting functionality -# TEST Make sure DB_NODUPDATA works. -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and "ndups" duplicates -# TEST For the data field, prepend random five-char strings (see test032) -# TEST that we force the duplicate sorting code to do something. -# TEST Along the way, test that we cannot insert duplicate duplicates -# TEST using DB_NODUPDATA. -# TEST -# TEST By setting ndups large, we can make this an off-page test -# TEST After all are entered, retrieve all; verify output. -# TEST Close file, reopen, do retrieve and re-verify. -# TEST This does not work for recno -proc test031 { method {nentries 10000} {ndups 5} {tnum "031"} args } { - global alphabet - global rand_init - source ./include.tcl - - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set checkdb $testdir/checkdb.db - set env NULL - } else { - set testfile test$tnum.db - set checkdb checkdb.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - puts "Test$tnum: \ - $method ($args) $nentries small $ndups sorted dup key/data pairs" - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $omethod" - return - } - set db [eval {berkdb_open -create \ - -mode 0644} $args {$omethod -dup -dupsort $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set check_db [eval {berkdb_open \ - -create -mode 0644} $args {-hash $checkdb}] - error_check_good dbopen:check_db [is_valid_db $check_db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - puts "\tTest$tnum.a: Put/get loop, check nodupdata" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - # Re-initialize random string generator - randstring_init $ndups - - set dups "" - for { set i 1 } { $i <= $ndups } { incr i } { - set pref [randstring] - set dups $dups$pref - set datastr $pref:$str - if { $i == 2 } { - set nodupstr $datastr - } - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - } - - # Test DB_NODUPDATA using the 
DB handle - set ret [eval {$db put -nodupdata} \ - $txn $pflags {$str [chop_data $method $nodupstr]}] - error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1 - - set ret [eval {$check_db put} \ - $txn $pflags {$str [chop_data $method $dups]}] - error_check_good checkdb_put $ret 0 - - # Now retrieve all the keys matching this key - set x 0 - set lastdup "" - # Test DB_NODUPDATA using cursor handle - set ret [$dbc get -set $str] - error_check_bad dbc_get [llength $ret] 0 - set datastr [lindex [lindex $ret 0] 1] - error_check_bad dbc_data [string length $datastr] 0 - set ret [eval {$dbc put -nodupdata} \ - {$str [chop_data $method $datastr]}] - error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1 - - for {set ret [$dbc get -set $str]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -nextdup] } { - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - if {[string length $datastr] == 0} { - break - } - if {[string compare \ - $lastdup [pad_data $method $datastr]] > 0} { - error_check_good \ - sorted_dups($lastdup,$datastr) 0 1 - } - incr x - set lastdup $datastr - } - error_check_good "Test$tnum:ndups:$str" $x $ndups - incr count - } - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.b: Checking file for correct duplicates" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE - - set lastkey "THIS WILL NEVER BE A KEY VALUE" - # no need to delete $lastkey - set firsttimethru 1 - for {set ret [$dbc get -first]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - if { [string compare $k $lastkey] != 0 } { - # Remove last key from the checkdb - if { $firsttimethru != 1 } { - error_check_good check_db:del:$lastkey \ - [eval {$check_db del} $txn {$lastkey}] 0 - } - set firsttimethru 0 - set lastdup "" - set lastkey $k - set dups [lindex [lindex [eval {$check_db get} \ - $txn {$k}] 0] 1] - error_check_good check_db:get:$k \ - [string length $dups] [expr $ndups * 4] - } - - if { [string compare $lastdup $d] > 0 } { - error_check_good dup_check:$k:$d 0 1 - } - set lastdup $d - - set pref [string range $d 0 3] - set ndx [string first $pref $dups] - error_check_good valid_duplicate [expr $ndx >= 0] 1 - set a [string range $dups 0 [expr $ndx - 1]] - set b [string range $dups [expr $ndx + 4] end] - set dups $a$b - } - # Remove last key from the checkdb - if { [string length $lastkey] != 0 } { - error_check_good check_db:del:$lastkey \ - [eval {$check_db del} $txn {$lastkey}] 0 - } - - # Make sure there is nothing left in check_db - - set check_c [eval {$check_db cursor} $txn] - set ret [$check_c get -first] - error_check_good check_c:get:$ret [llength $ret] 0 - error_check_good check_c:close [$check_c close] 0 - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good check_db:close [$check_db close] 0 - error_check_good db_close [$db close] 0 -} diff --git 
a/storage/bdb/test/test032.tcl b/storage/bdb/test/test032.tcl deleted file mode 100644 index e7cc49b4776..00000000000 --- a/storage/bdb/test/test032.tcl +++ /dev/null @@ -1,231 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test032.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test032 -# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE -# TEST -# TEST Use the first 10,000 entries from the dictionary. Insert each with -# TEST self as key and "ndups" duplicates. For the data field, prepend the -# TEST letters of the alphabet in a random order so we force the duplicate -# TEST sorting code to do something. By setting ndups large, we can make -# TEST this an off-page test. -# TEST -# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file -# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving -# TEST the unique key prefix (cursor only). Finally test the failure case. -proc test032 { method {nentries 10000} {ndups 5} {tnum "032"} args } { - global alphabet rand_init - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - berkdb srand $rand_init - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set checkdb $testdir/checkdb.db - set env NULL - } else { - set testfile test$tnum.db - set checkdb checkdb.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - puts "Test$tnum:\ - $method ($args) $nentries small sorted $ndups dup key/data pairs" - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $omethod" - return - } - set db [eval {berkdb_open -create -mode 0644 \ - $omethod -dup -dupsort} $args {$testfile} ] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set check_db [eval {berkdb_open \ - -create -mode 0644} $args {-hash $checkdb}] - error_check_good dbopen:check_db [is_valid_db $check_db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - puts "\tTest$tnum.a: Put/get loop" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - # Re-initialize random string generator - randstring_init $ndups - - set dups "" - for { set i 1 } { $i <= $ndups } { incr i } { - set pref [randstring] - set dups $dups$pref - set datastr $pref:$str - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - } - set ret [eval {$check_db put} \ - $txn $pflags {$str [chop_data $method $dups]}] - error_check_good checkdb_put $ret 0 - - # Now retrieve all the keys matching this key - set x 0 - set lastdup "" - for {set ret [$dbc get -set $str]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -nextdup] } { - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - if {[string length $datastr] == 0} { - break - } - if {[string 
compare $lastdup $datastr] > 0} { - error_check_good \ - sorted_dups($lastdup,$datastr) 0 1 - } - incr x - set lastdup $datastr - } - - error_check_good "Test$tnum:ndups:$str" $x $ndups - incr count - } - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest$tnum.b: Checking file for correct duplicates (no cursor)" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set check_c [eval {$check_db cursor} $txn] - error_check_good check_c_open(2) \ - [is_valid_cursor $check_c $check_db] TRUE - - for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} { - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set pref [string range $d $ndx [expr $ndx + 3]] - set data $pref:$k - set ret [eval {$db get} $txn {-get_both $k $data}] - error_check_good \ - get_both_data:$k $ret [list [list $k $data]] - } - } - - $db sync - - # Now repeat the above test using cursor ops - puts "\tTest$tnum.c: Checking file for correct duplicates (cursor)" - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - - for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} { - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set pref [string range $d $ndx [expr $ndx + 3]] - set data $pref:$k - set ret [eval {$dbc get} {-get_both $k $data}] - error_check_good \ - curs_get_both_data:$k $ret [list [list $k $data]] - - set ret [eval {$dbc get} {-get_both_range $k $pref}] - 
error_check_good \ - curs_get_both_range:$k $ret [list [list $k $data]] - } - } - - # Now check the error case - puts "\tTest$tnum.d: Check error case (no cursor)" - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set data XXX$k - set ret [eval {$db get} $txn {-get_both $k $data}] - error_check_good error_case:$k [llength $ret] 0 - } - - # Now check the error case - puts "\tTest$tnum.e: Check error case (cursor)" - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set data XXX$k - set ret [eval {$dbc get} {-get_both $k $data}] - error_check_good error_case:$k [llength $ret] 0 - } - - error_check_good check_c:close [$check_c close] 0 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good check_db:close [$check_db close] 0 - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test033.tcl b/storage/bdb/test/test033.tcl deleted file mode 100644 index b606883c1a0..00000000000 --- a/storage/bdb/test/test033.tcl +++ /dev/null @@ -1,176 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test033.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test033 -# TEST DB_GET_BOTH without comparison function -# TEST -# TEST Use the first 10,000 entries from the dictionary. Insert each with -# TEST self as key and data; add duplicate records for each. 
After all are -# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and -# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and -# TEST nonexistent keys. -# TEST -# TEST XXX -# TEST This does not work for rbtree. -proc test033 { method {nentries 10000} {ndups 5} {tnum "033"} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - if { [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - - puts "Test$tnum: $method ($args) $nentries small $ndups dup key/data pairs" - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - # Duplicate data entries are not allowed in record based methods. - if { [is_record_based $method] == 1 } { - set db [eval {berkdb_open -create -mode 0644 \ - $omethod} $args {$testfile}] - } else { - set db [eval {berkdb_open -create -mode 0644 \ - $omethod -dup} $args {$testfile}] - } - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags "" - set txn "" - - # Allocate a cursor for DB_GET_BOTH_RANGE. 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - - puts "\tTest$tnum.a: Put/get loop." - # Here is the loop where we put and get each key/data pair - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - set ret [eval {$db put} $txn $pflags \ - {$key [chop_data $method $str]}] - error_check_good put $ret 0 - } else { - for { set i 1 } { $i <= $ndups } { incr i } { - set datastr $i:$str - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good db_put $ret 0 - } - } - - # Now retrieve all the keys matching this key and dup - # for non-record based AMs. - if { [is_record_based $method] == 1 } { - test033_recno.check $db $dbc $method $str $txn $key - } else { - test033_check $db $dbc $method $str $txn $ndups - } - incr count - } - - close $did - - puts "\tTest$tnum.b: Verifying DB_GET_BOTH after creation." - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - # Now retrieve all the keys matching this key - # for non-record based AMs. - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - test033_recno.check $db $dbc $method $str $txn $key - } else { - test033_check $db $dbc $method $str $txn $ndups - } - incr count - } - close $did - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} - -# No testing of dups is done on record-based methods. 
-proc test033_recno.check {db dbc method str txn key} { - set ret [eval {$db get} $txn {-recno $key}] - error_check_good "db_get:$method" \ - [lindex [lindex $ret 0] 1] [pad_data $method $str] - set ret [$dbc get -get_both $key [pad_data $method $str]] - error_check_good "db_get_both:$method" \ - [lindex [lindex $ret 0] 1] [pad_data $method $str] -} - -# Testing of non-record-based methods includes duplicates -# and get_both_range. -proc test033_check {db dbc method str txn ndups} { - for {set i 1} {$i <= $ndups } { incr i } { - set datastr $i:$str - - set ret [eval {$db get} $txn {-get_both $str $datastr}] - error_check_good "db_get_both:dup#" \ - [lindex [lindex $ret 0] 1] $datastr - - set ret [$dbc get -get_both $str $datastr] - error_check_good "dbc_get_both:dup#" \ - [lindex [lindex $ret 0] 1] $datastr - - set ret [$dbc get -get_both_range $str $datastr] - error_check_good "dbc_get_both_range:dup#" \ - [lindex [lindex $ret 0] 1] $datastr - } - - # Now retrieve non-existent dup (i is ndups + 1) - set datastr $i:$str - set ret [eval {$db get} $txn {-get_both $str $datastr}] - error_check_good db_get_both:dupfailure [llength $ret] 0 - set ret [$dbc get -get_both $str $datastr] - error_check_good dbc_get_both:dupfailure [llength $ret] 0 - set ret [$dbc get -get_both_range $str $datastr] - error_check_good dbc_get_both_range [llength $ret] 0 -} diff --git a/storage/bdb/test/test034.tcl b/storage/bdb/test/test034.tcl deleted file mode 100644 index 5da92052214..00000000000 --- a/storage/bdb/test/test034.tcl +++ /dev/null @@ -1,23 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1998-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test034.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test034 -# TEST test032 with off-page duplicates -# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates. 
-proc test034 { method {nentries 10000} args} { - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test034: Skipping for specific pagesizes" - return - } - # Test with off-page duplicates - eval {test032 $method $nentries 20 "034" -pagesize 512} $args - - # Test with multiple pages of off-page duplicates - eval {test032 $method [expr $nentries / 10] 100 "034" -pagesize 512} \ - $args -} diff --git a/storage/bdb/test/test035.tcl b/storage/bdb/test/test035.tcl deleted file mode 100644 index 63945c58875..00000000000 --- a/storage/bdb/test/test035.tcl +++ /dev/null @@ -1,22 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test035.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test035 -# TEST Test033 with off-page duplicates -# TEST DB_GET_BOTH functionality with off-page duplicates. -proc test035 { method {nentries 10000} args} { - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test035: Skipping for specific pagesizes" - return - } - # Test with off-page duplicates - eval {test033 $method $nentries 20 "035" -pagesize 512} $args - # Test with multiple pages of off-page duplicates - eval {test033 $method [expr $nentries / 10] 100 "035" -pagesize 512} \ - $args -} diff --git a/storage/bdb/test/test036.tcl b/storage/bdb/test/test036.tcl deleted file mode 100644 index 5fe24cb21ae..00000000000 --- a/storage/bdb/test/test036.tcl +++ /dev/null @@ -1,173 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test036.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test036 -# TEST Test KEYFIRST and KEYLAST when the key doesn't exist -# TEST Put nentries key/data pairs (from the dictionary) using a cursor -# TEST and KEYFIRST and KEYLAST (this tests the case where use use cursor -# TEST put for non-existent keys). -proc test036 { method {nentries 10000} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - if { [is_record_based $method] == 1 } { - puts "Test036 skipping for method recno" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test036.db - set env NULL - } else { - set testfile test036.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - - puts "Test036: $method ($args) $nentries equal key/data pairs" - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - set checkfunc test036_recno.check - append gflags " -recno" - } else { - set checkfunc test036.check - } - puts "\tTest036.a: put/get loop KEYFIRST" - # Here is the loop where we put and get each key/data pair - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) $str - } else { - set key $str - } - set ret [eval {$dbc put} $pflags {-keyfirst $key $str}] - error_check_good put $ret 0 - - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good get [lindex [lindex $ret 0] 1] $str - incr count - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - puts "\tTest036.a: put/get loop KEYLAST" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) $str - } else { - set key $str - } - set ret [eval {$dbc put} $txn $pflags {-keylast $key $str}] - error_check_good put $ret 0 - - set 
ret [eval {$db get} $txn $gflags {$key}] - error_check_good get [lindex [lindex $ret 0] 1] $str - incr count - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest036.c: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - file rename -force $t1 $t3 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - filesort $t1 $t3 - } - -} - -# Check function for test036; keys and data are identical -proc test036.check { key data } { - error_check_good "key/data mismatch" $data $key -} - -proc test036_recno.check { key data } { - global dict - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} diff --git a/storage/bdb/test/test037.tcl b/storage/bdb/test/test037.tcl deleted file mode 100644 index c571ffa3e9d..00000000000 --- a/storage/bdb/test/test037.tcl +++ /dev/null @@ -1,196 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test037.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test037 -# TEST Test DB_RMW -proc test037 { method {nentries 100} args } { - global encrypt - - source ./include.tcl - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then skip this test. It needs its own. 
- if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test037 skipping for env $env" - return - } - - puts "Test037: RMW $method" - - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - # Create the database - env_cleanup $testdir - set testfile test037.db - - set local_env \ - [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir] - error_check_good dbenv [is_valid_env $local_env] TRUE - - set db [eval {berkdb_open \ - -env $local_env -create -mode 0644 $omethod} $args {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - set count 0 - - set pflags "" - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - puts "\tTest037.a: Creating database" - # Here is the loop where we put and get each key/data pair - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $txn $gflags {$key}] - error_check_good get \ - [lindex [lindex $ret 0] 1] [pad_data $method $str] - incr count - } - close $did - error_check_good dbclose [$db close] 0 - error_check_good envclode [$local_env close] 0 - - puts "\tTest037.b: Setting up environments" - - # Open local environment - set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir] - set local_env [eval $env_cmd] - error_check_good dbenv [is_valid_env $local_env] TRUE - - # Open local transaction - set local_txn [$local_env txn] - error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE - - # Open remote environment - set f1 [open |$tclsh_path r+] - puts $f1 "source $test_path/test.tcl" - - set remote_env [send_cmd $f1 
$env_cmd] - error_check_good remote:env_open [is_valid_env $remote_env] TRUE - - # Open remote transaction - set remote_txn [send_cmd $f1 "$remote_env txn"] - error_check_good \ - remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE - - # Now try put test without RMW. Gets on one site should not - # lock out gets on another. - - # Open databases and dictionary - puts "\tTest037.c: Opening databases" - set did [open $dict] - set rkey 0 - - set db [berkdb_open -auto_commit -env $local_env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set rdb [send_cmd $f1 \ - "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"] - error_check_good remote:dbopen [is_valid_db $rdb] TRUE - - puts "\tTest037.d: Testing without RMW" - - # Now, get a key and try to "get" it from both DBs. - error_check_bad "gets on new open" [gets $did str] -1 - incr rkey - if { [is_record_based $method] == 1 } { - set key $rkey - } else { - set key $str - } - - set rec [eval {$db get -txn $local_txn} $gflags {$key}] - error_check_good local_get [lindex [lindex $rec 0] 1] \ - [pad_data $method $str] - - set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"] - error_check_good remote_send $r 0 - - # Now sleep before releasing local record lock - tclsleep 5 - error_check_good local_commit [$local_txn commit] 0 - - # Now get the remote result - set remote_time [rcv_result $f1] - error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1 - - # Commit the remote - set r [send_cmd $f1 "$remote_txn commit"] - error_check_good remote_commit $r 0 - - puts "\tTest037.e: Testing with RMW" - - # Open local transaction - set local_txn [$local_env txn] - error_check_good \ - txn_open [is_valid_txn $local_txn $local_env] TRUE - - # Open remote transaction - set remote_txn [send_cmd $f1 "$remote_env txn"] - error_check_good remote:txn_open \ - [is_valid_txn $remote_txn $remote_env] TRUE - - # Now, get a key and try to "get" it from both DBs. 
- error_check_bad "gets on new open" [gets $did str] -1 - incr rkey - if { [is_record_based $method] == 1 } { - set key $rkey - } else { - set key $str - } - - set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}] - error_check_good \ - local_get [lindex [lindex $rec 0] 1] [pad_data $method $str] - - set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"] - error_check_good remote_send $r 0 - - # Now sleep before releasing local record lock - tclsleep 5 - error_check_good local_commit [$local_txn commit] 0 - - # Now get the remote result - set remote_time [rcv_result $f1] - error_check_good rmw_get:remote_time [expr $remote_time > 4] 1 - - # Commit the remote - set r [send_cmd $f1 "$remote_txn commit"] - error_check_good remote_commit $r 0 - - # Close everything up: remote first - set r [send_cmd $f1 "$rdb close"] - error_check_good remote_db_close $r 0 - - set r [send_cmd $f1 "$remote_env close"] - - # Close locally - error_check_good db_close [$db close] 0 - $local_env close - close $did - close $f1 -} diff --git a/storage/bdb/test/test038.tcl b/storage/bdb/test/test038.tcl deleted file mode 100644 index eaf934f5d13..00000000000 --- a/storage/bdb/test/test038.tcl +++ /dev/null @@ -1,227 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test038.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test038 -# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items -# TEST -# TEST Use the first 10,000 entries from the dictionary. Insert each with -# TEST self as key and "ndups" duplicates. For the data field, prepend the -# TEST letters of the alphabet in a random order so we force the duplicate -# TEST sorting code to do something. By setting ndups large, we can make -# TEST this an off-page test -# TEST -# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving -# TEST each dup in the file explicitly. 
Then remove each duplicate and try -# TEST the retrieval again. -proc test038 { method {nentries 10000} {ndups 5} {tnum "038"} args } { - global alphabet - global rand_init - source ./include.tcl - - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set checkdb $testdir/checkdb.db - set env NULL - } else { - set testfile test$tnum.db - set checkdb checkdb.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - puts "Test$tnum: \ - $method ($args) $nentries small sorted dup key/data pairs" - set db [eval {berkdb_open -create -mode 0644 \ - $omethod -dup -dupsort} $args {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set check_db [eval {berkdb_open \ - -create -mode 0644 -hash} $args {$checkdb}] - error_check_good dbopen:check_db [is_valid_db $check_db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - puts "\tTest$tnum.a: Put/get loop" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - set dups "" - for { set i 1 } { $i <= $ndups } { incr i } { - set pref \ - [string index $alphabet [berkdb random_int 0 25]] - set pref $pref[string \ - index $alphabet [berkdb random_int 0 25]] - while { [string first $pref $dups] != -1 } { - set pref [string toupper $pref] - if { [string first $pref $dups] != -1 } { - set pref [string index $alphabet \ - [berkdb random_int 0 25]] - set pref $pref[string index $alphabet \ - [berkdb random_int 0 25]] - } - } - if { [string length $dups] == 0 } { - set dups $pref - } else { - set dups "$dups $pref" - } - set datastr $pref:$str - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - } - set ret [eval {$check_db put} \ - $txn $pflags {$str [chop_data $method $dups]}] - error_check_good checkdb_put $ret 0 - - # Now retrieve all the keys matching this key - set x 0 - set lastdup "" - for {set ret [$dbc get -set $str]} \ - {[llength $ret] 
!= 0} \ - {set ret [$dbc get -nextdup] } { - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - if {[string length $datastr] == 0} { - break - } - if {[string compare $lastdup $datastr] > 0} { - error_check_good sorted_dups($lastdup,$datastr)\ - 0 1 - } - incr x - set lastdup $datastr - } - error_check_good "Test$tnum:ndups:$str" $x $ndups - incr count - } - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $did - - # Now check the duplicates, then delete then recheck - puts "\tTest$tnum.b: Checking and Deleting duplicates" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - set check_c [eval {$check_db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE - - for {set ndx 0} {$ndx < $ndups} {incr ndx} { - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set nn [expr $ndx * 3] - set pref [string range $d $nn [expr $nn + 1]] - set data $pref:$k - set ret [$dbc get -get_both $k $data] - error_check_good \ - get_both_key:$k [lindex [lindex $ret 0] 0] $k - error_check_good \ - get_both_data:$k [lindex [lindex $ret 0] 1] $data - - set ret [$dbc get -get_both_range $k $pref] - error_check_good \ - get_both_key:$k [lindex [lindex $ret 0] 0] $k - error_check_good \ - get_both_data:$k [lindex [lindex $ret 0] 1] $data - - set ret [$dbc del] - error_check_good del $ret 0 - - set ret [eval {$db get} $txn {-get_both $k $data}] - error_check_good error_case:$k [llength $ret] 0 - - # We should either not find anything (if deleting the - # largest duplicate in the 
set) or a duplicate that - # sorts larger than the one we deleted. - set ret [$dbc get -get_both_range $k $pref] - if { [llength $ret] != 0 } { - set datastr [lindex [lindex $ret 0] 1]] - if {[string compare \ - $pref [lindex [lindex $ret 0] 1]] >= 0} { - error_check_good \ - error_case_range:sorted_dups($pref,$datastr) 0 1 - } - } - - if {$ndx != 0} { - set n [expr ($ndx - 1) * 3] - set pref [string range $d $n [expr $n + 1]] - set data $pref:$k - set ret \ - [eval {$db get} $txn {-get_both $k $data}] - error_check_good error_case:$k [llength $ret] 0 - } - } - } - - error_check_good check_c:close [$check_c close] 0 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good check_db:close [$check_db close] 0 - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test039.tcl b/storage/bdb/test/test039.tcl deleted file mode 100644 index 67b2eaf3361..00000000000 --- a/storage/bdb/test/test039.tcl +++ /dev/null @@ -1,211 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test039.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test039 -# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison -# TEST function. -# TEST -# TEST Use the first 10,000 entries from the dictionary. Insert each with -# TEST self as key and "ndups" duplicates. For the data field, prepend the -# TEST letters of the alphabet in a random order so we force the duplicate -# TEST sorting code to do something. By setting ndups large, we can make -# TEST this an off-page test. -# TEST -# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving -# TEST each dup in the file explicitly. Then remove each duplicate and try -# TEST the retrieval again. 
-proc test039 { method {nentries 10000} {ndups 5} {tnum "039"} args } { - global alphabet - global rand_init - source ./include.tcl - - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set checkdb $testdir/checkdb.db - set env NULL - } else { - set testfile test$tnum.db - set checkdb checkdb.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - cleanup $testdir $env - - puts "Test$tnum: $method $nentries \ - small $ndups unsorted dup key/data pairs" - - set db [eval {berkdb_open -create -mode 0644 \ - $omethod -dup} $args {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set check_db [eval \ - {berkdb_open -create -mode 0644 -hash} $args {$checkdb}] - error_check_good dbopen:check_db [is_valid_db $check_db] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - puts "\tTest$tnum.a: Put/get loop" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - while { [gets $did str] != -1 && $count < $nentries } { - set dups "" - for { set i 1 } { $i <= $ndups } { incr i } { - set pref \ - [string index $alphabet [berkdb random_int 0 25]] - set pref $pref[string \ - index $alphabet [berkdb random_int 0 25]] - while { [string first $pref $dups] != -1 } { - set pref [string toupper $pref] - if { [string first $pref $dups] != -1 } { - set pref [string index $alphabet \ - [berkdb random_int 0 25]] - set pref $pref[string index $alphabet \ - [berkdb random_int 0 25]] - } - } - if { [string length $dups] == 0 } { - set dups $pref - } else { - set dups "$dups $pref" - } - set datastr $pref:$str - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - } - set ret [eval {$check_db put} \ - $txn $pflags {$str [chop_data $method $dups]}] - error_check_good checkdb_put $ret 0 - - # Now retrieve all the keys matching this key - set x 0 - set lastdup "" - for {set ret [$dbc get -set $str]} \ - {[llength $ret] != 0} \ 
- {set ret [$dbc get -nextdup] } { - set k [lindex [lindex $ret 0] 0] - if { [string compare $k $str] != 0 } { - break - } - set datastr [lindex [lindex $ret 0] 1] - if {[string length $datastr] == 0} { - break - } - set xx [expr $x * 3] - set check_data \ - [string range $dups $xx [expr $xx + 1]]:$k - error_check_good retrieve $datastr $check_data - incr x - } - error_check_good "Test$tnum:ndups:$str" $x $ndups - incr count - } - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - close $did - - # Now check the duplicates, then delete then recheck - puts "\tTest$tnum.b: Checking and Deleting duplicates" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - set check_c [eval {$check_db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE - - for {set ndx 0} {$ndx < $ndups} {incr ndx} { - for {set ret [$check_c get -first]} \ - {[llength $ret] != 0} \ - {set ret [$check_c get -next] } { - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_bad data_check:$d [string length $d] 0 - - set nn [expr $ndx * 3] - set pref [string range $d $nn [expr $nn + 1]] - set data $pref:$k - set ret [$dbc get -get_both $k $data] - error_check_good \ - get_both_key:$k [lindex [lindex $ret 0] 0] $k - error_check_good \ - get_both_data:$k [lindex [lindex $ret 0] 1] $data - - set ret [$dbc del] - error_check_good del $ret 0 - - set ret [$dbc get -get_both $k $data] - error_check_good get_both:$k [llength $ret] 0 - - set ret [$dbc get -get_both_range $k $data] - error_check_good get_both_range:$k [llength $ret] 0 - - if {$ndx != 0} { - set n [expr ($ndx - 1) * 3] - set pref [string range $d $n [expr $n + 1]] - set data $pref:$k - set ret [$dbc get -get_both $k $data] - error_check_good error_case:$k 
[llength $ret] 0 - } - } - } - - error_check_good check_c:close [$check_c close] 0 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - error_check_good check_db:close [$check_db close] 0 - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test040.tcl b/storage/bdb/test/test040.tcl deleted file mode 100644 index 61d5cd1f366..00000000000 --- a/storage/bdb/test/test040.tcl +++ /dev/null @@ -1,23 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1998-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test040.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test040 -# TEST Test038 with off-page duplicates -# TEST DB_GET_BOTH functionality with off-page duplicates. -proc test040 { method {nentries 10000} args} { - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test040: skipping for specific pagesizes" - return - } - # Test with off-page duplicates - eval {test038 $method $nentries 20 "040" -pagesize 512} $args - - # Test with multiple pages of off-page duplicates - eval {test038 $method [expr $nentries / 10] 100 "040" -pagesize 512} \ - $args -} diff --git a/storage/bdb/test/test041.tcl b/storage/bdb/test/test041.tcl deleted file mode 100644 index 790ece9437f..00000000000 --- a/storage/bdb/test/test041.tcl +++ /dev/null @@ -1,18 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test041.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test041 -# TEST Test039 with off-page duplicates -# TEST DB_GET_BOTH functionality with off-page duplicates. 
-proc test041 { method {nentries 10000} args} { - # Test with off-page duplicates - eval {test039 $method $nentries 20 "041" -pagesize 512} $args - - # Test with multiple pages of off-page duplicates - eval {test039 $method [expr $nentries / 10] 100 "041" -pagesize 512} \ - $args -} diff --git a/storage/bdb/test/test042.tcl b/storage/bdb/test/test042.tcl deleted file mode 100644 index b216ebf27d8..00000000000 --- a/storage/bdb/test/test042.tcl +++ /dev/null @@ -1,184 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test042.tcl,v 11.46 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test042 -# TEST Concurrent Data Store test (CDB) -# TEST -# TEST Multiprocess DB test; verify that locking is working for the -# TEST concurrent access method product. -# TEST -# TEST Use the first "nentries" words from the dictionary. Insert each with -# TEST self as key and a fixed, medium length data string. Then fire off -# TEST multiple processes that bang on the database. Each one should try to -# TEST read and write random keys. When they rewrite, they'll append their -# TEST pid to the data string (sometimes doing a rewrite sometimes doing a -# TEST partial put). Some will use cursors to traverse through a few keys -# TEST before finding one to write. - -proc test042 { method {nentries 1000} args } { - global encrypt - - # - # If we are using an env, then skip this test. It needs its own. 
- set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test042 skipping for env $env" - return - } - - set args [convert_args $method $args] - if { $encrypt != 0 } { - puts "Test042 skipping for security" - return - } - test042_body $method $nentries 0 $args - test042_body $method $nentries 1 $args -} - -proc test042_body { method nentries alldb args } { - source ./include.tcl - - if { $alldb } { - set eflag "-cdb -cdb_alldb" - } else { - set eflag "-cdb" - } - puts "Test042: CDB Test ($eflag) $method $nentries" - - # Set initial parameters - set do_exit 0 - set iter 10000 - set procs 5 - - # Process arguments - set oargs "" - for { set i 0 } { $i < [llength $args] } {incr i} { - switch -regexp -- [lindex $args $i] { - -dir { incr i; set testdir [lindex $args $i] } - -iter { incr i; set iter [lindex $args $i] } - -procs { incr i; set procs [lindex $args $i] } - -exit { set do_exit 1 } - default { append oargs " " [lindex $args $i] } - } - } - - # Create the database and open the dictionary - set basename test042 - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - - env_cleanup $testdir - - set env [eval {berkdb_env -create} $eflag -home $testdir] - error_check_good dbenv [is_valid_env $env] TRUE - - # Env is created, now set up database - test042_dbinit $env $nentries $method $oargs $basename.0.db - if { $alldb } { - for { set i 1 } {$i < $procs} {incr i} { - test042_dbinit $env $nentries $method $oargs \ - $basename.$i.db - } - } - - # Remove old mpools and Open/create the lock and mpool regions - error_check_good env:close:$env [$env close] 0 - set ret [berkdb envremove -home $testdir] - error_check_good env_remove $ret 0 - - set env [eval {berkdb_env \ - -create -cachesize {0 1048576 1}} $eflag -home $testdir] - error_check_good dbenv [is_valid_widget $env env] TRUE - - if { $do_exit == 1 } { - return - } - - # Now spawn off processes - berkdb debug_check - puts "\tTest042.b: 
forking off $procs children" - set pidlist {} - - for { set i 0 } {$i < $procs} {incr i} { - if { $alldb } { - set tf $basename.$i.db - } else { - set tf $basename.0.db - } - puts "exec $tclsh_path $test_path/wrap.tcl \ - mdbscript.tcl $testdir/test042.$i.log \ - $method $testdir $tf $nentries $iter $i $procs &" - set p [exec $tclsh_path $test_path/wrap.tcl \ - mdbscript.tcl $testdir/test042.$i.log $method \ - $testdir $tf $nentries $iter $i $procs &] - lappend pidlist $p - } - puts "Test042: $procs independent processes now running" - watch_procs $pidlist - - # Make sure we haven't added or lost any entries. - set dblist [glob $testdir/$basename.*.db] - foreach file $dblist { - set tf [file tail $file] - set db [eval {berkdb_open -env $env $tf}] - set statret [$db stat] - foreach pair $statret { - set fld [lindex $pair 0] - if { [string compare $fld {Number of records}] == 0 } { - set numrecs [lindex $pair 1] - break - } - } - error_check_good nentries $numrecs $nentries - error_check_good db_close [$db close] 0 - } - - # Check for test failure - set errstrings [eval findfail [glob $testdir/test042.*.log]] - foreach str $errstrings { - puts "FAIL: error message in log file: $str" - } - - # Test is done, blow away lock and mpool region - reset_env $env -} - -proc test042_dbinit { env nentries method oargs tf } { - global datastr - source ./include.tcl - - set omethod [convert_method $method] - set db [eval {berkdb_open -env $env -create \ - -mode 0644 $omethod} $oargs $tf] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put each key/data pair - puts "\tTest042.a: put loop $tf" - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put:$db $ret 0 - 
incr count - } - close $did - error_check_good close:$db [$db close] 0 -} diff --git a/storage/bdb/test/test043.tcl b/storage/bdb/test/test043.tcl deleted file mode 100644 index bbb934ccef1..00000000000 --- a/storage/bdb/test/test043.tcl +++ /dev/null @@ -1,192 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test043.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test043 -# TEST Recno renumbering and implicit creation test -# TEST Test the Record number implicit creation and renumbering options. -proc test043 { method {nentries 10000} args} { - source ./include.tcl - - set do_renumber [is_rrecno $method] - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test043: $method ($args)" - - if { [is_record_based $method] != 1 } { - puts "Test043 skipping for method $method" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test043.db - set env NULL - } else { - set testfile test043.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. 
- # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - cleanup $testdir $env - - # Create the database - set db [eval {berkdb_open -create -mode 0644} $args \ - {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set pflags "" - set gflags " -recno" - set txn "" - - # First test implicit creation and retrieval - set count 1 - set interval 5 - if { $nentries < $interval } { - set nentries [expr $interval + 1] - } - puts "\tTest043.a: insert keys at $interval record intervals" - while { $count <= $nentries } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$count [chop_data $method $count]}] - error_check_good "$db put $count" $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - set last $count - incr count $interval - } - - puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE - - set check 1 - for { set rec [$dbc get -first] } { [llength $rec] != 0 } { - set rec [$dbc get -next] } { - set k [lindex [lindex $rec 0] 0] - set d [pad_data $method [lindex [lindex $rec 0] 1]] - error_check_good "$dbc get key==data" [pad_data $method $k] $d - error_check_good "$dbc get sequential" $k $check - if { $k > $nentries } { - error_check_good "$dbc get key too large" $k $nentries - } - incr check $interval - } - - # Now make sure that we get DB_KEYEMPTY for non-existent keys - puts "\tTest043.c: Retrieve non-existent keys" - global errorInfo - - set check 1 - for { set rec [$dbc get -first] } { [llength $rec] != 0 } { - set rec [$dbc get -next] } { - set k [lindex [lindex $rec 0] 0] - - set ret [eval {$db get} $txn $gflags {[expr $k + 1]}] - error_check_good "$db \ - get 
[expr $k + 1]" $ret [list] - - incr check $interval - # Make sure we don't do a retrieve past the end of file - if { $check >= $last } { - break - } - } - - # Now try deleting and make sure the right thing happens. - puts "\tTest043.d: Delete tests" - set rec [$dbc get -first] - error_check_bad "$dbc get -first" [llength $rec] 0 - error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1 - error_check_good "$dbc get -first data" \ - [lindex [lindex $rec 0] 1] [pad_data $method 1] - - # Delete the first item - error_check_good "$dbc del" [$dbc del] 0 - - # Retrieving 1 should always fail - set ret [eval {$db get} $txn $gflags {1}] - error_check_good "$db get 1" $ret [list] - - # Now, retrieving other keys should work; keys will vary depending - # upon renumbering. - if { $do_renumber == 1 } { - set count [expr 0 + $interval] - set max [expr $nentries - 1] - } else { - set count [expr 1 + $interval] - set max $nentries - } - - while { $count <= $max } { - set rec [eval {$db get} $txn $gflags {$count}] - if { $do_renumber == 1 } { - set data [expr $count + 1] - } else { - set data $count - } - error_check_good "$db get $count" \ - [pad_data $method $data] [lindex [lindex $rec 0] 1] - incr count $interval - } - set max [expr $count - $interval] - - puts "\tTest043.e: Verify LAST/PREV functionality" - set count $max - for { set rec [$dbc get -last] } { [llength $rec] != 0 } { - set rec [$dbc get -prev] } { - set k [lindex [lindex $rec 0] 0] - set d [lindex [lindex $rec 0] 1] - if { $do_renumber == 1 } { - set data [expr $k + 1] - } else { - set data $k - } - error_check_good \ - "$dbc get key==data" [pad_data $method $data] $d - error_check_good "$dbc get sequential" $k $count - if { $k > $nentries } { - error_check_good "$dbc get key too large" $k $nentries - } - set count [expr $count - $interval] - if { $count < 1 } { - break - } - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - 
error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test044.tcl b/storage/bdb/test/test044.tcl deleted file mode 100644 index 22d56a4345e..00000000000 --- a/storage/bdb/test/test044.tcl +++ /dev/null @@ -1,252 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test044.tcl,v 11.35 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test044 -# TEST Small system integration tests -# TEST Test proper functioning of the checkpoint daemon, -# TEST recovery, transactions, etc. -# TEST -# TEST System integration DB test: verify that locking, recovery, checkpoint, -# TEST and all the other utilities basically work. -# TEST -# TEST The test consists of $nprocs processes operating on $nfiles files. A -# TEST transaction consists of adding the same key/data pair to some random -# TEST number of these files. We generate a bimodal distribution in key size -# TEST with 70% of the keys being small (1-10 characters) and the remaining -# TEST 30% of the keys being large (uniform distribution about mean $key_avg). -# TEST If we generate a key, we first check to make sure that the key is not -# TEST already in the dataset. If it is, we do a lookup. -# -# XXX -# This test uses grow-only files currently! -proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } { - source ./include.tcl - global encrypt - global rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - berkdb srand $rand_init - - # If we are using an env, then skip this test. It needs its own. 
- set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test044 skipping for env $env" - return - } - if { $encrypt != 0 } { - puts "Test044 skipping for security" - return - } - - puts "Test044: system integration test db $method $nprocs processes \ - on $nfiles files" - - # Parse options - set otherargs "" - set key_avg 10 - set data_avg 20 - set do_exit 0 - for { set i 0 } { $i < [llength $args] } {incr i} { - switch -regexp -- [lindex $args $i] { - -key_avg { incr i; set key_avg [lindex $args $i] } - -data_avg { incr i; set data_avg [lindex $args $i] } - -testdir { incr i; set testdir [lindex $args $i] } - -x.* { set do_exit 1 } - default { - lappend otherargs [lindex $args $i] - } - } - } - - if { $cont == 0 } { - # Create the database and open the dictionary - env_cleanup $testdir - - # Create an environment - puts "\tTest044.a: creating environment and $nfiles files" - set dbenv [berkdb_env -create -txn -home $testdir] - error_check_good env_open [is_valid_env $dbenv] TRUE - - # Create a bunch of files - set m $method - - for { set i 0 } { $i < $nfiles } { incr i } { - if { $method == "all" } { - switch [berkdb random_int 1 2] { - 1 { set m -btree } - 2 { set m -hash } - } - } else { - set m $omethod - } - - set db [eval {berkdb_open -env $dbenv -create \ - -mode 0644 $m} $otherargs {test044.$i.db}] - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good db_close [$db close] 0 - } - } - - # Close the environment - $dbenv close - - if { $do_exit == 1 } { - return - } - - # Database is created, now fork off the kids. 
- puts "\tTest044.b: forking off $nprocs processes and utilities" - set cycle 1 - set ncycles 3 - while { $cycle <= $ncycles } { - set dbenv [berkdb_env -create -txn -home $testdir] - error_check_good env_open [is_valid_env $dbenv] TRUE - - # Fire off deadlock detector and checkpointer - puts "Beginning cycle $cycle" - set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &] - set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &] - puts "Deadlock detector: $ddpid Checkpoint daemon $cppid" - - set pidlist {} - for { set i 0 } {$i < $nprocs} {incr i} { - set p [exec $tclsh_path \ - $test_path/sysscript.tcl $testdir \ - $nfiles $key_avg $data_avg $omethod \ - >& $testdir/test044.$i.log &] - lappend pidlist $p - } - set sleep [berkdb random_int 300 600] - puts \ -"[timestamp] $nprocs processes running $pidlist for $sleep seconds" - tclsleep $sleep - - # Now simulate a crash - puts "[timestamp] Crashing" - - # - # The environment must remain open until this point to get - # proper sharing (using the paging file) on Win/9X. 
[#2342] - # - error_check_good env_close [$dbenv close] 0 - - tclkill $ddpid - tclkill $cppid - - foreach p $pidlist { - tclkill $p - } - - # Check for test failure - set errstrings [eval findfail [glob $testdir/test044.*.log]] - foreach str $errstrings { - puts "FAIL: error message in log file: $str" - } - - # Now run recovery - test044_verify $testdir $nfiles - incr cycle - } -} - -proc test044_usage { } { - puts -nonewline "test044 method nentries [-d directory] [-i iterations]" - puts " [-p procs] -x" -} - -proc test044_verify { dir nfiles } { - source ./include.tcl - - # Save everything away in case something breaks -# for { set f 0 } { $f < $nfiles } {incr f} { -# file copy -force $dir/test044.$f.db $dir/test044.$f.save1 -# } -# foreach f [glob $dir/log.*] { -# if { [is_substr $f save] == 0 } { -# file copy -force $f $f.save1 -# } -# } - - # Run recovery and then read through all the database files to make - # sure that they all look good. - - puts "\tTest044.verify: Running recovery and verifying file contents" - set stat [catch {exec $util_path/db_recover -h $dir} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." 
- } - - # Save everything away in case something breaks -# for { set f 0 } { $f < $nfiles } {incr f} { -# file copy -force $dir/test044.$f.db $dir/test044.$f.save2 -# } -# foreach f [glob $dir/log.*] { -# if { [is_substr $f save] == 0 } { -# file copy -force $f $f.save2 -# } -# } - - for { set f 0 } { $f < $nfiles } { incr f } { - set db($f) [berkdb_open $dir/test044.$f.db] - error_check_good $f:dbopen [is_valid_db $db($f)] TRUE - - set cursors($f) [$db($f) cursor] - error_check_bad $f:cursor_open $cursors($f) NULL - error_check_good \ - $f:cursor_open [is_substr $cursors($f) $db($f)] 1 - } - - for { set f 0 } { $f < $nfiles } { incr f } { - for {set d [$cursors($f) get -first] } \ - { [string length $d] != 0 } \ - { set d [$cursors($f) get -next] } { - - set k [lindex [lindex $d 0] 0] - set d [lindex [lindex $d 0] 1] - - set flist [zero_list $nfiles] - set r $d - while { [set ndx [string first : $r]] != -1 } { - set fnum [string range $r 0 [expr $ndx - 1]] - if { [lindex $flist $fnum] == 0 } { - set fl "-set" - } else { - set fl "-next" - } - - if { $fl != "-set" || $fnum != $f } { - if { [string compare $fl "-set"] == 0} { - set full [$cursors($fnum) \ - get -set $k] - } else { - set full [$cursors($fnum) \ - get -next] - } - set key [lindex [lindex $full 0] 0] - set rec [lindex [lindex $full 0] 1] - error_check_good \ - $f:dbget_$fnum:key $key $k - error_check_good \ - $f:dbget_$fnum:data $rec $d - } - - set flist [lreplace $flist $fnum $fnum 1] - incr ndx - set r [string range $r $ndx end] - } - } - } - - for { set f 0 } { $f < $nfiles } { incr f } { - error_check_good $cursors($f) [$cursors($f) close] 0 - error_check_good db_close:$f [$db($f) close] 0 - } -} diff --git a/storage/bdb/test/test045.tcl b/storage/bdb/test/test045.tcl deleted file mode 100644 index 2b4c517ca86..00000000000 --- a/storage/bdb/test/test045.tcl +++ /dev/null @@ -1,126 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. 
All rights reserved. -# -# $Id: test045.tcl,v 11.27 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test045 -# TEST Small random tester -# TEST Runs a number of random add/delete/retrieve operations. -# TEST Tests both successful conditions and error conditions. -# TEST -# TEST Run the random db tester on the specified access method. -# -# Options are: -# -adds -# -cursors -# -dataavg -# -delete -# -dups -# -errpct -# -init -# -keyavg -proc test045 { method {nops 10000} args } { - source ./include.tcl - global encrypt - - # - # If we are using an env, then skip this test. It needs its own. - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test045 skipping for env $env" - return - } - set args [convert_args $method $args] - if { $encrypt != 0 } { - puts "Test045 skipping for security" - return - } - set omethod [convert_method $method] - - puts "Test045: Random tester on $method for $nops operations" - - # Set initial parameters - set adds [expr $nops * 10] - set cursors 5 - set dataavg 40 - set delete $nops - set dups 0 - set errpct 0 - set init 0 - if { [is_record_based $method] == 1 } { - set keyavg 10 - } else { - set keyavg 25 - } - - # Process arguments - set oargs "" - for { set i 0 } { $i < [llength $args] } {incr i} { - switch -regexp -- [lindex $args $i] { - -adds { incr i; set adds [lindex $args $i] } - -cursors { incr i; set cursors [lindex $args $i] } - -dataavg { incr i; set dataavg [lindex $args $i] } - -delete { incr i; set delete [lindex $args $i] } - -dups { incr i; set dups [lindex $args $i] } - -errpct { incr i; set errpct [lindex $args $i] } - -init { incr i; set init [lindex $args $i] } - -keyavg { incr i; set keyavg [lindex $args $i] } - -extent { incr i; - lappend oargs "-extent" "100" } - default { lappend oargs [lindex $args $i] } - } - } - - # Create the database and and initialize it. 
- set root $testdir/test045 - set f $root.db - env_cleanup $testdir - - # Run the script with 3 times the number of initial elements to - # set it up. - set db [eval {berkdb_open \ - -create -mode 0644 $omethod} $oargs {$f}] - error_check_good dbopen:$f [is_valid_db $db] TRUE - - set r [$db close] - error_check_good dbclose:$f $r 0 - - # We redirect standard out, but leave standard error here so we - # can see errors. - - puts "\tTest045.a: Initializing database" - if { $init != 0 } { - set n [expr 3 * $init] - exec $tclsh_path \ - $test_path/dbscript.tcl $method $f $n \ - 1 $init $n $keyavg $dataavg $dups 0 -1 \ - > $testdir/test045.init - } - # Check for test failure - set initerrs [findfail $testdir/test045.init] - foreach str $initerrs { - puts "FAIL: error message in .init file: $str" - } - - puts "\tTest045.b: Now firing off berkdb rand dbscript, running: " - # Now the database is initialized, run a test - puts "$tclsh_path\ - $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \ - $keyavg $dataavg $dups $errpct > $testdir/test045.log" - - exec $tclsh_path \ - $test_path/dbscript.tcl $method $f \ - $nops $cursors $delete $adds $keyavg \ - $dataavg $dups $errpct \ - > $testdir/test045.log - - # Check for test failure - set logerrs [findfail $testdir/test045.log] - foreach str $logerrs { - puts "FAIL: error message in log file: $str" - } -} diff --git a/storage/bdb/test/test046.tcl b/storage/bdb/test/test046.tcl deleted file mode 100644 index 63d0ec7c486..00000000000 --- a/storage/bdb/test/test046.tcl +++ /dev/null @@ -1,813 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test046.tcl,v 11.36 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test046 -# TEST Overwrite test of small/big key/data with cursor checks. 
-proc test046 { method args } { - global alphabet - global errorInfo - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "\tTest046: Overwrite test with cursor and small/big key/data." - puts "\tTest046:\t$method $args" - - if { [is_rrecno $method] == 1} { - puts "\tTest046: skipping for method $method." - return - } - - set key "key" - set data "data" - set txn "" - set flags "" - - if { [is_record_based $method] == 1} { - set key "" - } - - puts "\tTest046: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test046 - set env NULL - } else { - set testfile test046 - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 $args $omethod" - set db [eval {berkdb_open} $oflags $testfile.a.db] - error_check_good dbopen [is_valid_db $db] TRUE - - # keep nkeys even - set nkeys 20 - - # Fill page w/ small key/data pairs - puts "\tTest046: Fill page with $nkeys small key/data pairs." 
- for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { [is_record_based $method] == 1} { - set ret [eval {$db put} $txn {$i $data$i}] - } elseif { $i < 10 } { - set ret [eval {$db put} $txn [set key]00$i \ - [set data]00$i] - } elseif { $i < 100 } { - set ret [eval {$db put} $txn [set key]0$i \ - [set data]0$i] - } else { - set ret [eval {$db put} $txn {$key$i $data$i}] - } - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # open curs to db - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - # get db order of keys - for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \ - set ret [$dbc get -next]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - incr i - } - - puts "\tTest046.a: Deletes by key." - puts "\t\tTest046.a.1: Get data with SET, then delete before cursor." 
- # get key in middle of page, call this the nth set curr to it - set i [expr $nkeys/2] - set ret [$dbc get -set $key_set($i)] - error_check_bad dbc_get:set [llength $ret] 0 - set curr $ret - - # delete before cursor(n-1), make sure it is gone - set i [expr $i - 1] - error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0 - - # use set_range to get first key starting at n-1, should - # give us nth--but only works for btree - if { [is_btree $method] == 1 } { - set ret [$dbc get -set_range $key_set($i)] - } else { - if { [is_record_based $method] == 1 } { - set ret [$dbc get -set $key_set($i)] - error_check_good \ - dbc_get:deleted(recno) [llength [lindex $ret 1]] 0 - #error_check_good \ - # catch:get [catch {$dbc get -set $key_set($i)} ret] 1 - #error_check_good \ - # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1 - } else { - set ret [$dbc get -set $key_set($i)] - error_check_good dbc_get:deleted [llength $ret] 0 - } - set ret [$dbc get -set $key_set([incr i])] - incr i -1 - } - error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0 - error_check_good dbc_get(match):set $ret $curr - - puts "\t\tTest046.a.2: Delete cursor item by key." 
- # nth key, which cursor should be on now - set i [incr i] - set ret [eval {$db del} $txn {$key_set($i)}] - error_check_good db_del $ret 0 - - # this should return n+1 key/data, curr has nth key/data - if { [string compare $omethod "-btree"] == 0 } { - set ret [$dbc get -set_range $key_set($i)] - } else { - if { [is_record_based $method] == 1 } { - set ret [$dbc get -set $key_set($i)] - error_check_good \ - dbc_get:deleted(recno) [llength [lindex $ret 1]] 0 - #error_check_good \ - # catch:get [catch {$dbc get -set $key_set($i)} ret] 1 - #error_check_good \ - # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1 - } else { - set ret [$dbc get -set $key_set($i)] - error_check_good dbc_get:deleted [llength $ret] 0 - } - set ret [$dbc get -set $key_set([expr $i+1])] - } - error_check_bad dbc_get(post-delete):set_range [llength $ret] 0 - error_check_bad dbc_get(no-match):set_range $ret $curr - - puts "\t\tTest046.a.3: Delete item after cursor." - # we'll delete n+2, since we have deleted n-1 and n - # i still equal to nth, cursor on n+1 - set i [incr i] - set ret [$dbc get -set $key_set($i)] - error_check_bad dbc_get:set [llength $ret] 0 - set curr [$dbc get -next] - error_check_bad dbc_get:next [llength $curr] 0 - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $curr] 0 - # delete *after* cursor pos. 
- error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0 - - # make sure item is gone, try to get it - if { [string compare $omethod "-btree"] == 0} { - set ret [$dbc get -set_range $key_set($i)] - } else { - if { [is_record_based $method] == 1 } { - set ret [$dbc get -set $key_set($i)] - error_check_good \ - dbc_get:deleted(recno) [llength [lindex $ret 1]] 0 - #error_check_good \ - # catch:get [catch {$dbc get -set $key_set($i)} ret] 1 - #error_check_good \ - # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1 - } else { - set ret [$dbc get -set $key_set($i)] - error_check_good dbc_get:deleted [llength $ret] 0 - } - set ret [$dbc get -set $key_set([expr $i +1])] - } - error_check_bad dbc_get:set(_range) [llength $ret] 0 - error_check_bad dbc_get:set(_range) $ret $curr - error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \ - $key_set([expr $i+1]) - - puts "\tTest046.b: Deletes by cursor." - puts "\t\tTest046.b.1: Delete, do DB_NEXT." - error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - set i [expr $i+2] - # i = n+4 - error_check_good dbc_get:next(match) \ - [lindex [lindex $ret 0] 0] $key_set($i) - - puts "\t\tTest046.b.2: Delete, do DB_PREV." - error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $ret] 0 - set i [expr $i-3] - # i = n+1 (deleted all in between) - error_check_good dbc_get:prev(match) \ - [lindex [lindex $ret 0] 0] $key_set($i) - - puts "\t\tTest046.b.3: Delete, do DB_CURRENT." - error_check_good dbc:del [$dbc del] 0 - # we just deleted, so current item should be KEYEMPTY, throws err - set ret [$dbc get -current] - error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0 - #error_check_good catch:get:current [catch {$dbc get -current} ret] 1 - #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1 - - puts "\tTest046.c: Inserts (before/after), by key then cursor." 
- puts "\t\tTest046.c.1: Insert by key before the cursor." - # i is at curs pos, i=n+1, we want to go BEFORE - set i [incr i -1] - set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}] - error_check_good db_put:before $ret 0 - - puts "\t\tTest046.c.2: Insert by key after the cursor." - set i [incr i +2] - set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}] - error_check_good db_put:after $ret 0 - - puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)." - # cursor is on n+1, we'll change i to match - set i [incr i -1] - - error_check_good dbc:close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db:close [$db close] 0 - if { [is_record_based $method] == 1} { - puts "\t\tSkipping the rest of test for method $method." - puts "\tTest046 ($method) complete." - return - } else { - # Reopen without printing __db_errs. - set db [eval {berkdb_open_noerr} $oflags $testfile.a.db] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor [is_valid_cursor $dbc $db] TRUE - - # should fail with EINVAL (deleted cursor) - set errorCode NONE - error_check_good catch:put:before 1 \ - [catch {$dbc put -before $data_set($i)} ret] - error_check_good dbc_put:deleted:before \ - [is_substr $errorCode "EINVAL"] 1 - - # should fail with EINVAL - set errorCode NONE - error_check_good catch:put:after 1 \ - [catch {$dbc put -after $data_set($i)} ret] - error_check_good dbc_put:deleted:after \ - [is_substr $errorCode "EINVAL"] 1 - - puts "\t\tTest046.c.4:\ - Insert by cursor before/after existent cursor." 
- # can't use before after w/o dup except renumber in recno - # first, restore an item so they don't fail - #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}] - #error_check_good db_put $ret 0 - - #set ret [$dbc get -set $key_set($i)] - #error_check_bad dbc_get:set [llength $ret] 0 - #set i [incr i -2] - # i = n - 1 - #set ret [$dbc get -prev] - #set ret [$dbc put -before $key_set($i) $data_set($i)] - #error_check_good dbc_put:before $ret 0 - # cursor pos is adjusted to match prev, recently inserted - #incr i - # i = n - #set ret [$dbc put -after $key_set($i) $data_set($i)] - #error_check_good dbc_put:after $ret 0 - } - - # For the next part of the test, we need a db with no dups to test - # overwrites - puts "\tTest046.d.0: Cleanup, close db, open new db with no dups." - error_check_good dbc:close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db:close [$db close] 0 - - set db [eval {berkdb_open} $oflags $testfile.d.db] - error_check_good dbopen [is_valid_db $db] TRUE - # Fill page w/ small key/data pairs - puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs." 
- for { set i 1 } { $i < $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i $data$i}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - set nkeys 20 - - # Prepare cursor on item - set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - - # Prepare unique big/small values for an initial - # and an overwrite set of key/data - foreach ptype {init over} { - foreach size {big small} { - if { [string compare $size big] == 0 } { - set key_$ptype$size \ - KEY_$size[repeat alphabet 250] - set data_$ptype$size \ - DATA_$size[repeat alphabet 250] - } else { - set key_$ptype$size \ - KEY_$size[repeat alphabet 10] - set data_$ptype$size \ - DATA_$size[repeat alphabet 10] - } - } - } - - set i 0 - # Do all overwrites for key and cursor - foreach type {key_over curs_over} { - # Overwrite (i=initial) four different kinds of pairs - incr i - puts "\tTest046.d: Overwrites $type." 
- foreach i_pair {\ - {small small} {big small} {small big} {big big} } { - # Overwrite (w=write) with four different kinds of data - foreach w_pair {\ - {small small} {big small} {small big} {big big} } { - - # we can only overwrite if key size matches - if { [string compare [lindex \ - $i_pair 0] [lindex $w_pair 0]] != 0} { - continue - } - - # first write the initial key/data - set ret [$dbc put -keyfirst \ - key_init[lindex $i_pair 0] \ - data_init[lindex $i_pair 1]] - error_check_good \ - dbc_put:curr:init:$i_pair $ret 0 - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good dbc_get:curr:data \ - [lindex [lindex $ret 0] 1] \ - data_init[lindex $i_pair 1] - - # Now, try to overwrite: dups not supported in - # this db - if { [string compare $type key_over] == 0 } { - puts "\t\tTest046.d.$i: Key\ - Overwrite:($i_pair) by ($w_pair)." - set ret [eval {$db put} $txn \ - $"key_init[lindex $i_pair 0]" \ - $"data_over[lindex $w_pair 1]"] - error_check_good \ - dbput:over:i($i_pair):o($w_pair) $ret 0 - # check value - set ret [eval {$db get} $txn \ - $"key_init[lindex $i_pair 0]"] - error_check_bad \ - db:get:check [llength $ret] 0 - error_check_good db:get:compare_data \ - [lindex [lindex $ret 0] 1] \ - $"data_over[lindex $w_pair 1]" - } else { - # This is a cursor overwrite - puts \ - "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)." - set ret [$dbc put -current \ - $"data_over[lindex $w_pair 1]"] - error_check_good \ - dbcput:over:i($i_pair):o($w_pair) $ret 0 - # check value - set ret [$dbc get -current] - error_check_bad \ - dbc_get:curr [llength $ret] 0 - error_check_good dbc_get:curr:data \ - [lindex [lindex $ret 0] 1] \ - $"data_over[lindex $w_pair 1]" - } - } ;# foreach write pair - } ;# foreach initial pair - } ;# foreach type big/small - - puts "\tTest046.d.3: Cleanup for next part of test." 
- error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - if { [is_rbtree $method] == 1} { - puts "\tSkipping the rest of Test046 for method $method." - puts "\tTest046 complete." - return - } - - puts "\tTest046.e.1: Open db with sorted dups." - set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db] - error_check_good dbopen [is_valid_db $db] TRUE - - # keep nkeys even - set nkeys 20 - set ndups 20 - - # Fill page w/ small key/data pairs - puts "\tTest046.e.2:\ - Put $nkeys small key/data pairs and $ndups sorted dups." - for { set i 0 } { $i < $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { $i < 10 } { - set ret [eval {$db put} $txn [set key]0$i [set data]0$i] - } else { - set ret [eval {$db put} $txn {$key$i $data$i}] - } - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # open curs to db - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_substr $dbc $db] 1 - - # get db order of keys - for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \ - set ret [$dbc get -next]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - incr i - } - - # put 20 sorted duplicates on key in middle of page - set i [expr $nkeys/2] - set ret [$dbc get -set $key_set($i)] - error_check_bad dbc_get:set [llength $ret] 0 - - set keym $key_set($i) - - for { set i 0 } { $i < $ndups } { incr i } { - if { $i < 10 } { - set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}] - } else { - set ret [eval {$db put} $txn {$keym DUPLICATE_$i}] - } - error_check_good db_put:DUP($i) $ret 0 - } - - puts "\tTest046.e.3: Check duplicate duplicates" - set ret 
[eval {$db put} $txn {$keym DUPLICATE_00}] - error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1 - - # get dup ordering - for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\ - set ret [$dbc get -nextdup] } { - set dup_set($i) [lindex [lindex $ret 0] 1] - incr i - } - - # put cursor on item in middle of dups - set i [expr $ndups/2] - set ret [$dbc get -get_both $keym $dup_set($i)] - error_check_bad dbc_get:get_both [llength $ret] 0 - - puts "\tTest046.f: Deletes by cursor." - puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor." - set ret [$dbc get -current] - error_check_bad dbc_get:current [llength $ret] 0 - error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - error_check_good \ - dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i]) - - puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor." - error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $ret] 0 - set i [incr i -2] - error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i) - - puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor." 
- error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -current] - error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0 - #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1 - #error_check_good \ - # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1 - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # restore deleted keys - error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0 - error_check_good db_put:2 [eval {$db put} $txn \ - {$keym $dup_set([incr i])}] 0 - error_check_good db_put:3 [eval {$db put} $txn \ - {$keym $dup_set([incr i])}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # tested above - - # Reopen database without __db_err, reset cursor - error_check_good dbclose [$db close] 0 - set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set ret [$dbc get -set $keym] - error_check_bad dbc_get:set [llength $ret] 0 - set ret2 [$dbc get -current] - error_check_bad dbc_get:current [llength $ret2] 0 - # match - error_check_good dbc_get:current/set(match) $ret $ret2 - # right one? 
- error_check_good \ - dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0) - - # cursor is on first dup - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - # now on second dup - error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1) - # check cursor - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good \ - dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1) - - puts "\tTest046.g: Inserts." - puts "\t\tTest046.g.1: Insert by key before cursor." - set i 0 - - # use "spam" to prevent a duplicate duplicate. - set ret [eval {$db put} $txn {$keym $dup_set($i)spam}] - error_check_good db_put:before $ret 0 - # make sure cursor was maintained - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good \ - dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1) - - puts "\t\tTest046.g.2: Insert by key after cursor." - set i [expr $i + 2] - # use "eggs" to prevent a duplicate duplicate - set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}] - error_check_good db_put:after $ret 0 - # make sure cursor was maintained - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good \ - dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1) - - puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)." - # should return EINVAL (dupsort specified) - error_check_good dbc_put:before:catch \ - [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1 - error_check_good \ - dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1 - error_check_good dbc_put:after:catch \ - [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1 - error_check_good \ - dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1 - - puts "\tTest046.h: Cursor overwrites." - puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite." 
- set ret [$dbc get -set $keym] - error_check_bad dbc_get:set [llength $ret] 0 - error_check_good \ - catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1 - error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1 - - puts "\t\tTest046.h.2: New db (no dupsort)." - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} \ - $oflags -dup $testfile.h.db] - error_check_good db_open [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - for {set i 0} {$i < $nkeys} {incr i} { - if { $i < 10 } { - set ret [eval {$db put} $txn {key0$i datum0$i}] - error_check_good db_put $ret 0 - } else { - set ret [eval {$db put} $txn {key$i datum$i}] - error_check_good db_put $ret 0 - } - if { $i == 0 } { - for {set j 0} {$j < $ndups} {incr j} { - if { $i < 10 } { - set keyput key0$i - } else { - set keyput key$i - } - if { $j < 10 } { - set ret [eval {$db put} $txn \ - {$keyput DUP_datum0$j}] - } else { - set ret [eval {$db put} $txn \ - {$keyput DUP_datum$j}] - } - error_check_good dbput:dup $ret 0 - } - } - } - - for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \ - set ret [$dbc get -next]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - incr i - } - - for {set i 0; set ret [$dbc get -set key00]} {\ - [llength $ret] != 0} {set ret [$dbc get -nextdup]} { - set dup_set($i) [lindex [lindex $ret 0] 1] - incr i - } - set i 0 - set keym key0$i - set ret [$dbc get -set $keym] - error_check_bad dbc_get:set [llength $ret] 0 - error_check_good \ - dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i) - - set ret [$dbc get -nextdup] - error_check_bad dbc_get:nextdup [llength $ret] 0 - 
error_check_good dbc_get:nextdup(match) \ - [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1]) - - puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)." - set ret [$dbc put -before BEFOREPUT] - error_check_good dbc_put:before $ret 0 - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good \ - dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT - # make sure that this is actually a dup w/ dup before - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $ret] 0 - error_check_good dbc_get:prev:match \ - [lindex [lindex $ret 0] 1] $dup_set($i) - set ret [$dbc get -prev] - # should not be a dup - error_check_bad dbc_get:prev(no_dup) \ - [lindex [lindex $ret 0] 0] $keym - - puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)." - set ret [$dbc get -set $keym] - - # delete next 3 when fix - #puts "[$dbc get -current]\ - # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]" - #set ret [$dbc get -set $keym] - - error_check_bad dbc_get:set [llength $ret] 0 - set ret [$dbc put -after AFTERPUT] - error_check_good dbc_put:after $ret 0 - #puts [$dbc get -current] - - # delete next 3 when fix - #set ret [$dbc get -set $keym] - #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]" - #set ret [$dbc get -set AFTERPUT] - #set ret [$dbc get -set $keym] - #set ret [$dbc get -next] - #puts $ret - - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT - set ret [$dbc get -prev] - # now should be on first item (non-dup) of keym - error_check_bad dbc_get:prev1 [llength $ret] 0 - error_check_good \ - dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i) - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - error_check_good \ - dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - # this is 
the dup we added previously - error_check_good \ - dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT - - # now get rid of the dups we added - error_check_good dbc_del [$dbc del] 0 - set ret [$dbc get -prev] - error_check_bad dbc_get:prev2 [llength $ret] 0 - error_check_good dbc_del2 [$dbc del] 0 - # put cursor on first dup item for the rest of test - set ret [$dbc get -set $keym] - error_check_bad dbc_get:first [llength $ret] 0 - error_check_good \ - dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i) - - puts "\t\tTest046.h.5: Overwrite small by small." - set ret [$dbc put -current DATA_OVERWRITE] - error_check_good dbc_put:current:overwrite $ret 0 - set ret [$dbc get -current] - error_check_good dbc_get:current(put,small/small) \ - [lindex [lindex $ret 0] 1] DATA_OVERWRITE - - puts "\t\tTest046.h.6: Overwrite small with big." - set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]] - error_check_good dbc_put:current:overwrite:big $ret 0 - set ret [$dbc get -current] - error_check_good dbc_get:current(put,small/big) \ - [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1 - - puts "\t\tTest046.h.7: Overwrite big with big." - set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]] - error_check_good dbc_put:current:overwrite(2):big $ret 0 - set ret [$dbc get -current] - error_check_good dbc_get:current(put,big/big) \ - [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1 - - puts "\t\tTest046.h.8: Overwrite big with small." - set ret [$dbc put -current DATA_OVERWRITE2] - error_check_good dbc_put:current:overwrite:small $ret 0 - set ret [$dbc get -current] - error_check_good dbc_get:current(put,big/small) \ - [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1 - - puts "\tTest046.i: Cleaning up from test." - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest046 complete." 
-} diff --git a/storage/bdb/test/test047.tcl b/storage/bdb/test/test047.tcl deleted file mode 100644 index 48b6fc759ab..00000000000 --- a/storage/bdb/test/test047.tcl +++ /dev/null @@ -1,258 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test047.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test047 -# TEST DBcursor->c_get get test with SET_RANGE option. -proc test047 { method args } { - source ./include.tcl - - set tnum 047 - set args [convert_args $method $args] - - if { [is_btree $method] != 1 } { - puts "Test$tnum skipping for method $method" - return - } - - set method "-btree" - - puts "\tTest$tnum: Test of SET_RANGE interface to DB->c_get ($method)." - - set key "key" - set data "data" - set txn "" - set flags "" - - puts "\tTest$tnum.a: Create $method database." - set eindex [lsearch -exact $args "-env"] - set txnenv 0 - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set testfile1 $testdir/test$tnum.a.db - set testfile2 $testdir/test$tnum.b.db - set env NULL - } else { - set testfile test$tnum.db - set testfile1 test$tnum.a.db - set testfile2 test$tnum.b.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 -dup $args $method" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 20 - # Fill page w/ small key/data pairs - # - puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs." 
- for { set i 0 } { $i < $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i $data$i}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # open curs to db - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - puts "\tTest$tnum.c: Get data with SET_RANGE, then delete by cursor." - set i 0 - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get:set_range [llength $ret] 0 - set curr $ret - - # delete by cursor, make sure it is gone - error_check_good dbc_del [$dbc del] 0 - - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get(post-delete):set_range [llength $ret] 0 - error_check_bad dbc_get(no-match):set_range $ret $curr - - puts "\tTest$tnum.d: \ - Use another cursor to fix item on page, delete by db." - set dbcurs2 [eval {$db cursor} $txn] - error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE - - set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]] - error_check_bad dbc_get(2):set [llength $ret] 0 - set curr $ret - error_check_good db:del [eval {$db del} $txn \ - {[lindex [lindex $ret 0] 0]}] 0 - - # make sure item is gone - set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]] - error_check_bad dbc2_get:set_range [llength $ret] 0 - error_check_bad dbc2_get:set_range $ret $curr - - puts "\tTest$tnum.e: Close for second part of test, close db/cursors." 
- error_check_good dbc:close [$dbc close] 0 - error_check_good dbc2:close [$dbcurs2 close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good dbclose [$db close] 0 - - # open db - set db [eval {berkdb_open} $oflags $testfile1] - error_check_good dbopen2 [is_valid_db $db] TRUE - - set nkeys 10 - puts "\tTest$tnum.f: Fill page with $nkeys pairs, one set of dups." - for {set i 0} { $i < $nkeys } {incr i} { - # a pair - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i $data$i}] - error_check_good dbput($i) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set j 0 - for {set i 0} { $i < $nkeys } {incr i} { - # a dup set for same 1 key - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i DUP_$data$i}] - error_check_good dbput($i):dup $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - puts "\tTest$tnum.g: \ - Get dups key w/ SET_RANGE, pin onpage with another cursor." - set i 0 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get:set_range [llength $ret] 0 - - set dbc2 [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE - set ret2 [$dbc2 get -set_range $key$i] - error_check_bad dbc2_get:set_range [llength $ret] 0 - - error_check_good dbc_compare $ret $ret2 - puts "\tTest$tnum.h: \ - Delete duplicates' key, use SET_RANGE to get next dup." 
- set ret [$dbc2 del] - error_check_good dbc2_del $ret 0 - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get:set_range [llength $ret] 0 - error_check_bad dbc_get:set_range $ret $ret2 - - error_check_good dbc_close [$dbc close] 0 - error_check_good dbc2_close [$dbc2 close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $oflags $testfile2] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 10 - set ndups 1000 - - puts "\tTest$tnum.i: Fill page with $nkeys pairs and $ndups dups." - for {set i 0} { $i < $nkeys } { incr i} { - # a pair - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i $data$i}] - error_check_good dbput $ret 0 - - # dups for single pair - if { $i == 0} { - for {set j 0} { $j < $ndups } { incr j } { - set ret [eval {$db put} $txn \ - {$key$i DUP_$data$i:$j}] - error_check_good dbput:dup $ret 0 - } - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - set i 0 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - set dbc2 [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE - puts "\tTest$tnum.j: \ - Get key of first dup with SET_RANGE, fix with 2 curs." - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get:set_range [llength $ret] 0 - - set ret2 [$dbc2 get -set_range $key$i] - error_check_bad dbc2_get:set_range [llength $ret] 0 - set curr $ret2 - - error_check_good dbc_compare $ret $ret2 - - puts "\tTest$tnum.k: Delete item by cursor, use SET_RANGE to verify." 
- set ret [$dbc2 del] - error_check_good dbc2_del $ret 0 - set ret [$dbc get -set_range $key$i] - error_check_bad dbc_get:set_range [llength $ret] 0 - error_check_bad dbc_get:set_range $ret $curr - - puts "\tTest$tnum.l: Cleanup." - error_check_good dbc_close [$dbc close] 0 - error_check_good dbc2_close [$dbc2 close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum complete." -} diff --git a/storage/bdb/test/test048.tcl b/storage/bdb/test/test048.tcl deleted file mode 100644 index db73b2b6dcc..00000000000 --- a/storage/bdb/test/test048.tcl +++ /dev/null @@ -1,171 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test048.tcl,v 11.22 2004/05/13 18:51:43 mjc Exp $ -# -# TEST test048 -# TEST Cursor stability across Btree splits. -proc test048 { method args } { - global errorCode - global is_je_test - source ./include.tcl - - set tnum 048 - set args [convert_args $method $args] - - if { [is_btree $method] != 1 } { - puts "Test$tnum skipping for method $method." - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - incr pgindex - if { [lindex $args $pgindex] > 8192 } { - puts "Test048: Skipping for large pagesizes" - return - } - } - - set method "-btree" - - puts "\tTest$tnum: Test of cursor stability across btree splits." - - set key "key" - set data "data" - set txn "" - set flags "" - - puts "\tTest$tnum.a: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 $args $method" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 5 - # Fill page w/ small key/data pairs, keep at leaf - # - puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs." - for { set i 0 } { $i < $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {key000$i $data$i}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # get db ordering, set cursors - puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs." - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for {set i 0; set ret [$db get key000$i]} {\ - $i < $nkeys && [llength $ret] != 0} {\ - incr i; set ret [$db get key000$i]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - set dbc [eval {$db cursor} $txn] - set dbc_set($i) $dbc - error_check_good db_cursor:$i \ - [is_valid_cursor $dbc_set($i) $db] TRUE - set ret [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc_set($i)_get:set [llength $ret] 0 - } - - # if mkeys is above 1000, need to adjust below for lexical order - set mkeys 1000 - puts "\tTest$tnum.d: Add $mkeys pairs to force split." 
- for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 100 } { - set ret [eval {$db put} $txn {key0$i $data$i}] - } elseif { $i >= 10 } { - set ret [eval {$db put} $txn {key00$i $data$i}] - } else { - set ret [eval {$db put} $txn {key000$i $data$i}] - } - error_check_good dbput:more $ret 0 - } - - puts "\tTest$tnum.e: Make sure split happened." - # XXX We cannot call stat with active txns or we deadlock. - if { $txnenv != 1 && !$is_je_test } { - error_check_bad stat:check-split [is_substr [$db stat] \ - "{{Internal pages} 0}"] 1 - } - - puts "\tTest$tnum.f: Check to see that cursors maintained reference." - for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - puts "\tTest$tnum.g: Delete added keys to force reverse split." - for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 100 } { - error_check_good db_del:$i \ - [eval {$db del} $txn {key0$i}] 0 - } elseif { $i >= 10 } { - error_check_good db_del:$i \ - [eval {$db del} $txn {key00$i}] 0 - } else { - error_check_good db_del:$i \ - [eval {$db del} $txn {key000$i}] 0 - } - } - - puts "\tTest$tnum.h: Verify cursor reference." - for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - puts "\tTest$tnum.i: Cleanup." - # close cursors - for {set i 0} { $i < $nkeys } {incr i} { - error_check_good dbc_close:$i [$dbc_set($i) close] 0 - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - puts "\tTest$tnum.j: Verify reverse split." 
- error_check_good stat:check-reverse_split [is_substr [$db stat] \ - "{{Internal pages} 0}"] 1 - - error_check_good dbclose [$db close] 0 - - puts "\tTest$tnum complete." -} diff --git a/storage/bdb/test/test049.tcl b/storage/bdb/test/test049.tcl deleted file mode 100644 index f8d173380c8..00000000000 --- a/storage/bdb/test/test049.tcl +++ /dev/null @@ -1,184 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test049.tcl,v 11.24 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test049 -# TEST Cursor operations on uninitialized cursors. -proc test049 { method args } { - global errorInfo - global errorCode - source ./include.tcl - - set tnum 049 - set renum [is_rrecno $method] - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "\tTest$tnum: Test of cursor routines with uninitialized cursors." - - set key "key" - set data "data" - set txn "" - set flags "" - set rflags "" - - if { [is_record_based $method] == 1 } { - set key "" - } - - puts "\tTest$tnum.a: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 $rflags $omethod $args" - if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } { - append oflags " -dup" - } - set db [eval {berkdb_open_noerr} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 10 - puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs." 
- for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i $data$i}] - error_check_good dbput:$i $ret 0 - if { $i == 1 } { - for {set j 0} { $j < [expr $nkeys / 2]} {incr j} { - set ret [eval {$db put} $txn \ - {$key$i DUPLICATE$j}] - error_check_good dbput:dup:$j $ret 0 - } - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # DBC GET - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc_u [eval {$db cursor} $txn] - error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE - - puts "\tTest$tnum.c: Test dbc->get interfaces..." - set i 0 - foreach flag { current first last next prev nextdup} { - puts "\t\t...dbc->get($flag)" - catch {$dbc_u get -$flag} ret - error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1 - } - - foreach flag { set set_range get_both} { - puts "\t\t...dbc->get($flag)" - if { [string compare $flag get_both] == 0} { - catch {$dbc_u get -$flag $key$i data0} ret - } else { - catch {$dbc_u get -$flag $key$i} ret - } - error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1 - } - - puts "\t\t...dbc->get(current, partial)" - catch {$dbc_u get -current -partial {0 0}} ret - error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1 - - puts "\t\t...dbc->get(current, rmw)" - catch {$dbc_u get -rmw -current } ret - error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1 - - puts "\tTest$tnum.d: Test dbc->put interface..." - # partial...depends on another - foreach flag { after before current keyfirst keylast } { - puts "\t\t...dbc->put($flag)" - if { [string match key* $flag] == 1 } { - if { [is_record_based $method] == 1 } { - # keyfirst/keylast not allowed in recno - puts "\t\t...Skipping dbc->put($flag) for $method." 
- continue - } else { - # keyfirst/last should succeed - puts "\t\t...dbc->put($flag)...should succeed for $method" - error_check_good dbcput:$flag \ - [$dbc_u put -$flag $key$i data0] 0 - - # now uninitialize cursor - error_check_good dbc_close [$dbc_u close] 0 - set dbc_u [eval {$db cursor} $txn] - error_check_good \ - db_cursor [is_substr $dbc_u $db] 1 - } - } elseif { [string compare $flag before ] == 0 || - [string compare $flag after ] == 0 } { - if { [is_record_based $method] == 0 && - [is_rbtree $method] == 0} { - set ret [$dbc_u put -$flag data0] - error_check_good "$dbc_u:put:-$flag" $ret 0 - } elseif { $renum == 1 } { - # Renumbering recno will return a record number - set currecno \ - [lindex [lindex [$dbc_u get -current] 0] 0] - set ret [$dbc_u put -$flag data0] - if { [string compare $flag after] == 0 } { - error_check_good "$dbc_u put $flag" \ - $ret [expr $currecno + 1] - } else { - error_check_good "$dbc_u put $flag" \ - $ret $currecno - } - } else { - puts "\t\tSkipping $flag for $method" - } - } else { - set ret [$dbc_u put -$flag data0] - error_check_good "$dbc_u:put:-$flag" $ret 0 - } - } - # and partial - puts "\t\t...dbc->put(partial)" - catch {$dbc_u put -partial {0 0} $key$i $data$i} ret - error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1 - - # XXX dbc->dup, db->join (dbc->get join_item) - # dbc del - puts "\tTest$tnum.e: Test dbc->del interface." - catch {$dbc_u del} ret - error_check_good dbc_del [is_substr $errorCode EINVAL] 1 - - error_check_good dbc_close [$dbc_u close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum complete." -} diff --git a/storage/bdb/test/test050.tcl b/storage/bdb/test/test050.tcl deleted file mode 100644 index 72be5af67f9..00000000000 --- a/storage/bdb/test/test050.tcl +++ /dev/null @@ -1,221 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. 
All rights reserved. -# -# $Id: test050.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test050 -# TEST Overwrite test of small/big key/data with cursor checks for Recno. -proc test050 { method args } { - global alphabet - global errorInfo - global errorCode - source ./include.tcl - - set tstn 050 - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_rrecno $method] != 1 } { - puts "Test$tstn skipping for method $method." - return - } - - puts "\tTest$tstn:\ - Overwrite test with cursor and small/big key/data ($method)." - - set data "data" - set txn "" - set flags "" - - puts "\tTest$tstn: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test0$tstn.db - set env NULL - } else { - set testfile test0$tstn.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 $args $omethod" - set db [eval {berkdb_open_noerr} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # keep nkeys even - set nkeys 20 - - # Fill page w/ small key/data pairs - # - puts "\tTest$tstn: Fill page with $nkeys small key/data pairs." 
- for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # open curs to db - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # get db order of keys - for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \ - set ret [$dbc get -next]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - incr i - } - - # verify ordering: should be unnecessary, but hey, why take chances? - # key_set is zero indexed but keys start at 1 - for {set i 0} { $i < $nkeys } {incr i} { - error_check_good \ - verify_order:$i $key_set($i) [pad_data $method [expr $i+1]] - } - - puts "\tTest$tstn.a: Inserts before/after by cursor." - puts "\t\tTest$tstn.a.1:\ - Insert with uninitialized cursor (should fail)." - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - catch {$dbc put -before DATA1} ret - error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1 - - catch {$dbc put -after DATA2} ret - error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1 - - puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)." 
- set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - error_check_good dbc_del [$dbc del] 0 - set ret [$dbc put -current DATAOVER1] - error_check_good dbc_put:current:deleted $ret 0 - - puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)." - set currecno [lindex [lindex [$dbc get -current] 0] 0] - set ret [$dbc put -before DATAPUTBEFORE] - error_check_good dbc_put:before $ret $currecno - set old1 [$dbc get -next] - error_check_bad dbc_get:next [llength $old1] 0 - error_check_good \ - dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1 - - puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)." - set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE - set currecno [lindex [lindex [$dbc get -current] 0] 0] - set ret [$dbc put -after DATAPUTAFTER] - error_check_good dbc_put:after $ret [expr $currecno + 1] - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $ret] 0 - error_check_good \ - dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE - - puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered." - # should be $nkeys + 2 keys, starting at 1 - for {set i 1; set ret [$dbc get -first]} { \ - $i <= $nkeys && [llength $ret] != 0 } {\ - incr i; set ret [$dbc get -next]} { - error_check_good check_renumber $i [lindex [lindex $ret 0] 0] - } - - # tested above - - puts "\tTest$tstn.b: Overwrite tests (cursor and key)." 
- # For the next part of the test, we need a db with no dups to test - # overwrites - # - # we should have ($nkeys + 2) keys, ordered: - # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys - # - # Prepare cursor on item - # - set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - - # Prepare unique big/small values for an initial - # and an overwrite set of data - set databig DATA_BIG_[repeat alphabet 250] - set datasmall DATA_SMALL - - # Now, we want to overwrite data: - # by key and by cursor - # 1. small by small - # 2. small by big - # 3. big by small - # 4. big by big - # - set i 0 - # Do all overwrites for key and cursor - foreach type { by_key by_cursor } { - incr i - puts "\tTest$tstn.b.$i: Overwrites $type." - foreach pair { {small small} \ - {small big} {big small} {big big} } { - # put in initial type - set data $data[lindex $pair 0] - set ret [$dbc put -current $data] - error_check_good dbc_put:curr:init:($pair) $ret 0 - - # Now, try to overwrite: dups not supported in this db - if { [string compare $type by_key] == 0 } { - puts "\t\tTest$tstn.b.$i:\ - Overwrite:($pair):$type" - set ret [eval {$db put} $txn \ - 1 {OVER$pair$data[lindex $pair 1]}] - error_check_good dbput:over:($pair) $ret 0 - } else { - # This is a cursor overwrite - puts "\t\tTest$tstn.b.$i:\ - Overwrite:($pair) by cursor." - set ret [$dbc put \ - -current OVER$pair$data[lindex $pair 1]] - error_check_good dbcput:over:($pair) $ret 0 - } - } ;# foreach pair - } ;# foreach type key/cursor - - puts "\tTest$tstn.c: Cleanup and close cursor." - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - -} diff --git a/storage/bdb/test/test051.tcl b/storage/bdb/test/test051.tcl deleted file mode 100644 index 5e09835e3c2..00000000000 --- a/storage/bdb/test/test051.tcl +++ /dev/null @@ -1,222 +0,0 @@ -# See the file LICENSE for redistribution information. 
-# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test051.tcl,v 11.25 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test051 -# TEST Fixed-length record Recno test. -# TEST 0. Test various flags (legal and illegal) to open -# TEST 1. Test partial puts where dlen != size (should fail) -# TEST 2. Partial puts for existent record -- replaces at beg, mid, and -# TEST end of record, as well as full replace -proc test051 { method { args "" } } { - global fixed_len - global errorInfo - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test051: Test of the fixed length records." - if { [is_fixed_length $method] != 1 } { - puts "Test051: skipping for method $method" - return - } - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test051.db - set testfile1 $testdir/test051a.db - set env NULL - } else { - set testfile test051.db - set testfile1 test051a.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - set oflags "-create -mode 0644 $args" - - # Test various flags (legal and illegal) to open - puts "\tTest051.a: Test correct flag behavior on open." 
- set errorCode NONE - foreach f { "-dup" "-dup -dupsort" "-recnum" } { - puts "\t\tTest051.a: Test flag $f" - set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \ - $testfile} ret] - error_check_good dbopen:flagtest:catch $stat 1 - error_check_good \ - dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1 - set errorCode NONE - } - set f "-renumber" - puts "\t\tTest051.a: Test $f" - if { [is_frecno $method] == 1 } { - set db [eval {berkdb_open} $oflags $f $omethod $testfile] - error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE - $db close - } else { - error_check_good \ - dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\ - $oflags $f $omethod $testfile} ret] 1 - error_check_good \ - dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1 - } - - # Test partial puts where dlen != size (should fail) - # it is an error to specify a partial put w/ different - # dlen and size in fixed length recno/queue - set key 1 - set data "" - set txn "" - set test_char "a" - - set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1] - error_check_good dbopen [is_valid_db $db] TRUE - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - puts "\tTest051.b: Partial puts with dlen != size." - foreach dlen { 1 16 20 32 } { - foreach doff { 0 10 20 32 } { - # dlen < size - puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \ - size: [expr $dlen+1]" - set data [repeat $test_char [expr $dlen + 1]] - error_check_good \ - catch:put 1 [catch {eval {$db put -partial \ - [list $doff $dlen]} $txn {$key $data}} ret] - - # We don't get back the server error string just - # the result. 
- if { $eindex == -1 } { - error_check_good "dbput:partial: dlen < size" \ - [is_substr \ - $errorInfo "Record length error"] 1 - } else { - error_check_good "dbput:partial: dlen < size" \ - [is_substr $errorCode "EINVAL"] 1 - } - - # dlen > size - puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \ - size: [expr $dlen-1]" - set data [repeat $test_char [expr $dlen - 1]] - error_check_good \ - catch:put 1 [catch {eval {$db put -partial \ - [list $doff $dlen]} $txn {$key $data}} ret] - if { $eindex == -1 } { - error_check_good "dbput:partial: dlen > size" \ - [is_substr \ - $errorInfo "Record length error"] 1 - } else { - error_check_good "dbput:partial: dlen < size" \ - [is_substr $errorCode "EINVAL"] 1 - } - } - } - - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - $db close - - # Partial puts for existent record -- replaces at beg, mid, and - # end of record, as well as full replace - puts "\tTest051.f: Partial puts within existent record." - set db [eval {berkdb_open} $oflags $omethod $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\t\tTest051.f: First try a put and then a full replace." 
- set data [repeat "a" $fixed_len] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {1 $data}] - error_check_good dbput $ret 0 - set ret [eval {$db get} $txn {-recno 1}] - error_check_good dbget $data [lindex [lindex $ret 0] 1] - - set data [repeat "b" $fixed_len] - set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}] - error_check_good dbput $ret 0 - set ret [eval {$db get} $txn {-recno 1}] - error_check_good dbget $data [lindex [lindex $ret 0] 1] - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set data "InitialData" - set pdata "PUT" - set dlen [string length $pdata] - set ilen [string length $data] - set mid [expr $ilen/2] - - # put initial data - set key 0 - - set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]] - puts "\t\tTest051.g: Now replace at different offsets ($offlist)." - foreach doff $offlist { - incr key - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $data}] - error_check_good dbput:init $ret 0 - - puts "\t\tTest051.g: Replace at offset $doff." 
- set ret [eval {$db put -partial [list $doff $dlen]} $txn \ - {$key $pdata}] - error_check_good dbput:partial $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if { $doff == 0} { - set beg "" - set end [string range $data $dlen $ilen] - } else { - set beg [string range $data 0 [expr $doff - 1]] - set end [string range $data [expr $doff + $dlen] $ilen] - } - if { $doff > $ilen } { - # have to put padding between record and inserted - # string - set newdata [format %s%s $beg $end] - set diff [expr $doff - $ilen] - set nlen [string length $newdata] - set newdata [binary \ - format a[set nlen]x[set diff]a$dlen $newdata $pdata] - } else { - set newdata [make_fixed_length \ - frecno [format %s%s%s $beg $pdata $end]] - } - set ret [$db get -recno $key] - error_check_good compare($newdata,$ret) \ - [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0 - } - - $db close -} diff --git a/storage/bdb/test/test052.tcl b/storage/bdb/test/test052.tcl deleted file mode 100644 index c7d891d33c7..00000000000 --- a/storage/bdb/test/test052.tcl +++ /dev/null @@ -1,269 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test052.tcl,v 11.20 2004/09/20 17:06:16 sue Exp $ -# -# TEST test052 -# TEST Renumbering record Recno test. -proc test052 { method args } { - global alphabet - global errorInfo - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test052: Test of renumbering recno." - if { [is_rrecno $method] != 1} { - puts "Test052: skipping for method $method." - return - } - - set data "data" - set txn "" - set flags "" - - puts "\tTest052: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test052.db - set env NULL - } else { - set testfile test052.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags "-create -mode 0644 $args $omethod" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # keep nkeys even - set nkeys 20 - - # Fill page w/ small key/data pairs - puts "\tTest052: Fill page with $nkeys small key/data pairs." - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$i $data$i}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # open curs to db - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # get db order of keys - for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \ - set ret [$dbc get -next]} { - set keys($i) [lindex [lindex $ret 0] 0] - set darray($i) [lindex [lindex $ret 0] 1] - incr i - } - - puts "\tTest052: Deletes by key." - puts "\tTest052.a: Get data with SET, then delete before cursor." 
- # get key in middle of page, call this the nth set curr to it - set i [expr $nkeys/2] - set k $keys($i) - set ret [$dbc get -set $k] - error_check_bad dbc_get:set [llength $ret] 0 - error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i) - - # delete by key before current - set i [incr i -1] - error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0 - # with renumber, current's data should be constant, but key==--key - set i [incr i +1] - error_check_good dbc:data \ - [lindex [lindex [$dbc get -current] 0] 1] $darray($i) - error_check_good dbc:keys \ - [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1]) - - puts "\tTest052.b: Delete cursor item by key." - set i [expr $nkeys/2 ] - - set ret [$dbc get -set $keys($i)] - error_check_bad dbc:get [llength $ret] 0 - error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \ - $darray([expr $i + 1]) - error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0 - set ret [$dbc get -current] - - # After a delete, cursor should return DB_NOTFOUND. - error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0 - error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0 - - # And the item after the cursor should now be - # key: $nkeys/2, data: $nkeys/2 + 2 - set ret [$dbc get -next] - error_check_bad dbc:getnext [llength $ret] 0 - error_check_good dbc:getnext:data \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 2]) - error_check_good dbc:getnext:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - - puts "\tTest052.c: Delete item after cursor." 
- # should be { keys($nkeys/2), darray($nkeys/2 + 2) } - set i [expr $nkeys/2] - # deleting data for key after current (key $nkeys/2 + 1) - error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0 - - # current should be constant - set ret [$dbc get -current] - error_check_bad dbc:get:current [llength $ret] 0 - error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \ - $keys($i) - error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \ - $darray([expr $i + 2]) - - puts "\tTest052: Deletes by cursor." - puts "\tTest052.d: Delete, do DB_NEXT." - set i 1 - set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i) - error_check_good dbc_del [$dbc del] 0 - set ret [$dbc get -current] - error_check_good dbc_get:current [llength $ret] 0 - - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - error_check_good dbc:get:curs \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 1]) - error_check_good dbc:get:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - - # Move one more forward, so we're not on the first item. - error_check_bad dbc:getnext [llength [$dbc get -next]] 0 - - puts "\tTest052.e: Delete, do DB_PREV." 
- error_check_good dbc:del [$dbc del] 0 - set ret [$dbc get -current] - error_check_good dbc:get:curr [llength $ret] 0 - - # next should now reference the record that was previously after - # old current - set ret [$dbc get -next] - error_check_bad get:next [llength $ret] 0 - error_check_good dbc:get:next:data \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 3]) - error_check_good dbc:get:next:keys \ - [lindex [lindex $ret 0] 0] $keys([expr $i + 1]) - - - set ret [$dbc get -prev] - error_check_bad dbc:get:curr [llength $ret] 0 - error_check_good dbc:get:curr:compare \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 1]) - error_check_good dbc:get:curr:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - - # The rest of the test was written with the old rrecno semantics, - # which required a separate c_del(CURRENT) test; to leave - # the database in the expected state, we now delete the first item. - set ret [$dbc get -first] - error_check_bad getfirst [llength $ret] 0 - error_check_good delfirst [$dbc del] 0 - - puts "\tTest052: Inserts." - puts "\tTest052.g: Insert before (DB_BEFORE)." 
- set i 1 - set ret [$dbc get -first] - error_check_bad dbc:get:first [llength $ret] 0 - error_check_good dbc_get:first \ - [lindex [lindex $ret 0] 0] $keys($i) - error_check_good dbc_get:first:data \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 3]) - - set ret [$dbc put -before $darray($i)] - # should return new key, which should be $keys($i) - error_check_good dbc_put:before $ret $keys($i) - # cursor should adjust to point to new item - set ret [$dbc get -current] - error_check_bad dbc_get:curr [llength $ret] 0 - error_check_good dbc_put:before:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - error_check_good dbc_put:before:data \ - [lindex [lindex $ret 0] 1] $darray($i) - - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - error_check_good dbc_get:next:compare \ - $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]] - set ret [$dbc get -prev] - error_check_bad dbc_get:prev [llength $ret] 0 - - puts "\tTest052.h: Insert by cursor after (DB_AFTER)." - set i [incr i] - set ret [$dbc put -after $darray($i)] - # should return new key, which should be $keys($i) - error_check_good dbcput:after $ret $keys($i) - # cursor should reference new item - set ret [$dbc get -current] - error_check_good dbc:get:current:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - error_check_good dbc:get:current:data \ - [lindex [lindex $ret 0] 1] $darray($i) - - # items after curs should be adjusted - set ret [$dbc get -next] - error_check_bad dbc:get:next [llength $ret] 0 - error_check_good dbc:get:next:compare \ - $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]] - - puts "\tTest052.i: Insert (overwrite) current item (DB_CURRENT)." 
- set i 1 - set ret [$dbc get -first] - error_check_bad dbc_get:first [llength $ret] 0 - # choose a datum that is not currently in db - set ret [$dbc put -current $darray([expr $i + 2])] - error_check_good dbc_put:curr $ret 0 - # curs should be on new item - set ret [$dbc get -current] - error_check_bad dbc_get:current [llength $ret] 0 - error_check_good dbc_get:curr:keys \ - [lindex [lindex $ret 0] 0] $keys($i) - error_check_good dbc_get:curr:data \ - [lindex [lindex $ret 0] 1] $darray([expr $i + 2]) - - set ret [$dbc get -next] - error_check_bad dbc_get:next [llength $ret] 0 - set i [incr i] - error_check_good dbc_get:next \ - $ret [list [list $keys($i) $darray($i)]] - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest052 complete." -} diff --git a/storage/bdb/test/test053.tcl b/storage/bdb/test/test053.tcl deleted file mode 100644 index 5c5e060ec48..00000000000 --- a/storage/bdb/test/test053.tcl +++ /dev/null @@ -1,231 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test053.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $ -# -# TEST test053 -# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum -# TEST methods. -proc test053 { method args } { - global alphabet - global errorCode - global is_je_test - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "\tTest053: Test of cursor stability across btree splits." - if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } { - puts "Test053: skipping for method $method." - return - } - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test053: skipping for specific pagesizes" - return - } - - set txn "" - set flags "" - - puts "\tTest053.a: Create $omethod $args database." 
- set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test053.db - set env NULL - } else { - set testfile test053.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set oflags \ - "-create -revsplitoff -pagesize 1024 $args $omethod" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 8 - set npages 15 - - # We want to create a db with npages leaf pages, and have each page - # be near full with keys that we can predict. We set pagesize above - # to 1024 bytes, it should breakdown as follows (per page): - # - # ~20 bytes overhead - # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9 - # data: ~4 bytes overhead, + 100 bytes - # - # then, with 8 keys/page we should be just under 1024 bytes - puts "\tTest053.b: Create $npages pages with $nkeys pairs on each." - set keystring [string range $alphabet 0 [expr $npages -1]] - set data [repeat DATA 22] - for { set i 0 } { $i < $npages } {incr i } { - set key "" - set keyroot \ - [repeat [string toupper [string range $keystring $i $i]] 3] - set key_set($i) $keyroot - for {set j 0} { $j < $nkeys} {incr j} { - if { $j < 10 } { - set key [set keyroot]0$j - } else { - set key $keyroot$j - } - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $data}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - } - - if { !$is_je_test } { - puts "\tTest053.c: Check page count." 
- error_check_good page_count:check \ - [is_substr [$db stat] "{Leaf pages} $npages"] 1 - } - - puts "\tTest053.d: Delete all but one key per page." - for {set i 0} { $i < $npages } {incr i } { - for {set j 1} { $j < $nkeys } {incr j } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db del} $txn {$key_set($i)0$j}] - error_check_good dbdel $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - } - - if { !$is_je_test } { - puts "\tTest053.e: Check to make sure all pages are still there." - error_check_good page_count:check \ - [is_substr [$db stat] "{Leaf pages} $npages"] 1 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE - - # walk cursor through tree forward, backward. - # delete one key, repeat - for {set i 0} { $i < $npages} {incr i} { - puts -nonewline \ - "\tTest053.f.$i: Walk curs through tree: forward..." - for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \ - incr j; set curr [$dbc get -next]} { - error_check_bad dbc:get:next [llength $curr] 0 - error_check_good dbc:get:keys \ - [lindex [lindex $curr 0] 0] $key_set($j)00 - } - puts -nonewline "backward..." - for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \ - $j >= $i } { \ - set j [incr j -1]; set curr [$dbc get -prev]} { - error_check_bad dbc:get:prev [llength $curr] 0 - error_check_good dbc:get:keys \ - [lindex [lindex $curr 0] 0] $key_set($j)00 - } - puts "complete." - - if { [is_rbtree $method] == 1} { - puts "\t\tTest053.f.$i:\ - Walk through tree with record numbers." 
- for {set j 1} {$j <= [expr $npages - $i]} {incr j} { - set curr [eval {$db get} $txn {-recno $j}] - error_check_bad \ - db_get:recno:$j [llength $curr] 0 - error_check_good db_get:recno:keys:$j \ - [lindex [lindex $curr 0] 0] \ - $key_set([expr $j + $i - 1])00 - } - } - puts "\tTest053.g.$i:\ - Delete single key ([expr $npages - $i] keys left)." - set ret [eval {$db del} $txn {$key_set($i)00}] - error_check_good dbdel $ret 0 - error_check_good del:check \ - [llength [eval {$db get} $txn {$key_set($i)00}]] 0 - } - - # end for loop, verify db_notfound - set ret [$dbc get -first] - error_check_good dbc:get:verify [llength $ret] 0 - - # loop: until single key restored on each page - for {set i 0} { $i < $npages} {incr i} { - puts "\tTest053.i.$i:\ - Restore single key ([expr $i + 1] keys in tree)." - set ret [eval {$db put} $txn {$key_set($i)00 $data}] - error_check_good dbput $ret 0 - - puts -nonewline \ - "\tTest053.j: Walk cursor through tree: forward..." - for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\ - incr j; set curr [$dbc get -next]} { - error_check_bad dbc:get:next [llength $curr] 0 - error_check_good dbc:get:keys \ - [lindex [lindex $curr 0] 0] $key_set($j)00 - } - error_check_good dbc:get:next [llength $curr] 0 - - puts -nonewline "backward..." - for { set j $i; set curr [$dbc get -last]} { \ - $j >= 0 } { \ - set j [incr j -1]; set curr [$dbc get -prev]} { - error_check_bad dbc:get:prev [llength $curr] 0 - error_check_good dbc:get:keys \ - [lindex [lindex $curr 0] 0] $key_set($j)00 - } - puts "complete." - error_check_good dbc:get:prev [llength $curr] 0 - - if { [is_rbtree $method] == 1} { - puts "\t\tTest053.k.$i:\ - Walk through tree with record numbers." 
- for {set j 1} {$j <= [expr $i + 1]} {incr j} { - set curr [eval {$db get} $txn {-recno $j}] - error_check_bad \ - db_get:recno:$j [llength $curr] 0 - error_check_good db_get:recno:keys:$j \ - [lindex [lindex $curr 0] 0] \ - $key_set([expr $j - 1])00 - } - } - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "Test053 complete." -} diff --git a/storage/bdb/test/test054.tcl b/storage/bdb/test/test054.tcl deleted file mode 100644 index 44d0335f469..00000000000 --- a/storage/bdb/test/test054.tcl +++ /dev/null @@ -1,460 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test054.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $ -# -# TEST test054 -# TEST Cursor maintenance during key/data deletion. -# TEST -# TEST This test checks for cursor maintenance in the presence of deletes. -# TEST There are N different scenarios to tests: -# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key. -# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K, -# TEST do a next on the cursor. -# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete -# TEST of K, do a current get on K. -# TEST 4. Repeat 3 but do a next instead of current. -# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A -# TEST does a delete. Then we do a non-cursor get. -# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item. -# TEST do a delete of the entire Key. Test cursor current. -# TEST 7. Continue last test and try cursor next. -# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item. -# TEST Cursor B is in the same duplicate set and deletes a different item. -# TEST Verify that the cursor is in the right place. -# TEST 9. Cursors A and B are in the place in the same duplicate set. 
A -# TEST deletes its item. Do current on B. -# TEST 10. Continue 8 and do a next on B. -proc test054 { method args } { - global errorInfo - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - append args " -create -mode 0644" - puts "Test054 ($method $args):\ - interspersed cursor and normal operations" - if { [is_record_based $method] == 1 } { - puts "Test054 skipping for method $method" - return - } - - # Find the environment in the argument list, we'll need it - # later. - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - } - - # Create the database and open the dictionary - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test054-nodup.db - set env NULL - } else { - set testfile test054-nodup.db - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set flags "" - set txn "" - - puts "\tTest054.a: No Duplicate Tests" - set db [eval {berkdb_open} $args {$omethod $testfile}] - error_check_good db_open:nodup [is_valid_db $db] TRUE - - # Put three keys in the database - for { set key 1 } { $key <= 3 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $flags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE - - # Retrieve keys sequentially so we can figure out their order - set i 1 - for {set d [$curs get -first] } \ - 
{[llength $d] != 0 } \ - {set d [$curs get -next] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - # Test case #1. - puts "\tTest054.a1: Delete w/cursor, regular get" - - # Now set the cursor on the middle on. - set r [$curs get -set $key_set(2)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good curs_get:DB_SET:data $d datum$key_set(2) - - # Now do the delete - set r [$curs del] - error_check_good curs_del $r 0 - - # Now do the get - set r [eval {$db get} $txn {$key_set(2)}] - error_check_good get_after_del [llength $r] 0 - - # Free up the cursor. - error_check_good cursor_close [eval {$curs close}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Test case #2. - puts "\tTest054.a2: Cursor before K, delete K, cursor next" - - # Replace key 2 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # Open and position cursor on first item. 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE - - # Retrieve keys sequentially so we can figure out their order - set i 1 - for {set d [eval {$curs get} -first] } \ - {[llength $d] != 0 } \ - {set d [$curs get -nextdup] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - set r [eval {$curs get} -set {$key_set(1)} ] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(1) - error_check_good curs_get:DB_SET:data $d datum$key_set(1) - - # Now delete (next item) $key_set(2) - error_check_good \ - db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0 - - # Now do next on cursor - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(3) - error_check_good curs_get:DB_NEXT:data $d datum$key_set(3) - - # Test case #3. 
- puts "\tTest054.a3: Cursor on K, delete K, cursor current" - - # delete item 3 - error_check_good \ - db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0 - # NEEDS TO COME BACK IN, BUG CHECK - set ret [$curs get -current] - error_check_good current_after_del $ret "" - error_check_good cursor_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - puts "\tTest054.a4: Cursor on K, delete K, cursor next" - - # Restore keys 2 and 3 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}] - error_check_good put $r 0 - set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # Create the new cursor and put it on 1 - set curs [eval {$db cursor} $txn] - error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE - set r [$curs get -set $key_set(1)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(1) - error_check_good curs_get:DB_SET:data $d datum$key_set(1) - - # Delete 2 - error_check_good \ - db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0 - - # Now do next on cursor - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(3) - error_check_good curs_get:DB_NEXT:data $d datum$key_set(3) - - # Close cursor - error_check_good curs_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now get ready for duplicate tests - - if { [is_rbtree $method] == 1 } 
{ - puts "Test054: skipping remainder of test for method $method." - return - } - - puts "\tTest054.b: Duplicate Tests" - append args " -dup" - - # Open a new database for the dup tests so -truncate is not needed. - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test054-dup.db - set env NULL - } else { - set testfile test054-dup.db - set env [lindex $args $eindex] - set testdir [get_home $env] - } - cleanup $testdir $env - - set flags "" - set txn "" - - set db [eval {berkdb_open} $args {$omethod $testfile}] - error_check_good db_open:dup [is_valid_db $db] TRUE - - # Put three keys in the database - for { set key 1 } { $key <= 3 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $flags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Retrieve keys sequentially so we can figure out their order - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE - - set i 1 - for {set d [$curs get -first] } \ - {[llength $d] != 0 } \ - {set d [$curs get -nextdup] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - # Now put in a bunch of duplicates for key 2 - for { set d 1 } { $d <= 5 } {incr d} { - set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}] - error_check_good dup:put $r 0 - } - - # Test case #5. - puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key." - - # Now set the cursor on the first of the duplicate set. 
- set r [eval {$curs get} -set {$key_set(2)}] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good curs_get:DB_SET:data $d datum$key_set(2) - - # Now do the delete - set r [$curs del] - error_check_good curs_del $r 0 - - # Now do the get - set r [eval {$db get} $txn {$key_set(2)}] - error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1 - - # Test case #6. - puts "\tTest054.b2: Now get the next duplicate from the cursor." - - # Now do next on cursor - set r [$curs get -nextdup] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_1 - - # Test case #3. - puts "\tTest054.b3: Two cursors in set; each delete different items" - - # Open a new cursor. - set curs2 [eval {$db cursor} $txn] - error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE - - # Set on last of duplicate set. 
- set r [$curs2 get -set $key_set(3)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(3) - error_check_good curs_get:DB_SET:data $d datum$key_set(3) - - set r [$curs2 get -prev] - error_check_bad cursor_get:DB_PREV [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_PREV:key $k $key_set(2) - error_check_good curs_get:DB_PREV:data $d dup_5 - - # Delete the item at cursor 1 (dup_1) - error_check_good curs1_del [$curs del] 0 - - # Verify curs1 and curs2 - # current should fail - set ret [$curs get -current] - error_check_good curs1_get_after_del $ret "" - - set r [$curs2 get -current] - error_check_bad curs2_get [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_CURRENT:key $k $key_set(2) - error_check_good curs_get:DB_CURRENT:data $d dup_5 - - # Now delete the item at cursor 2 (dup_5) - error_check_good curs2_del [$curs2 del] 0 - - # Verify curs1 and curs2 - set ret [$curs get -current] - error_check_good curs1_get:del2 $ret "" - - set ret [$curs2 get -current] - error_check_good curs2_get:del2 $ret "" - - # Now verify that next and prev work. 
- - set r [$curs2 get -prev] - error_check_bad cursor_get:DB_PREV [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_PREV:key $k $key_set(2) - error_check_good curs_get:DB_PREV:data $d dup_4 - - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_2 - - puts "\tTest054.b4: Two cursors same item, one delete, one get" - - # Move curs2 onto dup_2 - set r [$curs2 get -prev] - error_check_bad cursor_get:DB_PREV [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_PREV:key $k $key_set(2) - error_check_good curs_get:DB_PREV:data $d dup_3 - - set r [$curs2 get -prev] - error_check_bad cursor_get:DB_PREV [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_PREV:key $k $key_set(2) - error_check_good curs_get:DB_PREV:data $d dup_2 - - # delete on curs 1 - error_check_good curs1_del [$curs del] 0 - - # Verify gets on both 1 and 2 - set ret [$curs get -current] - error_check_good \ - curs1_get:deleted $ret "" - set ret [$curs2 get -current] - error_check_good \ - curs2_get:deleted $ret "" - - puts "\tTest054.b5: Now do a next on both cursors" - - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_3 - - set r [$curs2 get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_3 - - # Close cursor - error_check_good curs_close [$curs close] 0 - 
error_check_good curs2_close [$curs2 close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test055.tcl b/storage/bdb/test/test055.tcl deleted file mode 100644 index 96bf108c308..00000000000 --- a/storage/bdb/test/test055.tcl +++ /dev/null @@ -1,141 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test055.tcl,v 11.18 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test055 -# TEST Basic cursor operations. -# TEST This test checks basic cursor operations. -# TEST There are N different scenarios to tests: -# TEST 1. (no dups) Set cursor, retrieve current. -# TEST 2. (no dups) Set cursor, retrieve next. -# TEST 3. (no dups) Set cursor, retrieve prev. -proc test055 { method args } { - global errorInfo - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test055: $method interspersed cursor and normal operations" - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test055.db - set env NULL - } else { - set testfile test055.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set flags "" - set txn "" - - puts "\tTest055.a: No duplicates" - set db [eval {berkdb_open -create -mode 0644 $omethod } \ - $args {$testfile}] - error_check_good db_open:nodup [is_valid_db $db] TRUE - - # Put three keys in the database - for { set key 1 } { $key <= 3 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $flags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Retrieve keys sequentially so we can figure out their order - set i 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE - - for {set d [$curs get -first] } { [llength $d] != 0 } {\ - set d [$curs get -next] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - # Test case #1. - puts "\tTest055.a1: Set cursor, retrieve current" - - # Now set the cursor on the middle on. 
- set r [$curs get -set $key_set(2)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good \ - curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)] - - # Now retrieve current - set r [$curs get -current] - error_check_bad cursor_get:DB_CURRENT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_CURRENT:key $k $key_set(2) - error_check_good \ - curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)] - - # Test case #2. - puts "\tTest055.a2: Set cursor, retrieve previous" - set r [$curs get -prev] - error_check_bad cursor_get:DB_PREV [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_PREV:key $k $key_set(1) - error_check_good \ - curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)] - - # Test case #3. - puts "\tTest055.a2: Set cursor, retrieve next" - - # Now set the cursor on the middle one. - set r [$curs get -set $key_set(2)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good \ - curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)] - - # Now retrieve next - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(3) - error_check_good \ - curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)] - - # Close cursor and database. 
- error_check_good curs_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test056.tcl b/storage/bdb/test/test056.tcl deleted file mode 100644 index f689d659230..00000000000 --- a/storage/bdb/test/test056.tcl +++ /dev/null @@ -1,169 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test056.tcl,v 11.21 2004/09/20 17:06:16 sue Exp $ -# -# TEST test056 -# TEST Cursor maintenance during deletes. -# TEST Check if deleting a key when a cursor is on a duplicate of that -# TEST key works. -proc test056 { method args } { - global errorInfo - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - append args " -create -mode 0644 -dup " - if { [is_record_based $method] == 1 || [is_rbtree $method] } { - puts "Test056: skipping for method $method" - return - } - puts "Test056: $method delete of key in presence of cursor" - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test056.db - set env NULL - } else { - set testfile test056.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set flags "" - set txn "" - - set db [eval {berkdb_open} $args {$omethod $testfile}] - error_check_good db_open:dup [is_valid_db $db] TRUE - - puts "\tTest056.a: Key delete with cursor on duplicate." 
- # Put three keys in the database - for { set key 1 } { $key <= 3 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $flags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Retrieve keys sequentially so we can figure out their order - set i 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE - - for {set d [$curs get -first] } { [llength $d] != 0 } { - set d [$curs get -next] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - # Now put in a bunch of duplicates for key 2 - for { set d 1 } { $d <= 5 } {incr d} { - set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}] - error_check_good dup:put $r 0 - } - - # Now put the cursor on a duplicate of key 2 - - # Now set the cursor on the first of the duplicate set. 
- set r [$curs get -set $key_set(2)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good curs_get:DB_SET:data $d datum$key_set(2) - - # Now do two nexts - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_1 - - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_2 - - # Now do the delete - set r [eval {$db del} $txn $flags {$key_set(2)}] - error_check_good delete $r 0 - - # Now check the get current on the cursor. - set ret [$curs get -current] - error_check_good curs_after_del $ret "" - - # Now check that the rest of the database looks intact. There - # should be only two keys, 1 and 3. 
- - set r [$curs get -first] - error_check_bad cursor_get:DB_FIRST [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_FIRST:key $k $key_set(1) - error_check_good curs_get:DB_FIRST:data $d datum$key_set(1) - - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(3) - error_check_good curs_get:DB_NEXT:data $d datum$key_set(3) - - set r [$curs get -next] - error_check_good cursor_get:DB_NEXT [llength $r] 0 - - puts "\tTest056.b:\ - Cursor delete of first item, followed by cursor FIRST" - # Set to beginning - set r [$curs get -first] - error_check_bad cursor_get:DB_FIRST [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_FIRST:key $k $key_set(1) - error_check_good curs_get:DB_FIRST:data $d datum$key_set(1) - - # Now do delete - error_check_good curs_del [$curs del] 0 - - # Now do DB_FIRST - set r [$curs get -first] - error_check_bad cursor_get:DB_FIRST [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_FIRST:key $k $key_set(3) - error_check_good curs_get:DB_FIRST:data $d datum$key_set(3) - - error_check_good curs_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test057.tcl b/storage/bdb/test/test057.tcl deleted file mode 100644 index 56fe2bf44ec..00000000000 --- a/storage/bdb/test/test057.tcl +++ /dev/null @@ -1,201 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test057.tcl,v 11.26 2004/09/20 17:06:16 sue Exp $ -# -# TEST test057 -# TEST Cursor maintenance during key deletes. -# TEST 1. Delete a key with a cursor. 
Add the key back with a regular -# TEST put. Make sure the cursor can't get the new item. -# TEST 2. Put two cursors on one item. Delete through one cursor, -# TEST check that the other sees the change. -# TEST 3. Same as 2, with the two cursors on a duplicate. - -proc test057 { method args } { - global errorInfo - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - append args " -create -mode 0644 -dup " - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Test057: skipping for method $method" - return - } - puts "Test057: $method delete and replace in presence of cursor." - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test057.db - set env NULL - } else { - set testfile test057.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set flags "" - set txn "" - - set db [eval {berkdb_open} $args {$omethod $testfile}] - error_check_good dbopen:dup [is_valid_db $db] TRUE - - puts "\tTest057.a: Set cursor, delete cursor, put with key." 
- # Put three keys in the database - for { set key 1 } { $key <= 3 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $flags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Retrieve keys sequentially so we can figure out their order - set i 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE - - for {set d [$curs get -first] } {[llength $d] != 0 } \ - {set d [$curs get -next] } { - set key_set($i) [lindex [lindex $d 0] 0] - incr i - } - - # Now put in a bunch of duplicates for key 2 - for { set d 1 } { $d <= 5 } {incr d} { - set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}] - error_check_good dup:put $r 0 - } - - # Now put the cursor on key 1 - - # Now set the cursor on the first of the duplicate set. - set r [$curs get -set $key_set(1)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(1) - error_check_good curs_get:DB_SET:data $d datum$key_set(1) - - # Now do the delete - set r [$curs del] - error_check_good delete $r 0 - - # Now check the get current on the cursor. - error_check_good curs_get:del [$curs get -current] "" - - # Now do a put on the key - set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}] - error_check_good put $r 0 - - # Do a get - set r [eval {$db get} $txn {$key_set(1)}] - error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1) - - # Recheck cursor - error_check_good curs_get:deleted [$curs get -current] "" - - # Move cursor and see if we get the key. 
- set r [$curs get -first] - error_check_bad cursor_get:DB_FIRST [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_FIRST:key $k $key_set(1) - error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1) - - puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other" - set curs2 [eval {$db cursor} $txn] - error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE - - # Set both cursors on the 4rd key - set r [$curs get -set $key_set(3)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(3) - error_check_good curs_get:DB_SET:data $d datum$key_set(3) - - set r [$curs2 get -set $key_set(3)] - error_check_bad cursor2_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs2_get:DB_SET:key $k $key_set(3) - error_check_good curs2_get:DB_SET:data $d datum$key_set(3) - - # Now delete through cursor 1 - error_check_good curs1_del [$curs del] 0 - - # Verify gets on both 1 and 2 - error_check_good curs_get:deleted [$curs get -current] "" - error_check_good curs_get:deleted [$curs2 get -current] "" - - puts "\tTest057.c:\ - Set two cursors on a dup, delete one, overwrite other" - - # Set both cursors on the 2nd duplicate of key 2 - set r [$curs get -set $key_set(2)] - error_check_bad cursor_get:DB_SET [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_SET:key $k $key_set(2) - error_check_good curs_get:DB_SET:data $d datum$key_set(2) - - set r [$curs get -next] - error_check_bad cursor_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs_get:DB_NEXT:key $k $key_set(2) - error_check_good curs_get:DB_NEXT:data $d dup_1 - - set r [$curs2 get -set $key_set(2)] - error_check_bad cursor2_get:DB_SET [llength 
$r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs2_get:DB_SET:key $k $key_set(2) - error_check_good curs2_get:DB_SET:data $d datum$key_set(2) - - set r [$curs2 get -next] - error_check_bad cursor2_get:DB_NEXT [llength $r] 0 - set k [lindex [lindex $r 0] 0] - set d [lindex [lindex $r 0] 1] - error_check_good curs2_get:DB_NEXT:key $k $key_set(2) - error_check_good curs2_get:DB_NEXT:data $d dup_1 - - # Now delete through cursor 1 - error_check_good curs1_del [$curs del] 0 - - # Verify gets on both 1 and 2 - error_check_good curs_get:deleted [$curs get -current] "" - error_check_good curs_get:deleted [$curs2 get -current] "" - - error_check_good curs2_close [$curs2 close] 0 - error_check_good curs_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test058.tcl b/storage/bdb/test/test058.tcl deleted file mode 100644 index 4213e7279b4..00000000000 --- a/storage/bdb/test/test058.tcl +++ /dev/null @@ -1,103 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test058.tcl,v 11.22 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test058 -# TEST Verify that deleting and reading duplicates results in correct ordering. -proc test058 { method args } { - source ./include.tcl - - # - # If we are using an env, then skip this test. It needs its own. 
- set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test058 skipping for env $env" - return - } - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Test058: skipping for method $method" - return - } - puts "Test058: $method delete dups after inserting after duped key." - - # environment - env_cleanup $testdir - set eflags "-create -txn $encargs -home $testdir" - set env [eval {berkdb_env} $eflags] - error_check_good env [is_valid_env $env] TRUE - - # db open - set flags "-auto_commit -create -mode 0644 -dup -env $env $args" - set db [eval {berkdb_open} $flags $omethod "test058.db"] - error_check_good dbopen [is_valid_db $db] TRUE - - set tn "" - set tid "" - set tn [$env txn] - set tflags "-txn $tn" - - puts "\tTest058.a: Adding 10 duplicates" - # Add a bunch of dups - for { set i 0 } { $i < 10 } {incr i} { - set ret \ - [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}] - error_check_good db_put $ret 0 - } - - puts "\tTest058.b: Adding key after duplicates" - # Now add one more key/data AFTER the dup set. 
- set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}] - error_check_good db_put $ret 0 - - error_check_good txn_commit [$tn commit] 0 - - set tn [$env txn] - error_check_good txnbegin [is_substr $tn $env] 1 - set tflags "-txn $tn" - - # Now delete everything - puts "\tTest058.c: Deleting duplicated key" - set ret [eval {$db del} $tflags {doghouse}] - error_check_good del $ret 0 - - # Now reput everything - set pad \ - abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv - - puts "\tTest058.d: Reputting duplicates with big data vals" - for { set i 0 } { $i < 10 } {incr i} { - set ret [eval {$db put} \ - $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}] - error_check_good db_put $ret 0 - } - error_check_good txn_commit [$tn commit] 0 - - # Check duplicates for order - set dbc [$db cursor] - error_check_good db_cursor [is_substr $dbc $db] 1 - - puts "\tTest058.e: Verifying that duplicates are in order." - set i 0 - for { set ret [$dbc get -set doghouse] } \ - {$i < 10 && [llength $ret] != 0} \ - { set ret [$dbc get -nextdup] } { - set data [lindex [lindex $ret 0] 1] - error_check_good \ - duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad - incr i - } - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 - reset_env $env -} diff --git a/storage/bdb/test/test059.tcl b/storage/bdb/test/test059.tcl deleted file mode 100644 index 887ce6d94db..00000000000 --- a/storage/bdb/test/test059.tcl +++ /dev/null @@ -1,150 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test059.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test059 -# TEST Cursor ops work with a partial length of 0. -# TEST Make sure that we handle retrieves of zero-length data items correctly. 
-# TEST The following ops, should allow a partial data retrieve of 0-length. -# TEST db_get -# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE -proc test059 { method args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test059: $method 0-length partial data retrieval" - - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test059.db - set env NULL - } else { - set testfile test059.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set pflags "" - set gflags "" - set txn "" - set count 0 - - if { [is_record_based $method] == 1 } { - append gflags " -recno" - } - - puts "\tTest059.a: Populate a database" - set oflags "-create -mode 0644 $omethod $args $testfile" - set db [eval {berkdb_open} $oflags] - error_check_good db_create [is_substr $db db] 1 - - # Put ten keys in the database - for { set key 1 } { $key <= 10 } {incr key} { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} $txn $pflags {$key datum$key}] - error_check_good put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Retrieve keys sequentially so we can figure out their order - set i 1 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set curs [eval {$db cursor} $txn] - error_check_good db_curs [is_valid_cursor $curs $db] TRUE - - for {set d [$curs get -first] } { [llength $d] != 0 } { - set d [$curs get -next] } { - set key_set($i) [lindex [lindex $d 0] 0] - 
incr i - } - - puts "\tTest059.a: db get with 0 partial length retrieve" - - # Now set the cursor on the middle one. - set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}] - error_check_bad db_get_0 [llength $ret] 0 - - puts "\tTest059.a: db cget FIRST with 0 partial length retrieve" - set ret [$curs get -first -partial {0 0}] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_first $key $key_set(1) - error_check_good db_cget_first [string length $data] 0 - - puts "\tTest059.b: db cget NEXT with 0 partial length retrieve" - set ret [$curs get -next -partial {0 0}] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_next $key $key_set(2) - error_check_good db_cget_next [string length $data] 0 - - puts "\tTest059.c: db cget LAST with 0 partial length retrieve" - set ret [$curs get -last -partial {0 0}] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_last $key $key_set(10) - error_check_good db_cget_last [string length $data] 0 - - puts "\tTest059.d: db cget PREV with 0 partial length retrieve" - set ret [$curs get -prev -partial {0 0}] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_prev $key $key_set(9) - error_check_good db_cget_prev [string length $data] 0 - - puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve" - set ret [$curs get -current -partial {0 0}] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_current $key $key_set(9) - error_check_good db_cget_current [string length $data] 0 - - puts "\tTest059.f: db cget SET with 0 partial length retrieve" - set ret [$curs get -set -partial {0 0} $key_set(7)] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_set $key $key_set(7) - error_check_good db_cget_set [string 
length $data] 0 - - if {[is_btree $method] == 1} { - puts "\tTest059.g:\ - db cget SET_RANGE with 0 partial length retrieve" - set ret [$curs get -set_range -partial {0 0} $key_set(5)] - set data [lindex [lindex $ret 0] 1] - set key [lindex [lindex $ret 0] 0] - error_check_good key_check_set $key $key_set(5) - error_check_good db_cget_set [string length $data] 0 - } - - error_check_good curs_close [$curs close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test060.tcl b/storage/bdb/test/test060.tcl deleted file mode 100644 index 770fb5d052f..00000000000 --- a/storage/bdb/test/test060.tcl +++ /dev/null @@ -1,60 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test060.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test060 -# TEST Test of the DB_EXCL flag to DB->open(). -# TEST 1) Attempt to open and create a nonexistent database; verify success. -# TEST 2) Attempt to reopen it; verify failure. -proc test060 { method args } { - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open" - - # Set the database location and make sure the db doesn't exist yet - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test060.db - set env NULL - } else { - set testfile test060.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - # Create the database and check success - puts "\tTest060.a: open and close non-existent file with DB_EXCL" - set db [eval {berkdb_open \ - -create -excl -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen:excl [is_valid_db $db] TRUE - - # Close it and check success - error_check_good db_close [$db close] 0 - - # Try to open it again, and make sure the open fails - puts "\tTest060.b: open it again with DB_EXCL and make sure it fails" - set errorCode NONE - error_check_good open:excl:catch [catch { \ - set db [eval {berkdb_open_noerr \ - -create -excl -mode 0644} $args {$omethod $testfile}] - } ret ] 1 - - error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1 -} diff --git a/storage/bdb/test/test061.tcl b/storage/bdb/test/test061.tcl deleted file mode 100644 index f4b12c45df5..00000000000 --- a/storage/bdb/test/test061.tcl +++ /dev/null @@ -1,226 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test061.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test061 -# TEST Test of txn abort and commit for in-memory databases. -# TEST a) Put + abort: verify absence of data -# TEST b) Put + commit: verify presence of data -# TEST c) Overwrite + abort: verify that data is unchanged -# TEST d) Overwrite + commit: verify that data has changed -# TEST e) Delete + abort: verify that data is still present -# TEST f) Delete + commit: verify that data has been deleted -proc test061 { method args } { - global alphabet - global encrypt - global errorCode - global passwd - source ./include.tcl - - # - # If we are using an env, then skip this test. 
It needs its own. - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test061 skipping for env $env" - return - } - set args [convert_args $method $args] - set omethod [convert_method $method] - if { [is_queueext $method] == 1} { - puts "Test061 skipping for method $method" - return - } - set encargs "" - set args [split_encargs $args encargs] - - puts "Test061: Transaction abort and commit test for in-memory data." - puts "Test061: $method $args" - - set key "key" - set data "data" - set otherdata "otherdata" - set txn "" - set flags "" - set gflags "" - - if { [is_record_based $method] == 1} { - set key 1 - set gflags " -recno" - } - - puts "\tTest061: Create environment and $method database." - env_cleanup $testdir - - # create environment - set eflags "-create -txn $encargs -home $testdir" - set dbenv [eval {berkdb_env} $eflags] - error_check_good dbenv [is_valid_env $dbenv] TRUE - - # db open -- no file specified, in-memory database - set flags "-auto_commit -create $args $omethod" - set db [eval {berkdb_open -env} $dbenv $flags] - error_check_good dbopen [is_valid_db $db] TRUE - - # Here we go with the six test cases. Since we need to verify - # a different thing each time, and since we can't just reuse - # the same data if we're to test overwrite, we just - # plow through rather than writing some impenetrable loop code; - # each of the cases is only a few lines long, anyway. 
- - puts "\tTest061.a: put/abort" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - - # put a key - set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}] - error_check_good db_put $ret 0 - - # check for existence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret [list [list $key [pad_data $method $data]]] - - # abort - error_check_good txn_abort [$txn abort] 0 - - # check for *non*-existence - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret {} - - puts "\tTest061.b: put/commit" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - - # put a key - set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}] - error_check_good db_put $ret 0 - - # check for existence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret [list [list $key [pad_data $method $data]]] - - # commit - error_check_good txn_commit [$txn commit] 0 - - # check again for existence - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret [list [list $key [pad_data $method $data]]] - - puts "\tTest061.c: overwrite/abort" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - - # overwrite {key,data} with {key,otherdata} - set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}] - error_check_good db_put $ret 0 - - # check for existence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret \ - [list [list $key [pad_data $method $otherdata]]] - - # abort - error_check_good txn_abort [$txn abort] 0 - - # check that data is unchanged ($data not $otherdata) - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret [list [list $key [pad_data $method $data]]] - - puts "\tTest061.d: overwrite/commit" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - 
- # overwrite {key,data} with {key,otherdata} - set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}] - error_check_good db_put $ret 0 - - # check for existence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret \ - [list [list $key [pad_data $method $otherdata]]] - - # commit - error_check_good txn_commit [$txn commit] 0 - - # check that data has changed ($otherdata not $data) - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret \ - [list [list $key [pad_data $method $otherdata]]] - - puts "\tTest061.e: delete/abort" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - - # delete - set ret [eval {$db del} -txn $txn {$key}] - error_check_good db_put $ret 0 - - # check for nonexistence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret {} - - # abort - error_check_good txn_abort [$txn abort] 0 - - # check for existence - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret \ - [list [list $key [pad_data $method $otherdata]]] - - puts "\tTest061.f: delete/commit" - - # txn_begin - set txn [$dbenv txn] - error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE - - # put a key - set ret [eval {$db del} -txn $txn {$key}] - error_check_good db_put $ret 0 - - # check for nonexistence - set ret [eval {$db get} -txn $txn $gflags {$key}] - error_check_good get $ret {} - - # commit - error_check_good txn_commit [$txn commit] 0 - - # check for continued nonexistence - set ret [eval {$db get} $gflags {$key}] - error_check_good get $ret {} - - # We're done; clean up. - error_check_good db_close [eval {$db close}] 0 - error_check_good env_close [eval {$dbenv close}] 0 - - # Now run db_recover and ensure that it runs cleanly. 
- set utilflag "" - if { $encrypt != 0 } { - set utilflag "-P $passwd" - } - puts "\tTest061.g: Running db_recover -h" - set ret [catch {eval {exec} $util_path/db_recover -h $testdir \ - $utilflag} res] - if { $ret != 0 } { - puts "FAIL: db_recover outputted $res" - } - error_check_good db_recover $ret 0 - - puts "\tTest061.h: Running db_recover -c -h" - set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \ - $utilflag} res] - error_check_good db_recover-c $ret 0 -} diff --git a/storage/bdb/test/test062.tcl b/storage/bdb/test/test062.tcl deleted file mode 100644 index f26c66d6a1c..00000000000 --- a/storage/bdb/test/test062.tcl +++ /dev/null @@ -1,153 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test062.tcl,v 11.23 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test062 -# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages. -# TEST Insert the first 200 words into the dictionary 200 times each with -# TEST self as key and :self as data. Use partial puts to -# TEST append self again to data; verify correctness. -proc test062 { method {nentries 200} {ndups 200} {tnum "062"} args } { - global alphabet - global rand_init - source ./include.tcl - - berkdb srand $rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $omethod" - return - } - # Create the database and open the dictionary - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 200 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "Test$tnum:\ - $method ($args) $nentries Partial puts and $ndups duplicates." - set db [eval {berkdb_open -create -mode 0644 \ - $omethod -dup} $args {$testfile} ] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put each key/data pair - puts "\tTest$tnum.a: Put loop (initialize database)" - while { [gets $did str] != -1 && $count < $nentries } { - for { set i 1 } { $i <= $ndups } { incr i } { - set pref \ - [string index $alphabet [berkdb random_int 0 25]] - set datastr $pref:$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$str [chop_data $method $datastr]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - set keys($count) $str - - incr count - } - close $did - - puts "\tTest$tnum.b: Partial puts." - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_substr $dbc $db] 1 - - # Do a partial write to extend each datum in - # the regular db by the corresponding dictionary word. - # We have to go through each key's dup set using -set - # because cursors are not stable in the hash AM and we - # want to make sure we hit all the keys. 
- for { set i 0 } { $i < $count } { incr i } { - set key $keys($i) - for {set ret [$dbc get -set $key]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -nextdup]} { - - set k [lindex [lindex $ret 0] 0] - set orig_d [lindex [lindex $ret 0] 1] - set d [string range $orig_d 2 end] - set doff [expr [string length $d] + 2] - set dlen 0 - error_check_good data_and_key_sanity $d $k - - set ret [$dbc get -current] - error_check_good before_sanity \ - [lindex [lindex $ret 0] 0] \ - [string range [lindex [lindex $ret 0] 1] 2 end] - - error_check_good partial_put [eval {$dbc put -current \ - -partial [list $doff $dlen] $d}] 0 - - set ret [$dbc get -current] - error_check_good partial_put_correct \ - [lindex [lindex $ret 0] 1] $orig_d$d - } - } - - puts "\tTest$tnum.c: Double-checking get loop." - # Double-check that each datum in the regular db has - # been appropriately modified. - - for {set ret [$dbc get -first]} \ - {[llength $ret] != 0} \ - {set ret [$dbc get -next]} { - - set k [lindex [lindex $ret 0] 0] - set d [lindex [lindex $ret 0] 1] - error_check_good modification_correct \ - [string range $d 2 end] [repeat $k 2] - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test063.tcl b/storage/bdb/test/test063.tcl deleted file mode 100644 index 8918fa4fa00..00000000000 --- a/storage/bdb/test/test063.tcl +++ /dev/null @@ -1,174 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test063.tcl,v 11.20 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test063 -# TEST Test of the DB_RDONLY flag to DB->open -# TEST Attempt to both DB->put and DBC->c_put into a database -# TEST that has been opened DB_RDONLY, and check for failure. 
-proc test063 { method args } { - global errorCode - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "063" - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set key "key" - set data "data" - set key2 "another_key" - set data2 "more_data" - - set gflags "" - set txn "" - - if { [is_record_based $method] == 1 } { - set key "1" - set key2 "2" - append gflags " -recno" - } - - puts "Test$tnum: $method ($args) DB_RDONLY test." - - # Create a test database. - puts "\tTest$tnum.a: Creating test database." - set db [eval {berkdb_open_noerr -create -mode 0644} \ - $omethod $args $testfile] - error_check_good db_create [is_valid_db $db] TRUE - - # Put and get an item so it's nonempty. - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key [chop_data $method $data]}] - error_check_good initial_put $ret 0 - - set dbt [eval {$db get} $txn $gflags {$key}] - error_check_good initial_get $dbt \ - [list [list $key [pad_data $method $data]]] - - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - if { $eindex == -1 } { - # Confirm that database is writable. If we are - # using an env (that may be remote on a server) - # we cannot do this check. - error_check_good writable [file writable $testfile] 1 - } - - puts "\tTest$tnum.b: Re-opening DB_RDONLY and attempting to put." 
- - # Now open it read-only and make sure we can get but not put. - set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbt [eval {$db get} $txn $gflags {$key}] - error_check_good db_get $dbt \ - [list [list $key [pad_data $method $data]]] - - set ret [catch {eval {$db put} $txn \ - {$key2 [chop_data $method $data]}} res] - error_check_good put_failed $ret 1 - error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set errorCode "NONE" - - puts "\tTest$tnum.c: Attempting cursor put." - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE - - error_check_good cursor_set [$dbc get -first] $dbt - set ret [catch {eval {$dbc put} -current $data} res] - error_check_good c_put_failed $ret 1 - error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1 - - set dbt [eval {$db get} $gflags {$key2}] - error_check_good db_get_key2 $dbt "" - - puts "\tTest$tnum.d: Attempting ordinary delete." - - set errorCode "NONE" - set ret [catch {eval {$db del} $txn {$key}} 1] - error_check_good del_failed $ret 1 - error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1 - - set dbt [eval {$db get} $txn $gflags {$key}] - error_check_good db_get_key $dbt \ - [list [list $key [pad_data $method $data]]] - - puts "\tTest$tnum.e: Attempting cursor delete." - # Just set the cursor to the beginning; we don't care what's there... - # yet. 
- set dbt2 [$dbc get -first] - error_check_good db_get_first_key $dbt2 $dbt - set errorCode "NONE" - set ret [catch {$dbc del} res] - error_check_good c_del_failed $ret 1 - error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1 - - set dbt2 [$dbc get -current] - error_check_good db_get_key $dbt2 $dbt - - puts "\tTest$tnum.f: Close, reopen db; verify unchanged." - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - set db [eval {berkdb_open} $omethod $args $testfile] - error_check_good db_reopen [is_valid_db $db] TRUE - - set dbc [$db cursor] - error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE - - error_check_good first_there [$dbc get -first] \ - [list [list $key [pad_data $method $data]]] - error_check_good nomore_there [$dbc get -next] "" - - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test064.tcl b/storage/bdb/test/test064.tcl deleted file mode 100644 index 13a64657463..00000000000 --- a/storage/bdb/test/test064.tcl +++ /dev/null @@ -1,69 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test064.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test064 -# TEST Test of DB->get_type -# TEST Create a database of type specified by method. -# TEST Make sure DB->get_type returns the right thing with both a normal -# TEST and DB_UNKNOWN open. -proc test064 { method args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "064" - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "Test$tnum: $method ($args) DB->get_type test." - - # Create a test database. - puts "\tTest$tnum.a: Creating test database of type $method." - set db [eval {berkdb_open -create -mode 0644} \ - $omethod $args $testfile] - error_check_good db_create [is_valid_db $db] TRUE - - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.b: get_type after method specifier." - - set db [eval {berkdb_open} $omethod $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - set type [$db get_type] - error_check_good get_type $type [string range $omethod 1 end] - - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.c: get_type after DB_UNKNOWN." - - set db [eval {berkdb_open} $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - set type [$db get_type] - error_check_good get_type $type [string range $omethod 1 end] - - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test065.tcl b/storage/bdb/test/test065.tcl deleted file mode 100644 index cb29b106d24..00000000000 --- a/storage/bdb/test/test065.tcl +++ /dev/null @@ -1,208 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test065.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test065 -# TEST Test of DB->stat, both -DB_FAST_STAT and row -# TEST counts with DB->stat -txn. 
-proc test065 { method args } { - source ./include.tcl - global errorCode - global alphabet - - set nentries 10000 - set args [convert_args $method $args] - set omethod [convert_method $method] - set tnum "065" - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "Test$tnum: $method ($args) DB->stat(DB_FAST_STAT) test." - - puts "\tTest$tnum.a: Create database and check it while empty." - - set db [eval {berkdb_open_noerr -create -mode 0644} \ - $omethod $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - set ret [catch {eval $db stat -faststat} res] - - error_check_good db_close [$db close] 0 - - if { ([is_record_based $method] && ![is_queue $method]) \ - || [is_rbtree $method] } { - error_check_good recordcount_ok [is_substr $res \ - "{{Number of keys} 0}"] 1 - } else { - puts "\tTest$tnum: Test complete for method $method." - return - } - - # If we've got this far, we're on an access method for - # which record counts makes sense. Thus, we no longer - # catch EINVALs, and no longer care about __db_errs. - set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile] - - puts "\tTest$tnum.b: put $nentries keys." 
- - if { [is_record_based $method] } { - set gflags " -recno " - set keypfx "" - } else { - set gflags "" - set keypfx "key" - } - - set txn "" - set data [pad_data $method $alphabet] - - for { set ndx 1 } { $ndx <= $nentries } { incr ndx } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$keypfx$ndx $data}] - error_check_good db_put $ret 0 - set statret [eval {$db stat} $txn] - set rowcount [getstats $statret "Number of records"] - error_check_good rowcount $rowcount $ndx - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [$db stat -faststat] - error_check_good recordcount_after_puts \ - [is_substr $ret "{{Number of keys} $nentries}"] 1 - - puts "\tTest$tnum.c: delete 90% of keys." - set end [expr {$nentries / 10 * 9}] - for { set ndx 1 } { $ndx <= $end } { incr ndx } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - if { [is_rrecno $method] == 1 } { - # if we're renumbering, when we hit key 5001 we'll - # have deleted 5000 and we'll croak! So delete key - # 1, repeatedly. - set ret [eval {$db del} $txn {[concat $keypfx 1]}] - set statret [eval {$db stat} $txn] - set rowcount [getstats $statret "Number of records"] - error_check_good rowcount $rowcount [expr $nentries - $ndx] - } else { - set ret [eval {$db del} $txn {$keypfx$ndx}] - set rowcount [getstats $statret "Number of records"] - error_check_good rowcount $rowcount $nentries - } - error_check_good db_del $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [$db stat -faststat] - if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } { - # We allow renumbering--thus the stat should return 10% - # of nentries. 
- error_check_good recordcount_after_dels [is_substr $ret \ - "{{Number of keys} [expr {$nentries / 10}]}"] 1 - } else { - # No renumbering--no change in RECORDCOUNT! - error_check_good recordcount_after_dels \ - [is_substr $ret "{{Number of keys} $nentries}"] 1 - } - - puts "\tTest$tnum.d: put new keys at the beginning." - set end [expr {$nentries / 10 * 8}] - for { set ndx 1 } { $ndx <= $end } {incr ndx } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$keypfx$ndx $data}] - error_check_good db_put_beginning $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [$db stat -faststat] - if { [is_rrecno $method] == 1 } { - # With renumbering we're back up to 80% of $nentries - error_check_good recordcount_after_dels [is_substr $ret \ - "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1 - } elseif { [is_rbtree $method] == 1 } { - # Total records in a btree is now 90% of $nentries - error_check_good recordcount_after_dels [is_substr $ret \ - "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1 - } else { - # No renumbering--still no change in RECORDCOUNT. - error_check_good recordcount_after_dels [is_substr $ret \ - "{{Number of keys} $nentries}"] 1 - } - - puts "\tTest$tnum.e: put new keys at the end." 
- set start [expr {1 + $nentries / 10 * 9}] - set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}] - for { set ndx $start } { $ndx <= $end } { incr ndx } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$keypfx$ndx $data}] - error_check_good db_put_end $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [$db stat -faststat] - if { [is_rbtree $method] != 1 } { - # If this is a recno database, the record count should be up - # to (1.7 x nentries), the largest number we've seen, with - # or without renumbering. - error_check_good recordcount_after_puts2 [is_substr $ret \ - "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1 - } else { - # In an rbtree, 1000 of those keys were overwrites, so there - # are (.7 x nentries) new keys and (.9 x nentries) old keys - # for a total of (1.6 x nentries). - error_check_good recordcount_after_puts2 [is_substr $ret \ - "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1 - } - - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test066.tcl b/storage/bdb/test/test066.tcl deleted file mode 100644 index 6f31209071a..00000000000 --- a/storage/bdb/test/test066.tcl +++ /dev/null @@ -1,99 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test066.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test066 -# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates. -# TEST -# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a -# TEST database with duplicates. -proc test066 { method args } { - set omethod [convert_method $method] - set args [convert_args $method $args] - - set tnum "066" - - if { [is_record_based $method] || [is_rbtree $method] } { - puts "Test$tnum: Skipping for method $method." 
- return - } - - puts "Test$tnum: Test of cursor put to DB_CURRENT with duplicates." - - source ./include.tcl - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test066.db - set env NULL - } else { - set testfile test066.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set txn "" - set key "test" - set data "olddata" - - set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \ - $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key [chop_data $method $data]}] - error_check_good db_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set ret [$dbc get -first] - error_check_good db_get $ret [list [list $key [pad_data $method $data]]] - - set newdata "newdata" - set ret [$dbc put -current [chop_data $method $newdata]] - error_check_good dbc_put $ret 0 - - # There should be only one (key,data) pair in the database, and this - # is it. - set ret [$dbc get -first] - error_check_good db_get_first $ret \ - [list [list $key [pad_data $method $newdata]]] - - # and this one should come up empty. 
- set ret [$dbc get -next] - error_check_good db_get_next $ret "" - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum: Test completed successfully." -} diff --git a/storage/bdb/test/test067.tcl b/storage/bdb/test/test067.tcl deleted file mode 100644 index 710c6b9c201..00000000000 --- a/storage/bdb/test/test067.tcl +++ /dev/null @@ -1,162 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test067.tcl,v 11.25 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test067 -# TEST Test of DB_CURRENT partial puts onto almost empty duplicate -# TEST pages, with and without DB_DUP_SORT. -# TEST -# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages. -# TEST This test was written to address the following issue, #2 in the -# TEST list of issues relating to bug #0820: -# TEST -# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree: -# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records -# TEST first deletes the record and then puts the new one -- this could -# TEST be a problem if the removal of the record causes a reverse split. -# TEST Suggested solution is to acquire a cursor to lock down the current -# TEST record, put a new record after that record, and then delete using -# TEST the held cursor. -# TEST -# TEST It also tests the following, #5 in the same list of issues: -# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL -# TEST set, duplicate comparison routine specified. -# TEST The partial change does not change how data items sort, but the -# TEST record to be put isn't built yet, and that record supplied is the -# TEST one that's checked for ordering compatibility. 
-proc test067 { method {ndups 1000} {tnum "067"} args } { - source ./include.tcl - global alphabet - global errorCode - global is_je_test - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set txn "" - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - if { $ndups == 1000 } { - set ndups 100 - } - } - set testdir [get_home $env] - } - - cleanup $testdir $env - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "\tTest$tnum: skipping for method $method." - return - } - - puts "Test$tnum:\ - $method ($args) Partial puts on near-empty duplicate pages." - - foreach dupopt { "-dup" "-dup -dupsort" } { - if { $is_je_test && $dupopt == "-dup" } { - continue - } - - # - # Testdir might get reset from the env's home dir back - # to the default if this calls something that sources - # include.tcl, since testdir is a global. Set it correctly - # here each time through the loop. - # - if { $env != "NULL" } { - set testdir [get_home $env] - } - cleanup $testdir $env - set db [eval {berkdb_open -create -mode 0644 \ - $omethod} $args $dupopt {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - puts "\tTest$tnum.a ($dupopt): Put $ndups duplicates." - - set key "key_test$tnum" - - for { set ndx 0 } { $ndx < $ndups } { incr ndx } { - set data $alphabet$ndx - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - # No need for pad_data since we're skipping recno. 
- set ret [eval {$db put} $txn {$key $data}] - error_check_good put($key,$data) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # Sync so we can inspect database if the next section bombs. - error_check_good db_sync [$db sync] 0 - puts "\tTest$tnum.b ($dupopt):\ - Deleting dups (last first), overwriting each." - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE - - set count 0 - while { $count < $ndups - 1 } { - # set cursor to last item in db - set ret [$dbc get -last] - error_check_good \ - verify_key [lindex [lindex $ret 0] 0] $key - - # for error reporting - set currdatum [lindex [lindex $ret 0] 1] - - # partial-overwrite it - # (overwrite offsets 1-4 with "bcde"--which they - # already are) - - # Even though we expect success, we catch this - # since it might return EINVAL, and we want that - # to FAIL. - set errorCode NONE - set ret [catch {eval $dbc put -current \ - {-partial [list 1 4]} "bcde"} \ - res] - error_check_good \ - partial_put_valid($currdatum) $errorCode NONE - error_check_good partial_put($currdatum) $res 0 - - # delete it - error_check_good dbc_del [$dbc del] 0 - - #puts $currdatum - - incr count - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - } -} diff --git a/storage/bdb/test/test068.tcl b/storage/bdb/test/test068.tcl deleted file mode 100644 index a4fb56da091..00000000000 --- a/storage/bdb/test/test068.tcl +++ /dev/null @@ -1,231 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test068.tcl,v 11.21 2004/05/13 18:51:43 mjc Exp $ -# -# TEST test068 -# TEST Test of DB_BEFORE and DB_AFTER with partial puts. 
-# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and -# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not. -proc test068 { method args } { - source ./include.tcl - global alphabet - global errorCode - global is_je_test - - set tnum "068" - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - set nkeys 1000 - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - set nkeys 100 - } - set testdir [get_home $env] - } - - puts "Test$tnum:\ - $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts." - if { [is_record_based $method] == 1 } { - puts "\tTest$tnum: skipping for method $method." - return - } - - # Create a list of $nkeys words to insert into db. - puts "\tTest$tnum.a: Initialize word list." - set txn "" - set wordlist {} - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $nkeys } { - lappend wordlist $str - incr count - } - close $did - - # Sanity check: did we get $nkeys words? - error_check_good enough_keys [llength $wordlist] $nkeys - - # rbtree can't handle dups, so just test the non-dup case - # if it's the current method. - if { [is_rbtree $method] == 1 } { - set dupoptlist { "" } - } else { - set dupoptlist { "" "-dup" "-dup -dupsort" } - } - - foreach dupopt $dupoptlist { - if { $is_je_test && $dupopt == "-dup" } { - continue - } - - # - # Testdir might be reset in the loop by some proc sourcing - # include.tcl. Reset it to the env's home here, before - # cleanup. 
- if { $env != "NULL" } { - set testdir [get_home $env] - } - cleanup $testdir $env - set db [eval {berkdb_open_noerr -create -mode 0644 \ - $omethod} $args $dupopt {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - puts "\tTest$tnum.b ($dupopt): DB initialization: put loop." - foreach word $wordlist { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$word $word}] - error_check_good db_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - puts "\tTest$tnum.c ($dupopt): get loop." - foreach word $wordlist { - # Make sure that the Nth word has been correctly - # inserted, and also that the Nth word is the - # Nth one we pull out of the database using a cursor. - - set dbt [$db get $word] - error_check_good get_key [list [list $word $word]] $dbt - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE - - puts "\tTest$tnum.d ($dupopt): DBC->put w/ DB_AFTER." - - # Set cursor to the first key; make sure it succeeds. - # With an unsorted wordlist, we can't be sure that the - # first item returned will equal the first item in the - # wordlist, so we just make sure it got something back. - set dbt [eval {$dbc get -first}] - error_check_good \ - dbc_get_first [llength $dbt] 1 - - # If -dup is not set, or if -dupsort is set too, we - # need to verify that DB_BEFORE and DB_AFTER fail - # and then move on to the next $dupopt. - if { $dupopt != "-dup" } { - set errorCode "NONE" - set ret [catch {eval $dbc put -after \ - {-partial [list 6 0]} "after"} res] - error_check_good dbc_put_after_fail $ret 1 - error_check_good dbc_put_after_einval \ - [is_substr $errorCode EINVAL] 1 - puts "\tTest$tnum ($dupopt): DB_AFTER returns EINVAL." 
- set errorCode "NONE" - set ret [catch {eval $dbc put -before \ - {-partial [list 6 0]} "before"} res] - error_check_good dbc_put_before_fail $ret 1 - error_check_good dbc_put_before_einval \ - [is_substr $errorCode EINVAL] 1 - puts "\tTest$tnum ($dupopt): DB_BEFORE returns EINVAL." - puts "\tTest$tnum ($dupopt): Correct error returns,\ - skipping further test." - # continue with broad foreach - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - continue - } - - puts "\tTest$tnum.e ($dupopt): DBC->put(DB_AFTER) loop." - foreach word $wordlist { - # set cursor to $word - set dbt [$dbc get -set $word] - error_check_good \ - dbc_get_set $dbt [list [list $word $word]] - # put after it - set ret [$dbc put -after -partial {4 0} after] - error_check_good dbc_put_after $ret 0 - } - - puts "\tTest$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop." - foreach word $wordlist { - # set cursor to $word - set dbt [$dbc get -set $word] - error_check_good \ - dbc_get_set $dbt [list [list $word $word]] - # put before it - set ret [$dbc put -before -partial {6 0} before] - error_check_good dbc_put_before $ret 0 - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - eval $db sync - puts "\tTest$tnum.g ($dupopt): Verify correctness." - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # loop through the whole db beginning to end, - # make sure we have, in order, {$word "\0\0\0\0\0\0before"}, - # {$word $word}, {$word "\0\0\0\0after"} for each word. - set count 0 - while { $count < $nkeys } { - # Get the first item of each set of three. - # We don't know what the word is, but set $word to - # the key and check that the data is - # "\0\0\0\0\0\0before". 
- set dbt [$dbc get -next] - set word [lindex [lindex $dbt 0] 0] - - error_check_good dbc_get_one $dbt \ - [list [list $word "\0\0\0\0\0\0before"]] - - set dbt [$dbc get -next] - error_check_good \ - dbc_get_two $dbt [list [list $word $word]] - - set dbt [$dbc get -next] - error_check_good dbc_get_three $dbt \ - [list [list $word "\0\0\0\0after"]] - - incr count - } - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - } -} diff --git a/storage/bdb/test/test069.tcl b/storage/bdb/test/test069.tcl deleted file mode 100644 index 46104ffa28b..00000000000 --- a/storage/bdb/test/test069.tcl +++ /dev/null @@ -1,14 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test069.tcl,v 11.10 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test069 -# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/ -# TEST small ndups to ensure that partial puts to DB_CURRENT work -# TEST correctly in the absence of duplicate pages. -proc test069 { method {ndups 50} {tnum "069"} args } { - eval test067 $method $ndups $tnum $args -} diff --git a/storage/bdb/test/test070.tcl b/storage/bdb/test/test070.tcl deleted file mode 100644 index 9d124a77b64..00000000000 --- a/storage/bdb/test/test070.tcl +++ /dev/null @@ -1,136 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test070.tcl,v 11.33 2004/02/17 16:29:07 dda Exp $ -# -# TEST test070 -# TEST Test of DB_CONSUME (Four consumers, 1000 items.) -# TEST -# TEST Fork off six processes, four consumers and two producers. -# TEST The producers will each put 20000 records into a queue; -# TEST the consumers will each get 10000. -# TEST Then, verify that no record was lost or retrieved twice. 
-proc test070 { method {nconsumers 4} {nproducers 2} \ - {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum "070"} args } { - source ./include.tcl - global alphabet - global encrypt - - # - # If we are using an env, then skip this test. It needs its own. - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test$tnum skipping for env $env" - return - } - set omethod [convert_method $method] - set args [convert_args $method $args] - if { $encrypt != 0 } { - puts "Test$tnum skipping for security" - return - } - - puts "Test$tnum: $method ($args) Test of DB_$mode flag to DB->get." - puts "\tUsing $txn environment." - - error_check_good enough_consumers [expr $nconsumers > 0] 1 - error_check_good enough_producers [expr $nproducers > 0] 1 - - if { [is_queue $method] != 1 } { - puts "\tSkipping Test$tnum for method $method." - return - } - - env_cleanup $testdir - set testfile test$tnum.db - - # Create environment - set dbenv [eval {berkdb_env -create $txn -home } $testdir] - error_check_good dbenv_create [is_valid_env $dbenv] TRUE - - # Create database - set db [eval {berkdb_open -create -mode 0644 -queue}\ - -env $dbenv $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - if { $start != 0 } { - error_check_good set_seed [$db put $start "consumer data"] 0 - puts "\tTest$tnum: starting at $start." - } else { - incr start - } - - set pidlist {} - - # Divvy up the total number of records amongst the consumers and - # producers. - error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0 - error_check_good prod_div_evenly [expr $nitems % $nproducers] 0 - set nperconsumer [expr $nitems / $nconsumers] - set nperproducer [expr $nitems / $nproducers] - - set consumerlog $testdir/CONSUMERLOG. 
- - # Fork consumer processes (we want them to be hungry) - for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } { - set output $consumerlog$ndx - set p [exec $tclsh_path $test_path/wrap.tcl \ - conscript.tcl $testdir/conscript.log.consumer$ndx \ - $testdir $testfile $mode $nperconsumer $output $tnum \ - $args &] - lappend pidlist $p - } - for { set ndx 0 } { $ndx < $nproducers } { incr ndx } { - set p [exec $tclsh_path $test_path/wrap.tcl \ - conscript.tcl $testdir/conscript.log.producer$ndx \ - $testdir $testfile PRODUCE $nperproducer "" $tnum \ - $args &] - lappend pidlist $p - } - - # Wait for all children. - watch_procs $pidlist 10 - - # Verify: slurp all record numbers into list, sort, and make - # sure each appears exactly once. - puts "\tTest$tnum: Verifying results." - set reclist {} - for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } { - set input $consumerlog$ndx - set iid [open $input r] - while { [gets $iid str] != -1 } { - lappend reclist $str - } - close $iid - } - set sortreclist [lsort -command int32_compare $reclist] - - set nitems [expr $start + $nitems] - for { set ndx $start } { $ndx < $nitems } { set ndx [expr $ndx + 1] } { - # Wrap if $ndx goes beyond 32 bits because our - # recno wrapped if it did. - if { $ndx > 0xffffffff } { - set cmp [expr $ndx - 0xffffffff] - } else { - set cmp [expr $ndx + 0] - } - # Skip 0 if we are wrapping around - if { $cmp == 0 } { - incr ndx - incr nitems - incr cmp - } - # Be sure to convert ndx to a number before comparing. - error_check_good pop_num [lindex $sortreclist 0] $cmp - set sortreclist [lreplace $sortreclist 0 0] - } - error_check_good list_ends_empty $sortreclist {} - error_check_good db_close [$db close] 0 - error_check_good dbenv_close [$dbenv close] 0 - - puts "\tTest$tnum completed successfully." 
-} diff --git a/storage/bdb/test/test071.tcl b/storage/bdb/test/test071.tcl deleted file mode 100644 index 7834c82ad4d..00000000000 --- a/storage/bdb/test/test071.tcl +++ /dev/null @@ -1,16 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test071.tcl,v 11.14 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test071 -# TEST Test of DB_CONSUME (One consumer, 10000 items.) -# TEST This is DB Test 70, with one consumer, one producers, and 10000 items. -proc test071 { method {nconsumers 1} {nproducers 1} {nitems 10000} \ - {mode CONSUME} {start 0 } {txn -txn} {tnum "071"} args } { - - eval test070 $method \ - $nconsumers $nproducers $nitems $mode $start $txn $tnum $args -} diff --git a/storage/bdb/test/test072.tcl b/storage/bdb/test/test072.tcl deleted file mode 100644 index 3a8ff7aa28e..00000000000 --- a/storage/bdb/test/test072.tcl +++ /dev/null @@ -1,257 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test072.tcl,v 11.34 2004/05/13 18:51:44 mjc Exp $ -# -# TEST test072 -# TEST Test of cursor stability when duplicates are moved off-page. -proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } { - source ./include.tcl - global alphabet - global is_je_test - - set omethod [convert_method $method] - set args [convert_args $method $args] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile name should just be - # the db name. Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set basename $testdir/test$tnum - set env NULL - } else { - set basename test$tnum - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - # Keys must sort $prekey < $key < $postkey. - set prekey "a key" - set key "the key" - set postkey "z key" - - # Make these distinguishable from each other and from the - # alphabets used for the $key's data. - set predatum "1234567890" - set postdatum "0987654321" - - puts -nonewline "Test$tnum $omethod ($args): " - if { [is_record_based $method] || [is_rbtree $method] } { - puts "Skipping for method $method." - return - } else { - puts "\nTest$tnum: Test of cursor stability when\ - duplicates are moved off-page." - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test$tnum: skipping for specific pagesizes" - return - } - - append args " -pagesize $pagesize " - set txn "" - - set dlist [list "-dup" "-dup -dupsort"] - set testid 0 - foreach dupopt $dlist { - if { $is_je_test && $dupopt == "-dup" } { - continue - } - - incr testid - set duptestfile $basename$testid.db - set db [eval {berkdb_open -create -mode 0644} \ - $omethod $args $dupopt {$duptestfile}] - error_check_good "db open" [is_valid_db $db] TRUE - - puts \ -"\tTest$tnum.a: ($dupopt) Set up surrounding keys and cursors." 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$prekey $predatum}] - error_check_good pre_put $ret 0 - set ret [eval {$db put} $txn {$postkey $postdatum}] - error_check_good post_put $ret 0 - - set precursor [eval {$db cursor} $txn] - error_check_good precursor [is_valid_cursor $precursor \ - $db] TRUE - set postcursor [eval {$db cursor} $txn] - error_check_good postcursor [is_valid_cursor $postcursor \ - $db] TRUE - error_check_good preset [$precursor get -set $prekey] \ - [list [list $prekey $predatum]] - error_check_good postset [$postcursor get -set $postkey] \ - [list [list $postkey $postdatum]] - - puts "\tTest$tnum.b: Put/create cursor/verify all cursor loop." - - for { set i 0 } { $i < $ndups } { incr i } { - set datum [format "%4d$alphabet" [expr $i + 1000]] - set data($i) $datum - - # Uncomment these lines to see intermediate steps. - # error_check_good db_sync($i) [$db sync] 0 - # error_check_good db_dump($i) \ - # [catch {exec $util_path/db_dump \ - # -da $duptestfile > $testdir/out.$i}] 0 - - set ret [eval {$db put} $txn {$key $datum}] - error_check_good "db put ($i)" $ret 0 - - set dbc($i) [eval {$db cursor} $txn] - error_check_good "db cursor ($i)"\ - [is_valid_cursor $dbc($i) $db] TRUE - - error_check_good "dbc get -get_both ($i)"\ - [$dbc($i) get -get_both $key $datum]\ - [list [list $key $datum]] - - for { set j 0 } { $j < $i } { incr j } { - set dbt [$dbc($j) get -current] - set k [lindex [lindex $dbt 0] 0] - set d [lindex [lindex $dbt 0] 1] - - #puts "cursor $j after $i: $d" - - eval {$db sync} - - error_check_good\ - "cursor $j key correctness after $i puts" \ - $k $key - error_check_good\ - "cursor $j data correctness after $i puts" \ - $d $data($j) - } - - # Check correctness of pre- and post- cursors. Do an - # error_check_good on the lengths first so that we don't - # spew garbage as the "got" field and screw up our - # terminal. 
(It's happened here.) - set pre_dbt [$precursor get -current] - set post_dbt [$postcursor get -current] - error_check_good \ - "key earlier cursor correctness after $i puts" \ - [string length [lindex [lindex $pre_dbt 0] 0]] \ - [string length $prekey] - error_check_good \ - "data earlier cursor correctness after $i puts" \ - [string length [lindex [lindex $pre_dbt 0] 1]] \ - [string length $predatum] - error_check_good \ - "key later cursor correctness after $i puts" \ - [string length [lindex [lindex $post_dbt 0] 0]] \ - [string length $postkey] - error_check_good \ - "data later cursor correctness after $i puts" \ - [string length [lindex [lindex $post_dbt 0] 1]]\ - [string length $postdatum] - - error_check_good \ - "earlier cursor correctness after $i puts" \ - $pre_dbt [list [list $prekey $predatum]] - error_check_good \ - "later cursor correctness after $i puts" \ - $post_dbt [list [list $postkey $postdatum]] - } - - puts "\tTest$tnum.c: Reverse Put/create cursor/verify all cursor loop." - set end [expr $ndups * 2 - 1] - for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } { - set datum [format "%4d$alphabet" [expr $i + 1000]] - set data($i) $datum - - # Uncomment these lines to see intermediate steps. 
- # error_check_good db_sync($i) [$db sync] 0 - # error_check_good db_dump($i) \ - # [catch {exec $util_path/db_dump \ - # -da $duptestfile > $testdir/out.$i}] 0 - - set ret [eval {$db put} $txn {$key $datum}] - error_check_good "db put ($i)" $ret 0 - - error_check_bad dbc($i)_stomped [info exists dbc($i)] 1 - set dbc($i) [eval {$db cursor} $txn] - error_check_good "db cursor ($i)"\ - [is_valid_cursor $dbc($i) $db] TRUE - - error_check_good "dbc get -get_both ($i)"\ - [$dbc($i) get -get_both $key $datum]\ - [list [list $key $datum]] - - for { set j $i } { $j < $end } { incr j } { - set dbt [$dbc($j) get -current] - set k [lindex [lindex $dbt 0] 0] - set d [lindex [lindex $dbt 0] 1] - - #puts "cursor $j after $i: $d" - - eval {$db sync} - - error_check_good\ - "cursor $j key correctness after $i puts" \ - $k $key - error_check_good\ - "cursor $j data correctness after $i puts" \ - $d $data($j) - } - - # Check correctness of pre- and post- cursors. Do an - # error_check_good on the lengths first so that we don't - # spew garbage as the "got" field and screw up our - # terminal. (It's happened here.) 
- set pre_dbt [$precursor get -current] - set post_dbt [$postcursor get -current] - error_check_good \ - "key earlier cursor correctness after $i puts" \ - [string length [lindex [lindex $pre_dbt 0] 0]] \ - [string length $prekey] - error_check_good \ - "data earlier cursor correctness after $i puts" \ - [string length [lindex [lindex $pre_dbt 0] 1]] \ - [string length $predatum] - error_check_good \ - "key later cursor correctness after $i puts" \ - [string length [lindex [lindex $post_dbt 0] 0]] \ - [string length $postkey] - error_check_good \ - "data later cursor correctness after $i puts" \ - [string length [lindex [lindex $post_dbt 0] 1]]\ - [string length $postdatum] - - error_check_good \ - "earlier cursor correctness after $i puts" \ - $pre_dbt [list [list $prekey $predatum]] - error_check_good \ - "later cursor correctness after $i puts" \ - $post_dbt [list [list $postkey $postdatum]] - } - - # Close cursors. - puts "\tTest$tnum.d: Closing cursors." - for { set i 0 } { $i <= $end } { incr i } { - error_check_good "dbc close ($i)" [$dbc($i) close] 0 - } - unset dbc - error_check_good precursor_close [$precursor close] 0 - error_check_good postcursor_close [$postcursor close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good "db close" [$db close] 0 - } -} diff --git a/storage/bdb/test/test073.tcl b/storage/bdb/test/test073.tcl deleted file mode 100644 index bac753ea6c5..00000000000 --- a/storage/bdb/test/test073.tcl +++ /dev/null @@ -1,290 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test073.tcl,v 11.26 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test073 -# TEST Test of cursor stability on duplicate pages. -# TEST -# TEST Does the following: -# TEST a. Initialize things by DB->putting ndups dups and -# TEST setting a reference cursor to point to each. -# TEST b. 
c_put ndups dups (and correspondingly expanding -# TEST the set of reference cursors) after the last one, making sure -# TEST after each step that all the reference cursors still point to -# TEST the right item. -# TEST c. Ditto, but before the first one. -# TEST d. Ditto, but after each one in sequence first to last. -# TEST e. Ditto, but after each one in sequence from last to first. -# TEST occur relative to the new datum) -# TEST f. Ditto for the two sequence tests, only doing a -# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a -# TEST new one. -proc test073 { method {pagesize 512} {ndups 50} {tnum "073"} args } { - source ./include.tcl - global alphabet - - set omethod [convert_method $method] - set args [convert_args $method $args] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set key "the key" - set txn "" - - puts -nonewline "Test$tnum $omethod ($args): " - if { [is_record_based $method] || [is_rbtree $method] } { - puts "Skipping for method $method." - return - } else { - puts "cursor stability on duplicate pages." - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test073: skipping for specific pagesizes" - return - } - - append args " -pagesize $pagesize -dup" - - set db [eval {berkdb_open \ - -create -mode 0644} $omethod $args $testfile] - error_check_good "db open" [is_valid_db $db] TRUE - - # Number of outstanding keys. - set keys 0 - - puts "\tTest$tnum.a.1: Initializing put loop; $ndups dups, short data." 
- - for { set i 0 } { $i < $ndups } { incr i } { - set datum [makedatum_t73 $i 0] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $datum}] - error_check_good "db put ($i)" $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set is_long($i) 0 - incr keys - } - - puts "\tTest$tnum.a.2: Initializing cursor get loop; $keys dups." - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - for { set i 0 } { $i < $keys } { incr i } { - set datum [makedatum_t73 $i 0] - - set dbc($i) [eval {$db cursor} $txn] - error_check_good "db cursor ($i)"\ - [is_valid_cursor $dbc($i) $db] TRUE - error_check_good "dbc get -get_both ($i)"\ - [$dbc($i) get -get_both $key $datum]\ - [list [list $key $datum]] - } - - puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\ - short data." - - for { set i 0 } { $i < $ndups } { incr i } { - # !!! keys contains the number of the next dup - # to be added (since they start from zero) - - set datum [makedatum_t73 $keys 0] - set curs [eval {$db cursor} $txn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - error_check_good "c_put(DB_KEYLAST, $keys)"\ - [$curs put -keylast $key $datum] 0 - - set dbc($keys) $curs - set is_long($keys) 0 - incr keys - - verify_t73 is_long dbc $keys $key - } - - puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\ - short data." - - for { set i 0 } { $i < $ndups } { incr i } { - # !!! 
keys contains the number of the next dup - # to be added (since they start from zero) - - set datum [makedatum_t73 $keys 0] - set curs [eval {$db cursor} $txn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - error_check_good "c_put(DB_KEYFIRST, $keys)"\ - [$curs put -keyfirst $key $datum] 0 - - set dbc($keys) $curs - set is_long($keys) 0 - incr keys - - verify_t73 is_long dbc $keys $key - } - - puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\ - $keys new dups, short data" - # We want to add a datum after each key from 0 to the current - # value of $keys, which we thus need to save. - set keysnow $keys - for { set i 0 } { $i < $keysnow } { incr i } { - set datum [makedatum_t73 $keys 0] - set curs [eval {$db cursor} $txn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - # Which datum to insert this guy after. - set curdatum [makedatum_t73 $i 0] - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $curdatum]\ - [list [list $key $curdatum]] - error_check_good "c_put(DB_AFTER, $i)"\ - [$curs put -after $datum] 0 - - set dbc($keys) $curs - set is_long($keys) 0 - incr keys - - verify_t73 is_long dbc $keys $key - } - - puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\ - $keys new dups, short data" - - for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } { - set datum [makedatum_t73 $keys 0] - set curs [eval {$db cursor} $txn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - # Which datum to insert this guy before. 
- set curdatum [makedatum_t73 $i 0] - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $curdatum]\ - [list [list $key $curdatum]] - error_check_good "c_put(DB_BEFORE, $i)"\ - [$curs put -before $datum] 0 - - set dbc($keys) $curs - set is_long($keys) 0 - incr keys - - if { $i % 10 == 1 } { - verify_t73 is_long dbc $keys $key - } - } - verify_t73 is_long dbc $keys $key - - puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\ - growing $keys data." - set keysnow $keys - for { set i 0 } { $i < $keysnow } { incr i } { - set olddatum [makedatum_t73 $i 0] - set newdatum [makedatum_t73 $i 1] - set curs [eval {$db cursor} $txn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $olddatum]\ - [list [list $key $olddatum]] - error_check_good "c_put(DB_CURRENT, $i)"\ - [$curs put -current $newdatum] 0 - - error_check_good "cursor close" [$curs close] 0 - - set is_long($i) 1 - - if { $i % 10 == 1 } { - verify_t73 is_long dbc $keys $key - } - } - verify_t73 is_long dbc $keys $key - - # Close cursors. - puts "\tTest$tnum.g: Closing cursors." - for { set i 0 } { $i < $keys } { incr i } { - error_check_good "dbc close ($i)" [$dbc($i) close] 0 - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good "db close" [$db close] 0 -} - -# !!!: This procedure is also used by test087. -proc makedatum_t73 { num is_long } { - global alphabet - if { $is_long == 1 } { - set a $alphabet$alphabet$alphabet - } else { - set a abcdefghijklm - } - - # format won't do leading zeros, alas. - if { $num / 1000 > 0 } { - set i $num - } elseif { $num / 100 > 0 } { - set i 0$num - } elseif { $num / 10 > 0 } { - set i 00$num - } else { - set i 000$num - } - - return $i$a -} - -# !!!: This procedure is also used by test087. 
-proc verify_t73 { is_long_array curs_array numkeys key } { - upvar $is_long_array is_long - upvar $curs_array dbc - upvar db db - - #useful for debugging, perhaps. - eval $db sync - - for { set j 0 } { $j < $numkeys } { incr j } { - set dbt [$dbc($j) get -current] - set k [lindex [lindex $dbt 0] 0] - set d [lindex [lindex $dbt 0] 1] - - error_check_good\ - "cursor $j key correctness (with $numkeys total items)"\ - $k $key - error_check_good\ - "cursor $j data correctness (with $numkeys total items)"\ - $d [makedatum_t73 $j $is_long($j)] - } -} diff --git a/storage/bdb/test/test074.tcl b/storage/bdb/test/test074.tcl deleted file mode 100644 index 8302a230221..00000000000 --- a/storage/bdb/test/test074.tcl +++ /dev/null @@ -1,275 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test074.tcl,v 11.22 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test074 -# TEST Test of DB_NEXT_NODUP. -proc test074 { method {dir -nextnodup} {nitems 100} {tnum "074"} args } { - source ./include.tcl - global alphabet - global is_je_test - global rand_init - - set omethod [convert_method $method] - set args [convert_args $method $args] - - berkdb srand $rand_init - - # Data prefix--big enough that we get a mix of on-page, off-page, - # and multi-off-page dups with the default nitems - if { [is_fixed_length $method] == 1 } { - set globaldata "somedata" - } else { - set globaldata [repeat $alphabet 4] - } - - puts "Test$tnum $omethod ($args): Test of $dir" - - # First, test non-dup (and not-very-interesting) case with - # all db types. - - puts "\tTest$tnum.a: No duplicates." - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum-nodup.db - set env NULL - } else { - set testfile test$tnum-nodup.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - set db [eval {berkdb_open -create -mode 0644} $omethod\ - $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - set txn "" - - # Insert nitems items. - puts "\t\tTest$tnum.a.1: Put loop." - for {set i 1} {$i <= $nitems} {incr i} { - # - # If record based, set key to $i * 2 to leave - # holes/unused entries for further testing. - # - if {[is_record_based $method] == 1} { - set key [expr $i * 2] - } else { - set key "key$i" - } - set data "$globaldata$i" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key \ - [chop_data $method $data]}] - error_check_good put($i) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - puts "\t\tTest$tnum.a.2: Get($dir)" - - # foundarray($i) is set when key number i is found in the database - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - # Initialize foundarray($i) to zero for all $i - for {set i 1} {$i < $nitems} {incr i} { - set foundarray($i) 0 - } - - # Walk database using $dir and record each key gotten. 
- for {set i 1} {$i <= $nitems} {incr i} { - set dbt [$dbc get $dir] - set key [lindex [lindex $dbt 0] 0] - if {[is_record_based $method] == 1} { - set num [expr $key / 2] - set desired_key $key - error_check_good $method:num $key [expr $num * 2] - } else { - set num [string range $key 3 end] - set desired_key key$num - } - - error_check_good dbt_correct($i) $dbt\ - [list [list $desired_key\ - [pad_data $method $globaldata$num]]] - - set foundarray($num) 1 - } - - puts "\t\tTest$tnum.a.3: Final key." - error_check_good last_db_get [$dbc get $dir] [list] - - puts "\t\tTest$tnum.a.4: Verify loop." - for { set i 1 } { $i <= $nitems } { incr i } { - error_check_good found_key($i) $foundarray($i) 1 - } - - error_check_good dbc_close(nodup) [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - # If we are a method that doesn't allow dups, verify that - # we get an empty list if we try to use DB_NEXT_DUP - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - puts "\t\tTest$tnum.a.5: Check DB_NEXT_DUP for $method." - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set dbt [$dbc get $dir] - error_check_good $method:nextdup [$dbc get -nextdup] [list] - error_check_good dbc_close(nextdup) [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - error_check_good db_close(nodup) [$db close] 0 - - # Quit here if we're a method that won't allow dups. - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "\tTest$tnum: Skipping remainder for method $method." - return - } - - foreach opt { "-dup" "-dupsort" } { - if { $is_je_test && $opt == "-dup" } { - continue - } - - # - # If we are using an env, then testfile should just be the - # db name. Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum$opt.db - } else { - set testfile test$tnum$opt.db - } - - if { [string compare $opt "-dupsort"] == 0 } { - set opt "-dup -dupsort" - } - - puts "\tTest$tnum.b: Duplicates ($opt)." - - puts "\t\tTest$tnum.b.1 ($opt): Put loop." - set db [eval {berkdb_open -create -mode 0644}\ - $opt $omethod $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - # Insert nitems different keys such that key i has i dups. - for {set i 1} {$i <= $nitems} {incr i} { - set key key$i - - for {set j 1} {$j <= $i} {incr j} { - if { $j < 10 } { - set data "${globaldata}00$j" - } elseif { $j < 100 } { - set data "${globaldata}0$j" - } else { - set data "$globaldata$j" - } - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn \ - [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $data}] - error_check_good put($i,$j) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - } - - # Initialize foundarray($i) to 0 for all i. - unset foundarray - for { set i 1 } { $i <= $nitems } { incr i } { - set foundarray($i) 0 - } - - # Get loop--after each get, move forward a random increment - # within the duplicate set. - puts "\t\tTest$tnum.b.2 ($opt): Get loop." 
- set one "001" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE - for { set i 1 } { $i <= $nitems } { incr i } { - set dbt [$dbc get $dir] - set key [lindex [lindex $dbt 0] 0] - set num [string range $key 3 end] - - set desired_key key$num - if { [string compare $dir "-prevnodup"] == 0 } { - if { $num < 10 } { - set one "00$num" - } elseif { $num < 100 } { - set one "0$num" - } else { - set one $num - } - } - - error_check_good dbt_correct($i) $dbt\ - [list [list $desired_key\ - "$globaldata$one"]] - - set foundarray($num) 1 - - # Go forward by some number w/i dup set. - set inc [berkdb random_int 0 [expr $num - 1]] - for { set j 0 } { $j < $inc } { incr j } { - eval {$dbc get -nextdup} - } - } - - puts "\t\tTest$tnum.b.3 ($opt): Final key." - error_check_good last_db_get($opt) [$dbc get $dir] [list] - - # Verify - puts "\t\tTest$tnum.b.4 ($opt): Verify loop." - for { set i 1 } { $i <= $nitems } { incr i } { - error_check_good found_key($i) $foundarray($i) 1 - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - } -} diff --git a/storage/bdb/test/test075.tcl b/storage/bdb/test/test075.tcl deleted file mode 100644 index 540d8f0ed73..00000000000 --- a/storage/bdb/test/test075.tcl +++ /dev/null @@ -1,205 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2002 -# Sleepycat Software. All rights reserved. -# -# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $ -# -# TEST test075 -# TEST Test of DB->rename(). 
-# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487]) -proc test075 { method { tnum 75 } args } { - global encrypt - global errorCode - global errorInfo - - source ./include.tcl - set omethod [convert_method $method] - set args [convert_args $method $args] - - puts "Test0$tnum: $method ($args): Test of DB->rename()" - # If we are using an env, then testfile should just be the - # db name. Otherwise it is the test directory and the name. - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - # If we are using an env, then skip this test. - # It needs its own. - incr eindex - set env [lindex $args $eindex] - puts "Skipping test075 for env $env" - return - } - if { $encrypt != 0 } { - puts "Skipping test075 for security" - return - } - - # Define absolute pathnames - set curdir [pwd] - cd $testdir - set fulldir [pwd] - cd $curdir - set reldir $testdir - - # Set up absolute and relative pathnames for test - set paths [list $fulldir $reldir] - foreach path $paths { - puts "\tTest0$tnum: starting test of $path path" - set oldfile $path/test0$tnum-old.db - set newfile $path/test0$tnum.db - set env NULL - set envargs "" - - # Loop through test using the following rename options - # 1. no environment, not in transaction - # 2. with environment, not in transaction - # 3. rename with auto-commit - # 4. rename in committed transaction - # 5. rename in aborted transaction - - foreach op "noenv env auto commit abort" { - - puts "\tTest0$tnum.a: Create/rename file with $op" - - # Make sure we're starting with a clean slate. 
- - if { $op == "noenv" } { - cleanup $path $env - if { $env == "NULL" } { - error_check_bad "$oldfile exists" \ - [file exists $oldfile] 1 - error_check_bad "$newfile exists" \ - [file exists $newfile] 1 - } - } - - if { $op == "env" } { - env_cleanup $path - set env [berkdb_env -create -home $path] - set envargs "-env $env" - error_check_good env_open [is_valid_env $env] TRUE - } - - if { $op == "auto" || $op == "commit" || $op == "abort" } { - env_cleanup $path - set env [berkdb_env -create -home $path -txn] - set envargs "-env $env" - error_check_good env_open [is_valid_env $env] TRUE - } - - puts "\t\tTest0$tnum.a.1: create" - set db [eval {berkdb_open -create -mode 0644} \ - $omethod $envargs $args $oldfile] - error_check_good dbopen [is_valid_db $db] TRUE - - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 0 - error_check_bad \ - "$newfile exists" [file exists $newfile] 1 - } - - # The nature of the key and data are unimportant; - # use numeric key to record-based methods don't need - # special treatment. - set key 1 - set data [pad_data $method data] - - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - - puts "\t\tTest0$tnum.a.2: rename" - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 0 - error_check_bad \ - "$newfile exists" [file exists $newfile] 1 - } - - # Regular renames use berkdb dbrename but transaction - # protected renames must use $env dbrename. 
- if { $op == "noenv" || $op == "env" } { - error_check_good rename_file [eval {berkdb dbrename} \ - $envargs $oldfile $newfile] 0 - } elseif { $op == "auto" } { - error_check_good rename_file [eval {$env dbrename} \ - -auto_commit $oldfile $newfile] 0 - } else { - # $op is "abort" or "commit" - set txn [$env txn] - error_check_good rename_file [eval {$env dbrename} \ - -txn $txn $oldfile $newfile] 0 - error_check_good txn_$op [$txn $op] 0 - } - - if { $env == "NULL" } { - error_check_bad \ - "$oldfile exists" [file exists $oldfile] 1 - error_check_bad \ - "$newfile exists" [file exists $newfile] 0 - } - - puts "\t\tTest0$tnum.a.3: check" - # Open again with create to make sure we're not caching or - # anything silly. In the normal case (no env), we already - # know the file doesn't exist. - set odb [eval {berkdb_open -create -mode 0644} \ - $envargs $omethod $args $oldfile] - set ndb [eval {berkdb_open -create -mode 0644} \ - $envargs $omethod $args $newfile] - error_check_good odb_open [is_valid_db $odb] TRUE - error_check_good ndb_open [is_valid_db $ndb] TRUE - - # The DBT from the "old" database should be empty, - # not the "new" one, except in the case of an abort. - set odbt [$odb get $key] - if { $op == "abort" } { - error_check_good odbt_has_data [llength $odbt] 1 - } else { - set ndbt [$ndb get $key] - error_check_good odbt_empty [llength $odbt] 0 - error_check_bad ndbt_empty [llength $ndbt] 0 - error_check_good ndbt [lindex \ - [lindex $ndbt 0] 1] $data - } - error_check_good odb_close [$odb close] 0 - error_check_good ndb_close [$ndb close] 0 - - # Now there's both an old and a new. Rename the - # "new" to the "old" and make sure that fails. - # - # XXX Ideally we'd do this test even when there's - # an external environment, but that env has - # errpfx/errfile set now. 
:-( - puts "\tTest0$tnum.b: Make sure rename fails\ - instead of overwriting" - if { $env != "NULL" } { - error_check_good env_close [$env close] 0 - set env [berkdb_env_noerr -home $path] - error_check_good env_open2 \ - [is_valid_env $env] TRUE - set ret [catch {eval {berkdb dbrename} \ - -env $env $newfile $oldfile} res] - error_check_bad rename_overwrite $ret 0 - error_check_good rename_overwrite_ret \ - [is_substr $errorCode EEXIST] 1 - } - - # Verify and then start over from a clean slate. - verify_dir $path "\tTest0$tnum.c: " - cleanup $path $env - if { $env != "NULL" } { - error_check_good env_close [$env close] 0 - } - if { $env == "NULL" } { - error_check_bad "$oldfile exists" \ - [file exists $oldfile] 1 - error_check_bad "$newfile exists" \ - [file exists $newfile] 1 - - set oldfile test0$tnum-old.db - set newfile test0$tnum.db - } - } - } -} diff --git a/storage/bdb/test/test076.tcl b/storage/bdb/test/test076.tcl deleted file mode 100644 index 49827d35306..00000000000 --- a/storage/bdb/test/test076.tcl +++ /dev/null @@ -1,84 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test076.tcl,v 1.22 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test076 -# TEST Test creation of many small databases in a single environment. [#1528]. -proc test076 { method { ndbs 1000 } { tnum "076" } args } { - global is_qnx_test - source ./include.tcl - - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - if { [is_record_based $method] == 1 } { - set key "" - } else { - set key "key" - } - set data "datamoredatamoredata" - - # Create an env if we weren't passed one. 
- set txnenv 0 - set eindex [lsearch -exact $args "-env"] - if { $eindex == -1 } { - set deleteenv 1 - env_cleanup $testdir - set env [eval {berkdb_env -create -home} $testdir $encargs] - error_check_good env [is_valid_env $env] TRUE - set args "$args -env $env" - } else { - set deleteenv 0 - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - if { $ndbs == 1000 } { - set ndbs 100 - } - } - set testdir [get_home $env] - } - if { $is_qnx_test && $ndbs > 100 } { - set ndbs 100 - } - puts -nonewline "Test$tnum $method ($args): " - puts -nonewline "Create $ndbs" - puts " small databases in one env." - - cleanup $testdir $env - set txn "" - - for { set i 1 } { $i <= $ndbs } { incr i } { - set testfile test$tnum.$i.db - - set db [eval {berkdb_open -create -mode 0644}\ - $args $omethod $testfile] - error_check_good db_open($i) [is_valid_db $db] TRUE - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key$i \ - [chop_data $method $data$i]}] - error_check_good db_put($i) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close($i) [$db close] 0 - } - - if { $deleteenv == 1 } { - error_check_good env_close [$env close] 0 - } - - puts "\tTest$tnum passed." -} diff --git a/storage/bdb/test/test077.tcl b/storage/bdb/test/test077.tcl deleted file mode 100644 index 3c1fd869b22..00000000000 --- a/storage/bdb/test/test077.tcl +++ /dev/null @@ -1,93 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test077.tcl,v 1.14 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test077 -# TEST Test of DB_GET_RECNO [#1206]. 
-proc test077 { method { nkeys 1000 } { tnum "077" } args } { - source ./include.tcl - global alphabet - - set omethod [convert_method $method] - set args [convert_args $method $args] - - puts "Test$tnum: Test of DB_GET_RECNO." - - if { [is_rbtree $method] != 1 } { - puts "\tTest$tnum: Skipping for method $method." - return - } - - set data $alphabet - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set db [eval {berkdb_open -create -mode 0644} \ - $omethod $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - puts "\tTest$tnum.a: Populating database." - set txn "" - - for { set i 1 } { $i <= $nkeys } { incr i } { - set key [format %5d $i] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$key $data}] - error_check_good db_put($key) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - puts "\tTest$tnum.b: Verifying record numbers." - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE - - set i 1 - for { set dbt [$dbc get -first] } \ - { [string length $dbt] != 0 } \ - { set dbt [$dbc get -next] } { - set recno [$dbc get -get_recno] - set keynum [expr [lindex [lindex $dbt 0] 0]] - - # Verify that i, the number that is the key, and recno - # are all equal. 
- error_check_good key($i) $keynum $i - error_check_good recno($i) $recno $i - incr i - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} diff --git a/storage/bdb/test/test078.tcl b/storage/bdb/test/test078.tcl deleted file mode 100644 index 549fc13c7e7..00000000000 --- a/storage/bdb/test/test078.tcl +++ /dev/null @@ -1,245 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test078.tcl,v 1.26 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test078 -# TEST Test of DBC->c_count(). [#303] -proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } { - source ./include.tcl - global alphabet - global is_je_test - global rand_init - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test$tnum ($method): Test of key counts." - - berkdb srand $rand_init - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - } - - if { $eindex == -1 } { - set testfile $testdir/test$tnum-a.db - set env NULL - } else { - set testfile test$tnum-a.db - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - set nkeys 50 - append args " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test078: skipping for specific pagesizes" - return - } - puts "\tTest$tnum.a: No duplicates, trivial answer." - puts "\t\tTest$tnum.a.1: Populate database, verify dup counts." 
- set db [eval {berkdb_open -create -mode 0644\ - -pagesize $pagesize} $omethod $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - set txn "" - - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$i\ - [pad_data $method $alphabet$i]}] - error_check_good put.a($i) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good count.a [$db count $i] 1 - } - - if { [is_rrecno $method] == 1 } { - error_check_good db_close.a [$db close] 0 - puts "\tTest$tnum.a2: Skipping remainder of test078 for -rrecno." - return - } - - puts "\t\tTest$tnum.a.2: Delete items, verify dup counts again." - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db del} $txn $i] - error_check_good del.a($i) $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good count.a [$db count $i] 0 - } - - - error_check_good db_close.a [$db close] 0 - - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts \ - "\tTest$tnum.b: Duplicates not supported in $method, skipping." - return - } - - foreach {let descrip dupopt} \ - {b sorted "-dup -dupsort" c unsorted "-dup"} { - - if { $eindex == -1 } { - set testfile $testdir/test$tnum-b.db - set env NULL - } else { - set testfile test$tnum-b.db - set env [lindex $args $eindex] - if { $is_je_test && $dupopt == "-dup" } { - continue - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "\tTest$tnum.$let: Duplicates ($descrip)." - puts "\t\tTest$tnum.$let.1: Populating database." 
- - set db [eval {berkdb_open -create -mode 0644\ - -pagesize $pagesize} $dupopt $omethod $args {$testfile}] - error_check_good db_open [is_valid_db $db] TRUE - - for { set i 1 } { $i <= $nkeys } { incr i } { - for { set j 0 } { $j < $i } { incr j } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn \ - [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$i\ - [pad_data $method $j$alphabet]}] - error_check_good put.$let,$i $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - } - - puts -nonewline "\t\tTest$tnum.$let.2: " - puts "Verifying duplicate counts." - for { set i 1 } { $i <= $nkeys } { incr i } { - error_check_good count.$let,$i \ - [$db count $i] $i - } - - puts -nonewline "\t\tTest$tnum.$let.3: " - puts "Delete every other dup by cursor, verify counts." - - # Delete every other item by cursor and check counts. - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set c [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - set j 0 - - for { set ret [$c get -first]} { [llength $ret] > 0 } \ - { set ret [$c get -next]} { - set key [lindex [lindex $ret 0] 0] - if { $key == $i } { - set data [lindex [lindex $ret 0 ] 1] - set num [string range $data 0 \ - end-[string length $alphabet]] - if { [expr $num % 2] == 0 } { - error_check_good \ - c_del [$c del] 0 - incr j - } - if { $txnenv == 0 } { - error_check_good count.$let.$i-$j \ - [$db count $i] [expr $i - $j] - } - } - } - error_check_good curs_close [$c close] 0 - if { $txnenv == 1 } { - error_check_good txn_commit [$t commit] 0 - } - error_check_good count.$let.$i-$j \ - [$db count $i] [expr $i - $j] - } - - puts -nonewline "\t\tTest$tnum.$let.4: " - puts "Delete all items by cursor, verify counts." 
- for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set c [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - for { set ret [$c get -first]} { [llength $ret] > 0 } \ - { set ret [$c get -next]} { - set key [lindex [lindex $ret 0] 0] - if { $key == $i } { - error_check_good c_del [$c del] 0 - } - } - error_check_good curs_close [$c close] 0 - if { $txnenv == 1 } { - error_check_good txn_commit [$t commit] 0 - } - error_check_good db_count_zero [$db count $i] 0 - } - - puts -nonewline "\t\tTest$tnum.$let.5: " - puts "Add back one item, verify counts." - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {$i\ - [pad_data $method $alphabet]}] - error_check_good put.$let,$i $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good add_one [$db count $i] 1 - } - - puts -nonewline "\t\tTest$tnum.$let.6: " - puts "Delete remaining entries, verify counts." - for { set i 1 } { $i <= $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - error_check_good db_del [eval {$db del} $txn {$i}] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good count.$let.$i [$db count $i] 0 - } - error_check_good db_close.$let [$db close] 0 - } -} diff --git a/storage/bdb/test/test079.tcl b/storage/bdb/test/test079.tcl deleted file mode 100644 index d5dbc330409..00000000000 --- a/storage/bdb/test/test079.tcl +++ /dev/null @@ -1,29 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test079.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test079 -# TEST Test of deletes in large trees. (test006 w/ sm. pagesize). -# TEST -# TEST Check that delete operations work in large btrees. 10000 entries -# TEST and a pagesize of 512 push this out to a four-level btree, with a -# TEST small fraction of the entries going on overflow pages. -proc test079 { method {nentries 10000} {pagesize 512} {tnum "079"} \ - {ndups 20} args} { - if { [ is_queueext $method ] == 1 } { - set method "queue"; - lappend args "-extent" "20" - } - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test$tnum: skipping for specific pagesizes" - return - } - - eval {test006 $method $nentries 1 $tnum $ndups -pagesize \ - $pagesize} $args -} diff --git a/storage/bdb/test/test080.tcl b/storage/bdb/test/test080.tcl deleted file mode 100644 index 9f649496f68..00000000000 --- a/storage/bdb/test/test080.tcl +++ /dev/null @@ -1,126 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2002 -# Sleepycat Software. All rights reserved. -# -# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $ -# -# TEST test080 -# TEST Test of DB->remove() -proc test080 { method {tnum 80} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test0$tnum: Test of DB->remove()" - - # Determine full path - set curdir [pwd] - cd $testdir - set fulldir [pwd] - cd $curdir - - # Test both relative and absolute path - set paths [list $fulldir $testdir] - - # If we are using an env, then skip this test. - # It needs its own. 
- set eindex [lsearch -exact $args "-env"] - set encargs "" - set args [split_encargs $args encargs] - if { $encargs != ""} { - puts "Skipping test080 for security" - return - } - if { $eindex != -1 } { - incr eindex - set e [lindex $args $eindex] - puts "Skipping test080 for env $e" - return - } - - foreach path $paths { - - set dbfile test0$tnum.db - set testfile $path/$dbfile - - # Loop through test using the following remove options - # 1. no environment, not in transaction - # 2. with environment, not in transaction - # 3. rename with auto-commit - # 4. rename in committed transaction - # 5. rename in aborted transaction - - foreach op "noenv env auto commit abort" { - - # Make sure we're starting with a clean slate. - env_cleanup $testdir - if { $op == "noenv" } { - set dbfile $testfile - set e NULL - set envargs "" - } else { - if { $op == "env" } { - set largs "" - } else { - set largs " -txn" - } - set e [eval {berkdb_env -create -home $path} $largs] - set envargs "-env $e" - error_check_good env_open [is_valid_env $e] TRUE - } - - puts "\tTest0$tnum: dbremove with $op in $path" - puts "\tTest0$tnum.a.1: Create file" - set db [eval {berkdb_open -create -mode 0644} $omethod \ - $envargs $args {$dbfile}] - error_check_good db_open [is_valid_db $db] TRUE - - # The nature of the key and data are unimportant; - # use numeric key to record-based methods don't need - # special treatment. 
- set key 1 - set data [pad_data $method data] - - error_check_good dbput [$db put $key $data] 0 - error_check_good dbclose [$db close] 0 - error_check_good file_exists_before \ - [file exists $testfile] 1 - - # Use berkdb dbremove for non-transactional tests - # and $env dbremove for transactional tests - puts "\tTest0$tnum.a.2: Remove file" - if { $op == "noenv" || $op == "env" } { - error_check_good remove_$op \ - [eval {berkdb dbremove} $envargs $dbfile] 0 - } elseif { $op == "auto" } { - error_check_good remove_$op \ - [eval {$e dbremove} -auto_commit $dbfile] 0 - } else { - # $op is "abort" or "commit" - set txn [$e txn] - error_check_good remove_$op \ - [eval {$e dbremove} -txn $txn $dbfile] 0 - error_check_good txn_$op [$txn $op] 0 - } - - puts "\tTest0$tnum.a.3: Check that file is gone" - # File should now be gone, except in the case of an abort. - if { $op != "abort" } { - error_check_good exists_after \ - [file exists $testfile] 0 - } else { - error_check_good exists_after \ - [file exists $testfile] 1 - } - - if { $e != "NULL" } { - error_check_good env_close [$e close] 0 - } - - set dbfile test0$tnum-old.db - set testfile $path/$dbfile - } - } -} diff --git a/storage/bdb/test/test081.tcl b/storage/bdb/test/test081.tcl deleted file mode 100644 index a9c7f5cfc86..00000000000 --- a/storage/bdb/test/test081.tcl +++ /dev/null @@ -1,15 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test081.tcl,v 11.9 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test081 -# TEST Test off-page duplicates and overflow pages together with -# TEST very large keys (key/data as file contents). 
-proc test081 { method {ndups 13} {tnum "081"} args} { - source ./include.tcl - - eval {test017 $method 1 $ndups $tnum} $args -} diff --git a/storage/bdb/test/test082.tcl b/storage/bdb/test/test082.tcl deleted file mode 100644 index fb4d71c5c49..00000000000 --- a/storage/bdb/test/test082.tcl +++ /dev/null @@ -1,14 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test082.tcl,v 11.8 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test082 -# TEST Test of DB_PREV_NODUP (uses test074). -proc test082 { method {dir -prevnodup} {nitems 100} {tnum "082"} args} { - source ./include.tcl - - eval {test074 $method $dir $nitems $tnum} $args -} diff --git a/storage/bdb/test/test083.tcl b/storage/bdb/test/test083.tcl deleted file mode 100644 index 7e4a8b960a4..00000000000 --- a/storage/bdb/test/test083.tcl +++ /dev/null @@ -1,166 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test083.tcl,v 11.16 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test083 -# TEST Test of DB->key_range. -proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} { - source ./include.tcl - - global rand_init - error_check_good set_random_seed [berkdb srand $rand_init] 0 - - set omethod [convert_method $method] - set args [convert_args $method $args] - - puts "Test083 $method ($args): Test of DB->key_range" - if { [is_btree $method] != 1 } { - puts "\tTest083: Skipping for method $method." - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test083: skipping for specific pagesizes" - return - } - - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- set txnenv 0 - set eindex [lsearch -exact $args "-env"] - if { $eindex == -1 } { - set testfile $testdir/test083.db - set env NULL - } else { - set testfile test083.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - - # We assume that numbers will be at most six digits wide - error_check_bad maxitems_range [expr $maxitems > 999999] 1 - - # We want to test key_range on a variety of sizes of btree. - # Start at ten keys and work up to $maxitems keys, at each step - # multiplying the number of keys by $step. - for { set nitems 10 } { $nitems <= $maxitems }\ - { set nitems [expr $nitems * $step] } { - - puts "\tTest083.a: Opening new database" - if { $env != "NULL"} { - set testdir [get_home $env] - } - cleanup $testdir $env - set db [eval {berkdb_open -create -mode 0644} \ - -pagesize $pgsz $omethod $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - t83_build $db $nitems $env $txnenv - t83_test $db $nitems $env $txnenv - - error_check_good db_close [$db close] 0 - } -} - -proc t83_build { db nitems env txnenv } { - source ./include.tcl - - puts "\tTest083.b: Populating database with $nitems keys" - - set keylist {} - puts "\t\tTest083.b.1: Generating key list" - for { set i 0 } { $i < $nitems } { incr i } { - lappend keylist $i - } - - # With randomly ordered insertions, the range of errors we - # get from key_range can be unpredictably high [#2134]. For now, - # just skip the randomization step. - #puts "\t\tTest083.b.2: Randomizing key list" - #set keylist [randomize_list $keylist] - #puts "\t\tTest083.b.3: Populating database with randomized keys" - - puts "\t\tTest083.b.2: Populating database" - set data [repeat . 
50] - set txn "" - foreach keynum $keylist { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {key[format %6d $keynum] $data}] - error_check_good db_put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } -} - -proc t83_test { db nitems env txnenv } { - # Look at the first key, then at keys about 1/4, 1/2, 3/4, and - # all the way through the database. Make sure the key_ranges - # aren't off by more than 10%. - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } else { - set txn "" - } - set dbc [eval {$db cursor} $txn] - error_check_good dbc [is_valid_cursor $dbc $db] TRUE - - puts "\tTest083.c: Verifying ranges..." - - for { set i 0 } { $i < $nitems } \ - { incr i [expr $nitems / [berkdb random_int 3 16]] } { - puts "\t\t...key $i" - error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0 - - for { set j 0 } { $j < $i } { incr j } { - error_check_bad key$j \ - [llength [set dbt [$dbc get -next]]] 0 - } - - set ranges [$db keyrange [lindex [lindex $dbt 0] 0]] - - #puts $ranges - error_check_good howmanyranges [llength $ranges] 3 - - set lessthan [lindex $ranges 0] - set morethan [lindex $ranges 2] - - set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan] - - roughly_equal $rangesum 1 0.05 - - # Wild guess. 
- if { $nitems < 500 } { - set tol 0.3 - } elseif { $nitems > 500 } { - set tol 0.15 - } - - roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol - - } - - error_check_good dbc_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } -} - -proc roughly_equal { a b tol } { - error_check_good "$a =~ $b" [expr $a - $b < $tol] 1 -} diff --git a/storage/bdb/test/test084.tcl b/storage/bdb/test/test084.tcl deleted file mode 100644 index 036c1c41118..00000000000 --- a/storage/bdb/test/test084.tcl +++ /dev/null @@ -1,53 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test084.tcl,v 11.15 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test084 -# TEST Basic sanity test (test001) with large (64K) pages. -proc test084 { method {nentries 10000} {tnum "084"} {pagesize 65536} args} { - source ./include.tcl - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum-empty.db - set env NULL - } else { - set testfile test$tnum-empty.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test084: skipping for specific pagesizes" - return - } - - cleanup $testdir $env - - set args "-pagesize $pagesize $args" - - eval {test001 $method $nentries 0 0 $tnum} $args - - set omethod [convert_method $method] - set args [convert_args $method $args] - - # For good measure, create a second database that's empty - # with the large page size. (There was a verifier bug that - # choked on empty 64K pages. 
[#2408]) - set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile] - error_check_good empty_db [is_valid_db $db] TRUE - error_check_good empty_db_close [$db close] 0 -} diff --git a/storage/bdb/test/test085.tcl b/storage/bdb/test/test085.tcl deleted file mode 100644 index 373db33a5ad..00000000000 --- a/storage/bdb/test/test085.tcl +++ /dev/null @@ -1,331 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test085.tcl,v 1.18 2004/09/20 17:29:32 carol Exp $ -# -# TEST test085 -# TEST Test of cursor behavior when a cursor is pointing to a deleted -# TEST btree key which then has duplicates added. [#2473] -proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } { - source ./include.tcl - global alphabet - - set omethod [convert_method $method] - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test085: skipping for specific pagesizes" - return - } - cleanup $testdir $env - - # Keys must sort $prekey < $key < $postkey. - set prekey "AA" - set key "BBB" - set postkey "CCCC" - - # Make these distinguishable from each other and from the - # alphabets used for the $key's data. 
- set predatum "1234567890" - set datum $alphabet - set postdatum "0987654321" - set txn "" - - append args " -pagesize $pagesize -dup" - - puts -nonewline "Test$tnum $omethod ($args): " - - # Skip for all non-btrees. (Rbtrees don't count as btrees, for - # now, since they don't support dups.) - if { [is_btree $method] != 1 } { - puts "Skipping for method $method." - return - } else { - puts "Duplicates w/ deleted item cursor." - } - - # Repeat the test with both on-page and off-page numbers of dups. - foreach ndups "$onp $offp" { - # Put operations we want to test on a cursor set to the - # deleted item, the key to use with them, and what should - # come before and after them given a placement of - # the deleted item at the beginning or end of the dupset. - set final [expr $ndups - 1] - set putops { - {{-before} "" $predatum {[test085_ddatum 0]} beginning} - {{-before} "" {[test085_ddatum $final]} $postdatum end} - {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning} - {{-keyfirst} $key $predatum {[test085_ddatum 0]} end} - {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning} - {{-keylast} $key {[test085_ddatum $final]} $postdatum end} - {{-after} "" $predatum {[test085_ddatum 0]} beginning} - {{-after} "" {[test085_ddatum $final]} $postdatum end} - } - - # Get operations we want to test on a cursor set to the - # deleted item, any args to get, and the expected key/data pair. 
- set getops { - {{-current} "" "" "" beginning} - {{-current} "" "" "" end} - {{-next} "" $key {[test085_ddatum 0]} beginning} - {{-next} "" $postkey $postdatum end} - {{-prev} "" $prekey $predatum beginning} - {{-prev} "" $key {[test085_ddatum $final]} end} - {{-first} "" $prekey $predatum beginning} - {{-first} "" $prekey $predatum end} - {{-last} "" $postkey $postdatum beginning} - {{-last} "" $postkey $postdatum end} - {{-nextdup} "" $key {[test085_ddatum 0]} beginning} - {{-nextdup} "" EMPTYLIST "" end} - {{-nextnodup} "" $postkey $postdatum beginning} - {{-nextnodup} "" $postkey $postdatum end} - {{-prevnodup} "" $prekey $predatum beginning} - {{-prevnodup} "" $prekey $predatum end} - } - - set txn "" - foreach pair $getops { - set op [lindex $pair 0] - puts "\tTest$tnum: Get ($op) with $ndups duplicates,\ - cursor at the [lindex $pair 4]." - set db [eval {berkdb_open -create \ - -mode 0644} $omethod $encargs $args $testfile] - error_check_good "db open" [is_valid_db $db] TRUE - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn \ - [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [test085_setup $db $txn] - - set beginning [expr [string compare \ - [lindex $pair 4] "beginning"] == 0] - - for { set i 0 } { $i < $ndups } { incr i } { - if { $beginning } { - error_check_good db_put($i) \ - [eval {$db put} $txn \ - {$key [test085_ddatum $i]}] 0 - } else { - set c [eval {$db cursor} $txn] - set j [expr $ndups - $i - 1] - error_check_good db_cursor($j) \ - [is_valid_cursor $c $db] TRUE - set d [test085_ddatum $j] - error_check_good dbc_put($j) \ - [$c put -keyfirst $key $d] 0 - error_check_good c_close [$c close] 0 - } - } - - set gargs [lindex $pair 1] - set ekey "" - set edata "" - eval set ekey [lindex $pair 2] - eval set edata [lindex $pair 3] - - set dbt [eval $dbc get $op $gargs] - if { [string compare $ekey EMPTYLIST] == 0 || \ - [string compare $op -current] == 0 } { - error_check_good dbt($op,$ndups) \ - [llength $dbt] 0 - } 
else { - error_check_good dbt($op,$ndups) $dbt \ - [list [list $ekey $edata]] - } - error_check_good "dbc close" [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good "db close" [$db close] 0 - verify_dir $testdir "\t\t" - - # Remove testfile so we can do without truncate flag. - # This is okay because we've already done verify and - # dump/load. - if { $env == "NULL" } { - set ret [eval {berkdb dbremove} \ - $encargs $testfile] - } elseif { $txnenv == 1 } { - set ret [eval "$env dbremove" \ - -auto_commit $encargs $testfile] - } else { - set ret [eval {berkdb dbremove} \ - -env $env $encargs $testfile] - } - error_check_good dbremove $ret 0 - - } - - foreach pair $putops { - # Open and set up database. - set op [lindex $pair 0] - puts "\tTest$tnum: Put ($op) with $ndups duplicates,\ - cursor at the [lindex $pair 4]." - set db [eval {berkdb_open -create \ - -mode 0644} $omethod $args $encargs $testfile] - error_check_good "db open" [is_valid_db $db] TRUE - - set beginning [expr [string compare \ - [lindex $pair 4] "beginning"] == 0] - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [test085_setup $db $txn] - - # Put duplicates. - for { set i 0 } { $i < $ndups } { incr i } { - if { $beginning } { - error_check_good db_put($i) \ - [eval {$db put} $txn \ - {$key [test085_ddatum $i]}] 0 - } else { - set c [eval {$db cursor} $txn] - set j [expr $ndups - $i - 1] - error_check_good db_cursor($j) \ - [is_valid_cursor $c $db] TRUE - set d [test085_ddatum $j] - error_check_good dbc_put($j) \ - [$c put -keyfirst $key $d] 0 - error_check_good c_close [$c close] 0 - } - } - - # Set up cursors for stability test. 
- set pre_dbc [eval {$db cursor} $txn] - error_check_good pre_set [$pre_dbc get -set $prekey] \ - [list [list $prekey $predatum]] - set post_dbc [eval {$db cursor} $txn] - error_check_good post_set [$post_dbc get -set $postkey]\ - [list [list $postkey $postdatum]] - set first_dbc [eval {$db cursor} $txn] - error_check_good first_set \ - [$first_dbc get -get_both $key [test085_ddatum 0]] \ - [list [list $key [test085_ddatum 0]]] - set last_dbc [eval {$db cursor} $txn] - error_check_good last_set \ - [$last_dbc get -get_both $key [test085_ddatum \ - [expr $ndups - 1]]] \ - [list [list $key [test085_ddatum [expr $ndups -1]]]] - - set k [lindex $pair 1] - set d_before "" - set d_after "" - eval set d_before [lindex $pair 2] - eval set d_after [lindex $pair 3] - set newdatum "NewDatum" - error_check_good dbc_put($op,$ndups) \ - [eval $dbc put $op $k $newdatum] 0 - error_check_good dbc_prev($op,$ndups) \ - [lindex [lindex [$dbc get -prev] 0] 1] \ - $d_before - error_check_good dbc_current($op,$ndups) \ - [lindex [lindex [$dbc get -next] 0] 1] \ - $newdatum - - error_check_good dbc_next($op,$ndups) \ - [lindex [lindex [$dbc get -next] 0] 1] \ - $d_after - - # Verify stability of pre- and post- cursors. - error_check_good pre_stable [$pre_dbc get -current] \ - [list [list $prekey $predatum]] - error_check_good post_stable [$post_dbc get -current] \ - [list [list $postkey $postdatum]] - error_check_good first_stable \ - [$first_dbc get -current] \ - [list [list $key [test085_ddatum 0]]] - error_check_good last_stable \ - [$last_dbc get -current] \ - [list [list $key [test085_ddatum [expr $ndups -1]]]] - - foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" { - error_check_good ${c}_close [$c close] 0 - } - - error_check_good "dbc close" [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good "db close" [$db close] 0 - verify_dir $testdir "\t\t" - - # Remove testfile so we can do without truncate flag. 
- # This is okay because we've already done verify and - # dump/load. - if { $env == "NULL" } { - set ret [eval {berkdb dbremove} \ - $encargs $testfile] - } elseif { $txnenv == 1 } { - set ret [eval "$env dbremove" \ - -auto_commit $encargs $testfile] - } else { - set ret [eval {berkdb dbremove} \ - -env $env $encargs $testfile] - } - error_check_good dbremove $ret 0 - } - } -} - -# Set up the test database; put $prekey, $key, and $postkey with their -# respective data, and then delete $key with a new cursor. Return that -# cursor, still pointing to the deleted item. -proc test085_setup { db txn } { - upvar key key - upvar prekey prekey - upvar postkey postkey - upvar predatum predatum - upvar postdatum postdatum - - # no one else should ever see this one! - set datum "bbbbbbbb" - - error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0 - error_check_good main_put [eval {$db put} $txn {$key $datum}] 0 - error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0 - - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - error_check_good dbc_getset [$dbc get -get_both $key $datum] \ - [list [list $key $datum]] - - error_check_good dbc_del [$dbc del] 0 - - return $dbc -} - -proc test085_ddatum { a } { - global alphabet - return $a$alphabet -} diff --git a/storage/bdb/test/test086.tcl b/storage/bdb/test/test086.tcl deleted file mode 100644 index 8b2f7db81be..00000000000 --- a/storage/bdb/test/test086.tcl +++ /dev/null @@ -1,166 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test086.tcl,v 11.12 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test086 -# TEST Test of cursor stability across btree splits/rsplits with -# TEST subtransaction aborts (a variant of test048). 
[#2373] -proc test086 { method args } { - global errorCode - source ./include.tcl - - set tnum 086 - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - - if { [is_btree $method] != 1 } { - puts "Test$tnum skipping for method $method." - return - } - - set method "-btree" - - puts "\tTest$tnum: Test of cursor stability across aborted\ - btree splits." - - set key "key" - set data "data" - set txn "" - set flags "" - - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then this test won't work. - if { $eindex == -1 } { - # But we will be using our own env... - set testfile test$tnum.db - } else { - puts "\tTest$tnum: Environment provided; skipping test." - return - } - set t1 $testdir/t1 - env_cleanup $testdir - - set env [eval {berkdb_env -create -home $testdir -txn} $encargs] - error_check_good berkdb_env [is_valid_env $env] TRUE - - puts "\tTest$tnum.a: Create $method database." - set oflags "-auto_commit -create -env $env -mode 0644 $args $method" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 5 - # Fill page w/ small key/data pairs, keep at leaf - # - puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs." - set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - for { set i 0 } { $i < $nkeys } { incr i } { - set ret [$db put -txn $txn key000$i $data$i] - error_check_good dbput $ret 0 - } - error_check_good commit [$txn commit] 0 - - # get db ordering, set cursors - puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs." 
- set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - for {set i 0; set ret [$db get -txn $txn key000$i]} {\ - $i < $nkeys && [llength $ret] != 0} {\ - incr i; set ret [$db get -txn $txn key000$i]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - set dbc [$db cursor -txn $txn] - set dbc_set($i) $dbc - error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1 - set ret [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc_set($i)_get:set [llength $ret] 0 - } - - # Create child txn. - set ctxn [$env txn -parent $txn] - error_check_good ctxn [is_valid_txn $txn $env] TRUE - - # if mkeys is above 1000, need to adjust below for lexical order - set mkeys 1000 - puts "\tTest$tnum.d: Add $mkeys pairs to force split." - for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 100 } { - set ret [$db put -txn $ctxn key0$i $data$i] - } elseif { $i >= 10 } { - set ret [$db put -txn $ctxn key00$i $data$i] - } else { - set ret [$db put -txn $ctxn key000$i $data$i] - } - error_check_good dbput:more $ret 0 - } - - puts "\tTest$tnum.e: Abort." - error_check_good ctxn_abort [$ctxn abort] 0 - - puts "\tTest$tnum.f: Check and see that cursors maintained reference." - for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - # Put (and this time keep) the keys that caused the split. - # We'll delete them to test reverse splits. - puts "\tTest$tnum.g: Put back added keys." 
- for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 100 } { - set ret [$db put -txn $txn key0$i $data$i] - } elseif { $i >= 10 } { - set ret [$db put -txn $txn key00$i $data$i] - } else { - set ret [$db put -txn $txn key000$i $data$i] - } - error_check_good dbput:more $ret 0 - } - - puts "\tTest$tnum.h: Delete added keys to force reverse split." - set ctxn [$env txn -parent $txn] - error_check_good ctxn [is_valid_txn $txn $env] TRUE - for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 100 } { - error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0 - } elseif { $i >= 10 } { - error_check_good db_del:$i \ - [$db del -txn $ctxn key00$i] 0 - } else { - error_check_good db_del:$i \ - [$db del -txn $ctxn key000$i] 0 - } - } - - puts "\tTest$tnum.i: Abort." - error_check_good ctxn_abort [$ctxn abort] 0 - - puts "\tTest$tnum.j: Verify cursor reference." - for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - puts "\tTest$tnum.j: Cleanup." - # close cursors - for {set i 0} { $i < $nkeys } {incr i} { - error_check_good dbc_close:$i [$dbc_set($i) close] 0 - } - - error_check_good commit [$txn commit] 0 - error_check_good dbclose [$db close] 0 - error_check_good envclose [$env close] 0 - - puts "\tTest$tnum complete." -} diff --git a/storage/bdb/test/test087.tcl b/storage/bdb/test/test087.tcl deleted file mode 100644 index 7501f4ce3f6..00000000000 --- a/storage/bdb/test/test087.tcl +++ /dev/null @@ -1,290 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test087.tcl,v 11.19 2004/01/28 03:36:31 bostic Exp $ -# -# TEST test087 -# TEST Test of cursor stability when converting to and modifying -# TEST off-page duplicate pages with subtransaction aborts. [#2373] -# TEST -# TEST Does the following: -# TEST a. Initialize things by DB->putting ndups dups and -# TEST setting a reference cursor to point to each. Do each put twice, -# TEST first aborting, then committing, so we're sure to abort the move -# TEST to off-page dups at some point. -# TEST b. c_put ndups dups (and correspondingly expanding -# TEST the set of reference cursors) after the last one, making sure -# TEST after each step that all the reference cursors still point to -# TEST the right item. -# TEST c. Ditto, but before the first one. -# TEST d. Ditto, but after each one in sequence first to last. -# TEST e. Ditto, but after each one in sequence from last to first. -# TEST occur relative to the new datum) -# TEST f. Ditto for the two sequence tests, only doing a -# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a -# TEST new one. -proc test087 { method {pagesize 512} {ndups 50} {tnum "087"} args } { - source ./include.tcl - global alphabet - - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - puts "Test$tnum $omethod ($args): " - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then return - if { $eindex != -1 } { - puts "Environment specified; skipping." - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test087: skipping for specific pagesizes" - return - } - env_cleanup $testdir - set testfile test$tnum.db - set key "the key" - append args " -pagesize $pagesize -dup" - - if { [is_record_based $method] || [is_rbtree $method] } { - puts "Skipping for method $method." - return - } else { - puts "Test$tnum: Cursor stability on dup. pages w/ aborts." 
- } - - set env [eval {berkdb_env -create -home $testdir -txn} $encargs] - error_check_good env_create [is_valid_env $env] TRUE - - set db [eval {berkdb_open -auto_commit \ - -create -env $env -mode 0644} $omethod $args $testfile] - error_check_good "db open" [is_valid_db $db] TRUE - - # Number of outstanding keys. - set keys $ndups - - puts "\tTest$tnum.a: put/abort/put/commit loop;\ - $ndups dups, short data." - set txn [$env txn] - error_check_good txn [is_valid_txn $txn $env] TRUE - for { set i 0 } { $i < $ndups } { incr i } { - set datum [makedatum_t73 $i 0] - - set ctxn [$env txn -parent $txn] - error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE - error_check_good "db put/abort ($i)" \ - [$db put -txn $ctxn $key $datum] 0 - error_check_good ctxn_abort($i) [$ctxn abort] 0 - - verify_t73 is_long dbc [expr $i - 1] $key - - set ctxn [$env txn -parent $txn] - error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE - error_check_good "db put/commit ($i)" \ - [$db put -txn $ctxn $key $datum] 0 - error_check_good ctxn_commit($i) [$ctxn commit] 0 - - set is_long($i) 0 - - set dbc($i) [$db cursor -txn $txn] - error_check_good "db cursor ($i)"\ - [is_valid_cursor $dbc($i) $db] TRUE - error_check_good "dbc get -get_both ($i)"\ - [$dbc($i) get -get_both $key $datum]\ - [list [list $key $datum]] - - verify_t73 is_long dbc $i $key - } - - puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\ - short data." - - set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - for { set i 0 } { $i < $ndups } { incr i } { - # !!! 
keys contains the number of the next dup - # to be added (since they start from zero) - set datum [makedatum_t73 $keys 0] - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - error_check_good "c_put(DB_KEYLAST, $keys)"\ - [$curs put -keylast $key $datum] 0 - - # We can't do a verification while a child txn is active, - # or we'll run into trouble when DEBUG_ROP is enabled. - # If this test has trouble, though, uncommenting this - # might be illuminating--it makes things a bit more rigorous - # and works fine when DEBUG_ROP is not enabled. - # verify_t73 is_long dbc $keys $key - error_check_good curs_close [$curs close] 0 - } - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\ - short data." - - set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - for { set i 0 } { $i < $ndups } { incr i } { - # !!! keys contains the number of the next dup - # to be added (since they start from zero) - - set datum [makedatum_t73 $keys 0] - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - error_check_good "c_put(DB_KEYFIRST, $keys)"\ - [$curs put -keyfirst $key $datum] 0 - - # verify_t73 is_long dbc $keys $key - error_check_good curs_close [$curs close] 0 - } - # verify_t73 is_long dbc $keys $key - # verify_t73 is_long dbc $keys $key - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\ - $keys new dups, short data" - # We want to add a datum after each key from 0 to the current - # value of $keys, which we thus need to save. 
- set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - set keysnow $keys - for { set i 0 } { $i < $keysnow } { incr i } { - set datum [makedatum_t73 $keys 0] - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - # Which datum to insert this guy after. - set curdatum [makedatum_t73 $i 0] - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $curdatum]\ - [list [list $key $curdatum]] - error_check_good "c_put(DB_AFTER, $i)"\ - [$curs put -after $datum] 0 - - # verify_t73 is_long dbc $keys $key - error_check_good curs_close [$curs close] 0 - } - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\ - $keys new dups, short data" - set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } { - set datum [makedatum_t73 $keys 0] - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - # Which datum to insert this guy before. - set curdatum [makedatum_t73 $i 0] - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $curdatum]\ - [list [list $key $curdatum]] - error_check_good "c_put(DB_BEFORE, $i)"\ - [$curs put -before $datum] 0 - - # verify_t73 is_long dbc $keys $key - error_check_good curs_close [$curs close] 0 - } - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\ - growing $keys data." 
- set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - for { set i 0 } { $i < $keysnow } { incr i } { - set olddatum [makedatum_t73 $i 0] - set newdatum [makedatum_t73 $i 1] - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db]\ - TRUE - - error_check_good "c_get(DB_GET_BOTH, $i)"\ - [$curs get -get_both $key $olddatum]\ - [list [list $key $olddatum]] - error_check_good "c_put(DB_CURRENT, $i)"\ - [$curs put -current $newdatum] 0 - - set is_long($i) 1 - - # verify_t73 is_long dbc $keys $key - error_check_good curs_close [$curs close] 0 - } - error_check_good ctxn_abort [$ctxn abort] 0 - for { set i 0 } { $i < $keysnow } { incr i } { - set is_long($i) 0 - } - verify_t73 is_long dbc $keys $key - - # Now delete the first item, abort the deletion, and make sure - # we're still sane. - puts "\tTest$tnum.g: Cursor delete first item, then abort delete." - set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE - set datum [makedatum_t73 0 0] - error_check_good "c_get(DB_GET_BOTH, 0)"\ - [$curs get -get_both $key $datum] [list [list $key $datum]] - error_check_good "c_del(0)" [$curs del] 0 - error_check_good curs_close [$curs close] 0 - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - # Ditto, for the last item. - puts "\tTest$tnum.h: Cursor delete last item, then abort delete." 
- set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE - set datum [makedatum_t73 [expr $keys - 1] 0] - error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\ - [$curs get -get_both $key $datum] [list [list $key $datum]] - error_check_good "c_del(0)" [$curs del] 0 - error_check_good curs_close [$curs close] 0 - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - # Ditto, for all the items. - puts "\tTest$tnum.i: Cursor delete all items, then abort delete." - set ctxn [$env txn -parent $txn] - error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE - set curs [$db cursor -txn $ctxn] - error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE - set datum [makedatum_t73 0 0] - error_check_good "c_get(DB_GET_BOTH, 0)"\ - [$curs get -get_both $key $datum] [list [list $key $datum]] - error_check_good "c_del(0)" [$curs del] 0 - for { set i 1 } { $i < $keys } { incr i } { - error_check_good "c_get(DB_NEXT, $i)"\ - [$curs get -next] [list [list $key [makedatum_t73 $i 0]]] - error_check_good "c_del($i)" [$curs del] 0 - } - error_check_good curs_close [$curs close] 0 - error_check_good ctxn_abort [$ctxn abort] 0 - verify_t73 is_long dbc $keys $key - - # Close cursors. - puts "\tTest$tnum.j: Closing cursors." - for { set i 0 } { $i < $keys } { incr i } { - error_check_good "dbc close ($i)" [$dbc($i) close] 0 - } - error_check_good txn_commit [$txn commit] 0 - error_check_good "db close" [$db close] 0 - error_check_good "env close" [$env close] 0 -} diff --git a/storage/bdb/test/test088.tcl b/storage/bdb/test/test088.tcl deleted file mode 100644 index 2d3ec739630..00000000000 --- a/storage/bdb/test/test088.tcl +++ /dev/null @@ -1,174 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test088.tcl,v 11.15 2004/05/13 18:51:44 mjc Exp $ -# -# TEST test088 -# TEST Test of cursor stability across btree splits with very -# TEST deep trees (a variant of test048). [#2514] -proc test088 { method args } { - source ./include.tcl - global alphabet - global errorCode - global is_je_test - - set tstn 088 - set args [convert_args $method $args] - - if { [is_btree $method] != 1 } { - puts "Test$tstn skipping for method $method." - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test088: skipping for specific pagesizes" - return - } - - set method "-btree" - - puts "\tTest$tstn: Test of cursor stability across btree splits." - - set key "key$alphabet$alphabet$alphabet" - set data "data$alphabet$alphabet$alphabet" - set txn "" - set flags "" - - puts "\tTest$tstn.a: Create $method database." - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tstn.db - set env NULL - } else { - set testfile test$tstn.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - set ps 512 - set txn "" - set oflags "-create -pagesize $ps -mode 0644 $args $method" - set db [eval {berkdb_open} $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set nkeys 5 - # Fill page w/ key/data pairs. - # - puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs." 
- for { set i 0 } { $i < $nkeys } { incr i } { - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn {${key}00000$i $data$i}] - error_check_good dbput $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - # get db ordering, set cursors - puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs." - # if mkeys is above 1000, need to adjust below for lexical order - set mkeys 30000 - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - set mkeys 300 - } - for {set i 0; set ret [$db get ${key}00000$i]} {\ - $i < $nkeys && [llength $ret] != 0} {\ - incr i; set ret [$db get ${key}00000$i]} { - set key_set($i) [lindex [lindex $ret 0] 0] - set data_set($i) [lindex [lindex $ret 0] 1] - set dbc [eval {$db cursor} $txn] - set dbc_set($i) $dbc - error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1 - set ret [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc_set($i)_get:set [llength $ret] 0 - } - - puts "\tTest$tstn.d: Add $mkeys pairs to force splits." - for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 10000 } { - set ret [eval {$db put} $txn {${key}0$i $data$i}] - } elseif { $i >= 1000 } { - set ret [eval {$db put} $txn {${key}00$i $data$i}] - } elseif { $i >= 100 } { - set ret [eval {$db put} $txn {${key}000$i $data$i}] - } elseif { $i >= 10 } { - set ret [eval {$db put} $txn {${key}0000$i $data$i}] - } else { - set ret [eval {$db put} $txn {${key}00000$i $data$i}] - } - error_check_good dbput:more $ret 0 - } - - puts "\tTest$tstn.e: Make sure splits happened." - # XXX cannot execute stat in presence of txns and cursors. - if { $txnenv == 0 && !$is_je_test } { - error_check_bad stat:check-split [is_substr [$db stat] \ - "{{Internal pages} 0}"] 1 - } - - puts "\tTest$tstn.f: Check to see that cursors maintained reference." 
- for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - puts "\tTest$tstn.g: Delete added keys to force reverse splits." - for {set i $nkeys} { $i < $mkeys } { incr i } { - if { $i >= 10000 } { - set ret [eval {$db del} $txn {${key}0$i}] - } elseif { $i >= 1000 } { - set ret [eval {$db del} $txn {${key}00$i}] - } elseif { $i >= 100 } { - set ret [eval {$db del} $txn {${key}000$i}] - } elseif { $i >= 10 } { - set ret [eval {$db del} $txn {${key}0000$i}] - } else { - set ret [eval {$db del} $txn {${key}00000$i}] - } - error_check_good dbput:more $ret 0 - } - - puts "\tTest$tstn.h: Verify cursor reference." - for {set i 0} { $i < $nkeys } {incr i} { - set ret [$dbc_set($i) get -current] - error_check_bad dbc$i:get:current [llength $ret] 0 - set ret2 [$dbc_set($i) get -set $key_set($i)] - error_check_bad dbc$i:get:set [llength $ret2] 0 - error_check_good dbc$i:get(match) $ret $ret2 - } - - puts "\tTest$tstn.i: Cleanup." - # close cursors - for {set i 0} { $i < $nkeys } {incr i} { - error_check_good dbc_close:$i [$dbc_set($i) close] 0 - } - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good dbclose [$db close] 0 - - puts "\tTest$tstn complete." -} diff --git a/storage/bdb/test/test089.tcl b/storage/bdb/test/test089.tcl deleted file mode 100644 index 3eb2cd88d06..00000000000 --- a/storage/bdb/test/test089.tcl +++ /dev/null @@ -1,264 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test089.tcl,v 11.9 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test089 -# TEST Concurrent Data Store test (CDB) -# TEST -# TEST Enhanced CDB testing to test off-page dups, cursor dups and -# TEST cursor operations like c_del then c_get. 
-proc test089 { method {nentries 1000} args } { - global datastr - global encrypt - source ./include.tcl - - # - # If we are using an env, then skip this test. It needs its own. - set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test089 skipping for env $env" - return - } - set encargs "" - set args [convert_args $method $args] - set oargs [split_encargs $args encargs] - set omethod [convert_method $method] - - puts "Test089: ($oargs) $method CDB Test cursor/dup operations" - - # Process arguments - # Create the database and open the dictionary - set testfile test089.db - set testfile1 test089a.db - - env_cleanup $testdir - - set env [eval {berkdb_env -create -cdb} $encargs -home $testdir] - error_check_good dbenv [is_valid_env $env] TRUE - - set db [eval {berkdb_open -env $env -create \ - -mode 0644 $omethod} $oargs {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set db1 [eval {berkdb_open -env $env -create \ - -mode 0644 $omethod} $oargs {$testfile1}] - error_check_good dbopen [is_valid_db $db1] TRUE - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put each key/data pair - puts "\tTest089.a: Put loop" - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put:$db $ret 0 - set ret [eval {$db1 put} \ - $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put:$db1 $ret 0 - incr count - } - close $did - error_check_good close:$db [$db close] 0 - error_check_good close:$db1 [$db1 close] 0 - - # Database is created, now set up environment - - # Remove old mpools and Open/create the lock and mpool regions - error_check_good env:close:$env [$env close] 0 - set ret [eval {berkdb envremove} $encargs -home 
$testdir] - error_check_good env_remove $ret 0 - - set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir] - error_check_good dbenv [is_valid_widget $env env] TRUE - - puts "\tTest089.b: CDB cursor dups" - - set db1 [eval {berkdb_open_noerr -env $env -create \ - -mode 0644 $omethod} $oargs {$testfile1}] - error_check_good dbopen [is_valid_db $db1] TRUE - - # Create a read-only cursor and make sure we can't write with it. - set dbcr [$db1 cursor] - error_check_good dbcursor [is_valid_cursor $dbcr $db1] TRUE - set ret [$dbcr get -first] - catch { [$dbcr put -current data] } ret - error_check_good is_read_only \ - [is_substr $ret "Write attempted on read-only cursor"] 1 - error_check_good dbcr_close [$dbcr close] 0 - - # Create a write cursor and duplicate it. - set dbcw [$db1 cursor -update] - error_check_good dbcursor [is_valid_cursor $dbcw $db1] TRUE - set dup_dbcw [$dbcw dup] - error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db1] TRUE - - # Position both cursors at get -first. They should find the same data. - set get_first [$dbcw get -first] - set get_first_dup [$dup_dbcw get -first] - error_check_good dup_read $get_first $get_first_dup - - # Test that the write cursors can both write and that they - # read each other's writes correctly. First write reversed - # datastr with original cursor and read with dup cursor. - error_check_good put_current_orig \ - [$dbcw put -current [chop_data $method [reverse $datastr]]] 0 - set reversed [$dup_dbcw get -current] - error_check_good check_with_dup [lindex [lindex $reversed 0] 1] \ - [chop_data $method [reverse $datastr]] - - # Write forward datastr with dup cursor and read with original. 
- error_check_good put_current_dup \ - [$dup_dbcw put -current [chop_data $method $datastr]] 0 - set forward [$dbcw get -current] - error_check_good check_with_orig $forward $get_first - - error_check_good dbcw_close [$dbcw close] 0 - error_check_good dup_dbcw_close [$dup_dbcw close] 0 - - # This tests the failure found in #1923 - puts "\tTest089.c: Test delete then get" - - set dbc [$db1 cursor -update] - error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE - - for {set kd [$dbc get -first] } { [llength $kd] != 0 } \ - {set kd [$dbc get -next] } { - error_check_good dbcdel [$dbc del] 0 - } - error_check_good dbc_close [$dbc close] 0 - error_check_good db_close [$db1 close] 0 - error_check_good env_close [$env close] 0 - - if { [is_btree $method] != 1 } { - puts "Skipping rest of test089 for $method method." - return - } - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Skipping rest of test089 for specific pagesizes" - return - } - append oargs " -dup " - test089_dup $testdir $encargs $oargs $omethod $nentries - append oargs " -dupsort " - test089_dup $testdir $encargs $oargs $omethod $nentries -} - -proc test089_dup { testdir encargs oargs method nentries } { - env_cleanup $testdir - set env [eval {berkdb_env -create -cdb} $encargs -home $testdir] - error_check_good dbenv [is_valid_env $env] TRUE - - # - # Set pagesize small to generate lots of off-page dups - # - set page 512 - set nkeys 5 - set data "data" - set key "test089_key" - set testfile test089.db - puts "\tTest089.d: CDB ($oargs) off-page dups" - set oflags "-env $env -create -mode 0644 $oargs $method" - set db [eval {berkdb_open} -pagesize $page $oflags $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups" - for { set k 0 } { $k < $nkeys } { incr k } { - for { set i 0 } { $i < $nentries } { incr i } { - set ret [$db put $key$k $i$data$k] - error_check_good dbput $ret 0 - } - } - - # 
Verify we have off-page duplicates - set stat [$db stat] - error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1 - - # This tests the failure reported in #6950. Skip for -dupsort. - puts "\tTest089.f: Clear locks for duped off-page dup cursors." - if { [is_substr $oargs dupsort] != 1 } { - # Create a read cursor, put it on an off-page dup. - set dbcr [$db cursor] - error_check_good dbcr [is_valid_cursor $dbcr $db] TRUE - set offpage [$dbcr get -get_both test089_key4 900data4] - error_check_bad offpage [llength $offpage] 0 - - # Create a write cursor, put it on an off-page dup. - set dbcw [$db cursor -update] - error_check_good dbcw [is_valid_cursor $dbcw $db] TRUE - set offpage [$dbcw get -get_both test089_key3 900data3] - error_check_bad offpage [llength $offpage] 0 - - # Add a new item using the write cursor, then close the cursor. - error_check_good add_dup [$dbcw put -after $data] 0 - error_check_good close_dbcw [$dbcw close] 0 - - # Get next dup with read cursor, then close the cursor. - set nextdup [$dbcr get -nextdup] - error_check_good close_dbcr [$dbcr close] 0 - } - - puts "\tTest089.g: CDB duplicate write cursors with off-page dups" - # Create a write cursor and duplicate it. - set dbcw [$db cursor -update] - error_check_good dbcursor [is_valid_cursor $dbcw $db] TRUE - set dup_dbcw [$dbcw dup] - error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db] TRUE - - # Position both cursors at get -first. They should find the same data. - set get_first [$dbcw get -first] - set get_first_dup [$dup_dbcw get -first] - error_check_good dup_read $get_first $get_first_dup - - # Test with -after and -before. Skip for -dupsort. - if { [is_substr $oargs dupsort] != 1 } { - # Original and duplicate cursors both point to first item. - # Do a put -before of new string with original cursor, - # and a put -after of new string with duplicate cursor. 
- set newdata "newdata" - error_check_good put_before [$dbcw put -before $newdata] 0 - error_check_good put_after [$dup_dbcw put -after $newdata] 0 - - # Now walk forward with original cursor ... - set first [$dbcw get -first] - error_check_good check_first [lindex [lindex $first 0] 1] $newdata - set next1 [$dbcw get -next] - error_check_good check_next1 $next1 $get_first - set next2 [$dbcw get -next] - error_check_good check_next2 [lindex [lindex $next2 0] 1] $newdata - - # ... and backward with duplicate cursor. - set current [$dup_dbcw get -current] - error_check_good check_current [lindex [lindex $current 0] 1] $newdata - set prev1 [$dup_dbcw get -prev] - error_check_good check_prev1 $prev1 $get_first - set prev2 [$dup_dbcw get -prev] - error_check_good check_prev2 [lindex [lindex $prev2 0] 1] $newdata - } - - puts "\tTest089.h: test delete then get of off-page dups" - for {set kd [$dbcw get -first] } { [llength $kd] != 0 } \ - {set kd [$dbcw get -next] } { - error_check_good dbcdel [$dbcw del] 0 - } - - error_check_good dbcw_close [$dbcw close] 0 - error_check_good dup_dbcw_close [$dup_dbcw close] 0 - - error_check_good db_close [$db close] 0 - error_check_good env_close [$env close] 0 -} diff --git a/storage/bdb/test/test090.tcl b/storage/bdb/test/test090.tcl deleted file mode 100644 index 7c46c56c0a7..00000000000 --- a/storage/bdb/test/test090.tcl +++ /dev/null @@ -1,16 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test090.tcl,v 11.15 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test090 -# TEST Test for functionality near the end of the queue using test001. -proc test090 { method {nentries 10000} {tnum "090"} args} { - if { [is_queueext $method ] == 0 } { - puts "Skipping test$tnum for $method." 
- return; - } - eval {test001 $method $nentries 4294967000 0 $tnum} $args -} diff --git a/storage/bdb/test/test091.tcl b/storage/bdb/test/test091.tcl deleted file mode 100644 index 81cabb867b9..00000000000 --- a/storage/bdb/test/test091.tcl +++ /dev/null @@ -1,20 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test091.tcl,v 11.10 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test091 -# TEST Test of DB_CONSUME_WAIT. -proc test091 { method {nconsumers 4} \ - {nproducers 2} {nitems 1000} {start 0 } {tnum "091"} args} { - if { [is_queue $method ] == 0 } { - puts "Skipping test0$tnum for $method." - return; - } - eval {test070 $method \ - $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args - eval {test070 $method \ - $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args -} diff --git a/storage/bdb/test/test092.tcl b/storage/bdb/test/test092.tcl deleted file mode 100644 index ef4c822d821..00000000000 --- a/storage/bdb/test/test092.tcl +++ /dev/null @@ -1,247 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test092.tcl,v 11.18 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test092 -# TEST Test of DB_DIRTY_READ [#3395] -# TEST -# TEST We set up a database with nentries in it. We then open the -# TEST database read-only twice. One with dirty read and one without. -# TEST We open the database for writing and update some entries in it. -# TEST Then read those new entries via db->get (clean and dirty), and -# TEST via cursors (clean and dirty). -proc test092 { method {nentries 1000} args } { - source ./include.tcl - # - # If we are using an env, then skip this test. It needs its own. 
- set eindex [lsearch -exact $args "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - puts "Test092 skipping for env $env" - return - } - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - puts "Test092: Dirty Read Test $method $nentries" - - # Create the database and open the dictionary - set testfile test092.db - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - - env_cleanup $testdir - - set lmax [expr $nentries * 2] - set lomax [expr $nentries * 2] - set env [eval {berkdb_env -create -txn} $encargs -home $testdir \ - -lock_max_locks $lmax -lock_max_objects $lomax] - error_check_good dbenv [is_valid_env $env] TRUE - - set db [eval {berkdb_open -env $env -create \ - -mode 0644 $omethod} $args {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - # Here is the loop where we put each key/data pair. - # Key is entry, data is entry also. - puts "\tTest092.a: put loop" - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - global kvals - - set key [expr $count + 1] - set kvals($key) [pad_data $method $str] - } else { - set key $str - } - set ret [eval {$db put} {$key [chop_data $method $str]}] - error_check_good put:$db $ret 0 - incr count - } - close $did - error_check_good close:$db [$db close] 0 - - puts "\tTest092.b: Opening all the handles" - # - # Open all of our handles. - # We need: - # 1. Our main txn (t). - # 2. A txn that can read dirty data (tdr). - # 3. A db handle for writing via txn (dbtxn). - # 4. A db handle for clean data (dbcl). - # 5. A db handle for dirty data (dbdr). - # 6. A cursor handle for dirty txn data (clean db handle using - # the dirty txn handle on the cursor call) (dbccl1). - # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0). - # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1). 
- set t [$env txn] - error_check_good txnbegin [is_valid_txn $t $env] TRUE - - set tdr [$env txn -dirty] - error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE - set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \ - -mode 0644 $omethod} {$testfile}] - error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE - - set dbcl [eval {berkdb_open -auto_commit -env $env \ - -rdonly -mode 0644 $omethod} {$testfile}] - error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE - - set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \ - -rdonly -mode 0644 $omethod} {$testfile}] - error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE - - set dbccl [$dbcl cursor -txn $tdr] - error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE - - set dbcdr0 [$dbdr cursor] - error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE - - set dbcdr1 [$dbdr cursor -dirty] - error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE - - # Test that $db stat can use -dirty flag. - puts "\tTest092.c: Smoke test for db_stat -txn -dirty" - if { [catch {set statret [$dbcl stat -txn $t -dirty]} res] } { - puts "FAIL: db_stat -txn -dirty returned $res" - } - - # - # Now that we have all of our handles, change all the data in there - # to be the key and data the same, but data is capitalized. - puts "\tTest092.d: put/get data within a txn" - set gflags "" - if { [is_record_based $method] == 1 } { - set checkfunc test092dr_recno.check - append gflags " -recno" - } else { - set checkfunc test092dr.check - } - set count 0 - set did [open $dict] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - set ustr [string toupper $str] - set clret [list [list $key [pad_data $method $str]]] - set drret [list [list $key [pad_data $method $ustr]]] - # - # Put the data in the txn. 
- # - set ret [eval {$dbtxn put} -txn $t \ - {$key [chop_data $method $ustr]}] - error_check_good put:$dbtxn $ret 0 - - # - # Now get the data using the different db handles and - # make sure it is dirty or clean data. - # - # Using the dirty txn should show us dirty data - set ret [eval {$dbcl get -txn $tdr} $gflags {$key}] - error_check_good dbdr2:get $ret $drret - - set ret [eval {$dbdr get -dirty} $gflags {$key}] - error_check_good dbdr1:get $ret $drret - - set ret [eval {$dbdr get -txn $tdr} $gflags {$key}] - error_check_good dbdr2:get $ret $drret - - incr count - } - close $did - - puts "\tTest092.e: Check dirty data using dirty txn and clean db/cursor" - dump_file_walk $dbccl $t1 $checkfunc "-first" "-next" - - puts "\tTest092.f: Check dirty data using -dirty cget flag" - dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty" - - puts "\tTest092.g: Check dirty data using -dirty cursor" - dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next" - - # - # We must close these before aborting the real txn - # because they all hold read locks on the pages. - # - error_check_good dbccl:close [$dbccl close] 0 - error_check_good dbcdr0:close [$dbcdr0 close] 0 - error_check_good dbcdr1:close [$dbcdr1 close] 0 - - # - # Now abort the modifying transaction and rerun the data checks. 
- # - puts "\tTest092.h: Aborting the write-txn" - error_check_good txnabort [$t abort] 0 - - set dbccl [$dbcl cursor -txn $tdr] - error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE - - set dbcdr0 [$dbdr cursor] - error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE - - set dbcdr1 [$dbdr cursor -dirty] - error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE - - if { [is_record_based $method] == 1 } { - set checkfunc test092cl_recno.check - } else { - set checkfunc test092cl.check - } - puts "\tTest092.i: Check clean data using -dirty cget flag" - dump_file_walk $dbccl $t1 $checkfunc "-first" "-next" - - puts "\tTest092.j: Check clean data using -dirty cget flag" - dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty" - - puts "\tTest092.k: Check clean data using -dirty cursor" - dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next" - - # Clean up our handles - error_check_good dbccl:close [$dbccl close] 0 - error_check_good tdrcommit [$tdr commit] 0 - error_check_good dbcdr0:close [$dbcdr0 close] 0 - error_check_good dbcdr1:close [$dbcdr1 close] 0 - error_check_good dbclose [$dbcl close] 0 - error_check_good dbclose [$dbdr close] 0 - error_check_good dbclose [$dbtxn close] 0 - error_check_good envclose [$env close] 0 -} - -# Check functions for test092; keys and data are identical -# Clean checks mean keys and data are identical. -# Dirty checks mean data are uppercase versions of keys. 
-proc test092cl.check { key data } { - error_check_good "key/data mismatch" $key $data -} - -proc test092cl_recno.check { key data } { - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data $kvals($key) -} - -proc test092dr.check { key data } { - error_check_good "key/data mismatch" $key [string tolower $data] -} - -proc test092dr_recno.check { key data } { - global kvals - - error_check_good key"$key"_exists [info exists kvals($key)] 1 - error_check_good "key/data mismatch, key $key" $data \ - [string toupper $kvals($key)] -} - diff --git a/storage/bdb/test/test093.tcl b/storage/bdb/test/test093.tcl deleted file mode 100644 index 3ed4b596403..00000000000 --- a/storage/bdb/test/test093.tcl +++ /dev/null @@ -1,393 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test093.tcl,v 11.23 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test093 -# TEST Test using set_bt_compare. -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. -proc test093 { method {nentries 10000} {tnum "093"} args} { - source ./include.tcl - global btvals - global btvalsck - global errorInfo - - set dbargs [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_btree $method] != 1 } { - puts "Test$tnum: skipping for method $method." 
- return - } - set txnenv 0 - set eindex [lsearch -exact $dbargs "-env"] - if { $eindex != -1 } { - set testfile test$tnum.db - incr eindex - set env [lindex $dbargs $eindex] - set rpcenv [is_rpcenv $env] - if { $rpcenv == 1 } { - puts "Test$tnum: skipping for RPC" - return - } - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append dbargs " -auto_commit " - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - cleanup $testdir $env - } - puts "Test$tnum: $method ($args) $nentries using btcompare" - - - test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1 - test093_runbig $omethod $dbargs $nentries $tnum \ - test093_cmp1 test093_sort1 - test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2 - # - # Don't bother running the second, really slow, comparison - # function on test093_runbig (file contents). - - # Clean up so verification doesn't fail. (There's currently - # no way to specify a comparison function to berkdb dbverify.) - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - set eindex [lsearch -exact $dbargs "-env"] - if { $eindex == -1 } { - set env NULL - } else { - incr eindex - set env [lindex $dbargs $eindex] - set testdir [get_home $env] - } - cleanup $testdir $env -} - -proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } { - source ./include.tcl - global btvals - global btvalsck - - # Create the database and open the dictionary - set eindex [lsearch -exact $dbargs "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- set txnenv 0 - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $dbargs $eindex] - set txnenv [is_txnenv $env] - set testdir [get_home $env] - } - cleanup $testdir $env - - set db [eval {berkdb_open -btcompare $cmpfunc \ - -create -mode 0644} $method $dbargs $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set did [open $dict] - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set pflags "" - set gflags "" - set txn "" - set btvals {} - set btvalsck {} - set checkfunc test093_check - puts "\tTest$tnum.a: put/get loop" - # Here is the loop where we put and get each key/data pair - set count 0 - while { [gets $did str] != -1 && $count < $nentries } { - set key $str - set str [reverse $str] - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval \ - {$db put} $txn $pflags {$key [chop_data $method $str]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - lappend btvals $key - - set ret [eval {$db get} $gflags {$key}] - error_check_good \ - get $ret [list [list $key [pad_data $method $str]]] - - incr count - } - close $did - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Now compare the keys to see if they match the dictionary (or ints) - set q q - filehead $nentries $dict $t2 - filesort $t2 $t3 - file rename -force $t3 $t2 - filesort $t1 $t3 - - error_check_good Test$tnum:diff($t3,$t2) \ - [filecmp $t3 $t2] 0 - - puts "\tTest$tnum.c: dump file in order" - # Now, reopen the file and run the last test again. - # We open it here, ourselves, because all uses of the db - # need to have the correct comparison func set. Then - # call dump_file_direction directly. - set btvalsck {} - set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \ - $dbargs $method $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file_direction $db $txn $t1 $checkfunc "-first" "-next" - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # - # We need to sort btvals according to the comparison function. - # Once that is done, btvalsck and btvals should be the same. - puts "\tTest$tnum.d: check file order" - - $sortfunc - - error_check_good btvals:len [llength $btvals] [llength $btvalsck] - for {set i 0} {$i < $nentries} {incr i} { - error_check_good vals:$i [lindex $btvals $i] \ - [lindex $btvalsck $i] - } -} - -proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } { - source ./include.tcl - global btvals - global btvalsck - - # Create the database and open the dictionary - set eindex [lsearch -exact $dbargs "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- set txnenv 0 - if { $eindex == -1 } { - set testfile $testdir/test$tnum.db - set env NULL - } else { - set testfile test$tnum.db - incr eindex - set env [lindex $dbargs $eindex] - set txnenv [is_txnenv $env] - set testdir [get_home $env] - } - cleanup $testdir $env - - set db [eval {berkdb_open -btcompare $cmpfunc \ - -create -mode 0644} $method $dbargs $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - set t4 $testdir/t4 - set t5 $testdir/t5 - set pflags "" - set gflags "" - set txn "" - set btvals {} - set btvalsck {} - set checkfunc test093_checkbig - puts "\tTest$tnum.e:\ - big key put/get loop key=filecontents data=filename" - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list 1] - - set count 0 - foreach f $file_list { - set fid [open $f r] - fconfigure $fid -translation binary - set key [read $fid] - close $fid - - set key $f$key - - set fcopy [open $t5 w] - fconfigure $fcopy -translation binary - puts -nonewline $fcopy $key - close $fcopy - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} $txn $pflags {$key \ - [chop_data $method $f]}] - error_check_good put_file $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - lappend btvals $key - - # Should really catch errors - set fid [open $t4 w] - fconfigure $fid -translation binary - if [catch {eval {$db get} $gflags {$key}} data] { - puts -nonewline $fid $data - } else { - # Data looks like {{key data}} - set key [lindex [lindex $data 0] 0] - puts -nonewline $fid $key - } - close $fid - error_check_good \ - Test093:diff($t5,$t4) [filecmp $t5 $t4] 0 - - incr count - } - - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.f: big dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 $checkfunc - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest$tnum.g: dump file in order" - # Now, reopen the file and run the last test again. - # We open it here, ourselves, because all uses of the db - # need to have the correct comparison func set. Then - # call dump_file_direction directly. - - set btvalsck {} - set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \ - $dbargs $method $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file_direction $db $txn $t1 $checkfunc "-first" "-next" - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # - # We need to sort btvals according to the comparison function. - # Once that is done, btvalsck and btvals should be the same. - puts "\tTest$tnum.h: check file order" - - $sortfunc - error_check_good btvals:len [llength $btvals] [llength $btvalsck] - - set end [llength $btvals] - for {set i 0} {$i < $end} {incr i} { - error_check_good vals:$i [lindex $btvals $i] \ - [lindex $btvalsck $i] - } -} - -# Simple bt comparison. -proc test093_cmp1 { a b } { - return [string compare $b $a] -} - -# Simple bt sorting. -proc test093_sort1 {} { - global btvals - # - # This one is easy, just sort in reverse. - # - set btvals [lsort -decreasing $btvals] -} - -proc test093_cmp2 { a b } { - set arev [reverse $a] - set brev [reverse $b] - return [string compare $arev $brev] -} - -proc test093_sort2 {} { - global btvals - - # We have to reverse them, then sorts them. - # Then reverse them back to real words. 
- set rbtvals {} - foreach i $btvals { - lappend rbtvals [reverse $i] - } - set rbtvals [lsort -increasing $rbtvals] - set newbtvals {} - foreach i $rbtvals { - lappend newbtvals [reverse $i] - } - set btvals $newbtvals -} - -# Check function for test093; keys and data are identical -proc test093_check { key data } { - global btvalsck - - error_check_good "key/data mismatch" $data [reverse $key] - lappend btvalsck $key -} - -# Check function for test093 big keys; -proc test093_checkbig { key data } { - source ./include.tcl - global btvalsck - - set fid [open $data r] - fconfigure $fid -translation binary - set cont [read $fid] - close $fid - error_check_good "key/data mismatch" $key $data$cont - lappend btvalsck $key -} - diff --git a/storage/bdb/test/test094.tcl b/storage/bdb/test/test094.tcl deleted file mode 100644 index 20f2b3af3ab..00000000000 --- a/storage/bdb/test/test094.tcl +++ /dev/null @@ -1,189 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test094.tcl,v 11.21 2004/06/29 14:26:17 carol Exp $ -# -# TEST test094 -# TEST Test using set_dup_compare. -# TEST -# TEST Use the first 10,000 entries from the dictionary. -# TEST Insert each with self as key and data; retrieve each. -# TEST After all are entered, retrieve all; compare output to original. -# TEST Close file, reopen, do retrieve and re-verify. -proc test094 { method {nentries 10000} {ndups 10} {tnum "094"} args} { - source ./include.tcl - global errorInfo - - set dbargs [convert_args $method $args] - set omethod [convert_method $method] - - if { [is_btree $method] != 1 && [is_hash $method] != 1 } { - puts "Test$tnum: skipping for method $method." - return - } - - set txnenv 0 - set eindex [lsearch -exact $dbargs "-env"] - # Create the database and open the dictionary - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test$tnum-a.db - set env NULL - } else { - set testfile test$tnum-a.db - incr eindex - set env [lindex $dbargs $eindex] - set rpcenv [is_rpcenv $env] - if { $rpcenv == 1 } { - puts "Test$tnum: skipping for RPC" - return - } - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append dbargs " -auto_commit " - if { $nentries == 10000 } { - set nentries 100 - } - reduce_dups nentries ndups - } - set testdir [get_home $env] - } - puts "Test$tnum: $method ($args) $nentries \ - with $ndups dups using dupcompare" - - cleanup $testdir $env - - set db [eval {berkdb_open -dupcompare test094_cmp \ - -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - set t1 $testdir/t1 - set pflags "" - set gflags "" - set txn "" - puts "\tTest$tnum.a: $nentries put/get duplicates loop" - # Here is the loop where we put and get each key/data pair - set count 0 - set dlist {} - for {set i 0} {$i < $ndups} {incr i} { - set dlist [linsert $dlist 0 $i] - } - while { [gets $did str] != -1 && $count < $nentries } { - set key $str - for {set i 0} {$i < $ndups} {incr i} { - set data $i:$str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $omethod $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [eval {$db get} $gflags {$key}] - error_check_good get [llength $ret] $ndups - incr count - } - close $did - # Now we will get each key from the DB and compare the results - # to the original. 
- puts "\tTest$tnum.b: traverse checking duplicates before close" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - # Set up second testfile so truncate flag is not needed. - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set testfile $testdir/test$tnum-b.db - set env NULL - } else { - set testfile test$tnum-b.db - set env [lindex $dbargs $eindex] - set testdir [get_home $env] - } - cleanup $testdir $env - - # - # Test dupcompare with data items big enough to force offpage dups. - # - puts "\tTest$tnum.c: big key put/get dup loop key=filename data=filecontents" - set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \ - -create -mode 0644} $omethod $dbargs $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Here is the loop where we put and get each key/data pair - set file_list [get_file_list 1] - if { [llength $file_list] > $nentries } { - set file_list [lrange $file_list 1 $nentries] - } - - set count 0 - foreach f $file_list { - set fid [open $f r] - fconfigure $fid -translation binary - set cont [read $fid] - close $fid - - set key $f - for {set i 0} {$i < $ndups} {incr i} { - set data $i:$cont - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $omethod $data]}] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - } - - set ret [eval {$db get} $gflags {$key}] - error_check_good get [llength $ret] $ndups - incr count - } - - puts "\tTest$tnum.d: traverse checking duplicates before close" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn 
[is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dup_file_check $db $txn $t1 $dlist - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - set testdir [get_home $env] - } - error_check_good db_close [$db close] 0 - - # Clean up the test directory, since there's currently - # no way to specify a dup_compare function to berkdb dbverify - # and without one it will fail. - cleanup $testdir $env -} - -# Simple dup comparison. -proc test094_cmp { a b } { - return [string compare $b $a] -} diff --git a/storage/bdb/test/test095.tcl b/storage/bdb/test/test095.tcl deleted file mode 100644 index 9c62a6a51ef..00000000000 --- a/storage/bdb/test/test095.tcl +++ /dev/null @@ -1,367 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test095.tcl,v 11.30 2004/09/22 18:01:06 bostic Exp $ -# -# TEST test095 -# TEST Bulk get test for methods supporting dups. [#2934] -proc test095 { method {tnum "095"} args } { - source ./include.tcl - global is_je_test - global is_qnx_test - - set args [convert_args $method $args] - set omethod [convert_method $method] - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set basename $testdir/test$tnum - set env NULL - # If we've our own env, no reason to swap--this isn't - # an mpool test. - set carg { -cachesize {0 25000000 0} } - } else { - set basename test$tnum - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - puts "Skipping for environment with txns" - return - } - set testdir [get_home $env] - set carg {} - } - cleanup $testdir $env - - puts "Test$tnum: $method ($args) Bulk get test" - - # Tcl leaves a lot of memory allocated after this test - # is run in the tclsh. 
This ends up being a problem on - # QNX runs as later tests then run out of memory. - if { $is_qnx_test } { - puts "Test$tnum skipping for QNX" - return - } - if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } { - puts "Test$tnum skipping for method $method" - return - } - - # The test's success is dependent on the relationship between - # the amount of data loaded and the buffer sizes we pick, so - # these parameters don't belong on the command line. - set nsets 300 - set noverflows 25 - - # We run the meat of the test twice: once with unsorted dups, - # once with sorted dups. - foreach { dflag sort } { -dup unsorted {-dup -dupsort} sorted } { - if { $is_je_test && $sort == "unsorted" } { - continue - } - - set testfile $basename-$sort.db - set did [open $dict] - - # Open and populate the database with $nsets sets of dups. - # Each set contains as many dups as its number - puts "\tTest$tnum.a:\ - Creating database with $nsets sets of $sort dups." - set dargs "$dflag $carg $args" - set db [eval {berkdb_open_noerr -create} \ - $omethod $dargs $testfile] - error_check_good db_open [is_valid_db $db] TRUE - t95_populate $db $did $nsets 0 - - # Determine the pagesize so we can use it to size the buffer. - set stat [$db stat] - set pagesize [get_pagesize $stat] - - # Run basic get tests. - # - # A small buffer will fail if it is smaller than the pagesize. - # Skip small buffer tests if the page size is so small that - # we can't define a buffer smaller than the page size. - # (Buffers must be 1024 or multiples of 1024.) - # - # A big buffer of 66560 (64K + 1K) should always be large - # enough to contain the data, so the test should succeed - # on all platforms. We picked this number because it - # is larger than the largest allowed pagesize, so the test - # always fills more than a page at some point. 
- - set maxpage [expr 1024 * 64] - set bigbuf [expr $maxpage + 1024] - set smallbuf 1024 - - if { $pagesize > 1024 } { - t95_gettest $db $tnum b $smallbuf 1 - } else { - puts "Skipping small buffer test Test$tnum.b" - } - t95_gettest $db $tnum c $bigbuf 0 - - # Run cursor get tests. - if { $pagesize > 1024 } { - t95_cgettest $db $tnum b $smallbuf 1 - } else { - puts "Skipping small buffer test Test$tnum.d" - } - t95_cgettest $db $tnum e $bigbuf 0 - - # Run invalid flag combination tests - # Sync and reopen test file so errors won't be sent to stderr - error_check_good db_sync [$db sync] 0 - set noerrdb [eval berkdb_open_noerr $dargs $testfile] - t95_flagtest $noerrdb $tnum f [expr 8192] - t95_cflagtest $noerrdb $tnum g [expr 100] - error_check_good noerrdb_close [$noerrdb close] 0 - - # Set up for overflow tests - set max [expr 4096 * $noverflows] - puts "\tTest$tnum.h: Add $noverflows overflow sets\ - to database (max item size $max)" - t95_populate $db $did $noverflows 4096 - - # Run overflow get tests. The overflow test fails with - # our standard big buffer doubled, but succeeds with a - # buffer sized to handle $noverflows pairs of data of - # size $max. - t95_gettest $db $tnum i $bigbuf 1 - t95_gettest $db $tnum j [expr $bigbuf * 2] 1 - t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0 - - # Run overflow cursor get tests. - t95_cgettest $db $tnum l $bigbuf 1 - # Expand buffer to accommodate basekey as well as the padding. 
- t95_cgettest $db $tnum m [expr ($max + 512) * 2] 0 - - error_check_good db_close [$db close] 0 - close $did - } -} - -proc t95_gettest { db tnum letter bufsize expectfail } { - t95_gettest_body $db $tnum $letter $bufsize $expectfail 0 -} -proc t95_cgettest { db tnum letter bufsize expectfail } { - t95_gettest_body $db $tnum $letter $bufsize $expectfail 1 -} -proc t95_flagtest { db tnum letter bufsize } { - t95_flagtest_body $db $tnum $letter $bufsize 0 -} -proc t95_cflagtest { db tnum letter bufsize } { - t95_flagtest_body $db $tnum $letter $bufsize 1 -} - -# Basic get test -proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } { - global errorCode - - foreach flag { multi multi_key } { - if { $usecursor == 0 } { - if { $flag == "multi_key" } { - # db->get does not allow multi_key - continue - } else { - set action "db get -$flag" - } - } else { - set action "dbc get -$flag -set/-next" - } - puts "\tTest$tnum.$letter: $action with bufsize $bufsize" - set allpassed TRUE - set saved_err "" - - # Cursor for $usecursor. - if { $usecursor != 0 } { - set getcurs [$db cursor] - error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE - } - - # Traverse DB with cursor; do get/c_get($flag) on each item. 
- set dbc [$db cursor] - error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE - for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \ - { set dbt [$dbc get -nextnodup] } { - set key [lindex [lindex $dbt 0] 0] - set datum [lindex [lindex $dbt 0] 1] - - if { $usecursor == 0 } { - set ret [catch {eval $db get -$flag $bufsize $key} res] - } else { - set res {} - for { set ret [catch {eval $getcurs get -$flag $bufsize\ - -set $key} tres] } \ - { $ret == 0 && [llength $tres] != 0 } \ - { set ret [catch {eval $getcurs get -$flag $bufsize\ - -nextdup} tres]} { - eval lappend res $tres - } - } - - # If we expect a failure, be more tolerant if the above - # fails; just make sure it's a DB_BUFFER_SMALL or an - # EINVAL (if the buffer is smaller than the pagesize, - # it's EINVAL), mark it, and move along. - if { $expectfail != 0 && $ret != 0 } { - if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \ - [is_substr $errorCode EINVAL] != 1 } { - error_check_good \ - "$flag failure errcode" \ - $errorCode "DB_BUFFER_SMALL or EINVAL" - } - set allpassed FALSE - continue - } - error_check_good "get_$flag ($key)" $ret 0 - if { $flag == "multi_key" } { - t95_verify $res TRUE - } else { - t95_verify $res FALSE - } - } - set ret [catch {eval $db get -$flag $bufsize} res] - - if { $expectfail == 1 } { - error_check_good allpassed $allpassed FALSE - puts "\t\tTest$tnum.$letter:\ - returned at least one DB_BUFFER_SMALL (as expected)" - } else { - error_check_good allpassed $allpassed TRUE - puts "\t\tTest$tnum.$letter: succeeded (as expected)" - } - - error_check_good dbc_close [$dbc close] 0 - if { $usecursor != 0 } { - error_check_good getcurs_close [$getcurs close] 0 - } - } -} - -# Test of invalid flag combinations -proc t95_flagtest_body { db tnum letter bufsize usecursor } { - global errorCode - - foreach flag { multi multi_key } { - if { $usecursor == 0 } { - if { $flag == "multi_key" } { - # db->get does not allow multi_key - continue - } else { - set action "db get 
-$flag" - } - } else { - set action "dbc get -$flag" - } - puts "\tTest$tnum.$letter: $action with invalid flag combinations" - - # Cursor for $usecursor. - if { $usecursor != 0 } { - set getcurs [$db cursor] - error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE - } - - if { $usecursor == 0 } { - # Disallowed flags for db->get - set badflags [list consume consume_wait {rmw some_key}] - - foreach badflag $badflags { - catch {eval $db get -$flag $bufsize -$badflag} ret - error_check_good \ - db:get:$flag:$badflag [is_substr $errorCode EINVAL] 1 - } - } else { - # Disallowed flags for db->cget - set cbadflags [list last get_recno join_item \ - {multi_key 1000} prev prevnodup] - - set dbc [$db cursor] - $dbc get -first - foreach badflag $cbadflags { - catch {eval $dbc get -$flag $bufsize -$badflag} ret - error_check_good dbc:get:$flag:$badflag \ - [is_substr $errorCode EINVAL] 1 - } - error_check_good dbc_close [$dbc close] 0 - } - if { $usecursor != 0 } { - error_check_good getcurs_close [$getcurs close] 0 - } - } - puts "\t\tTest$tnum.$letter completed" -} - -# Verify that a passed-in list of key/data pairs all match the predicted -# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}). -proc t95_verify { res multiple_keys } { - global alphabet - - set i 0 - set orig_key [lindex [lindex $res 0] 0] - set nkeys [string trim $orig_key $alphabet'] - set base_key [string trim $orig_key 0123456789] - set datum_count 0 - - while { 1 } { - set key [lindex [lindex $res $i] 0] - set datum [lindex [lindex $res $i] 1] - if { $datum_count >= $nkeys } { - if { [llength $key] != 0 } { - # If there are keys beyond $nkeys, we'd - # better have multiple_keys set. - error_check_bad "keys beyond number $i allowed"\ - $multiple_keys FALSE - - # If multiple_keys is set, accept the new key. 
- set orig_key $key - set nkeys [eval string trim \ - $orig_key {$alphabet'}] - set base_key [eval string trim \ - $orig_key 0123456789] - set datum_count 0 - } else { - # datum_count has hit nkeys. We're done. - return - } - } - - error_check_good returned_key($i) $key $orig_key - error_check_good returned_datum($i) \ - $datum $base_key.[format %4u $datum_count] - incr datum_count - incr i - } -} - -# Add nsets dup sets, each consisting of {word$ndups word$n} pairs, -# with "word" having (i * pad_bytes) bytes extra padding. -proc t95_populate { db did nsets pad_bytes } { - set txn "" - for { set i 1 } { $i <= $nsets } { incr i } { - # basekey is a padded dictionary word - gets $did basekey - - append basekey [repeat "a" [expr $pad_bytes * $i]] - - # key is basekey with the number of dups stuck on. - set key $basekey$i - - for { set j 0 } { $j < $i } { incr j } { - set data $basekey.[format %4u $j] - error_check_good db_put($key,$data) \ - [eval {$db put} $txn {$key $data}] 0 - } - } - - # This will make debugging easier, and since the database is - # read-only from here out, it's cheap. - error_check_good db_sync [$db sync] 0 -} diff --git a/storage/bdb/test/test096.tcl b/storage/bdb/test/test096.tcl deleted file mode 100644 index ac8450069cc..00000000000 --- a/storage/bdb/test/test096.tcl +++ /dev/null @@ -1,371 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test096.tcl,v 11.26 2004/06/10 17:21:20 carol Exp $ -# -# TEST test096 -# TEST Db->truncate test. -# TEST For all methods: -# TEST Test that truncate empties an existing database. -# TEST Test that truncate-write in an aborted txn doesn't -# TEST change the original contents. -# TEST Test that truncate-write in a committed txn does -# TEST overwrite the original contents. -# TEST For btree and hash, do the same in a database with offpage dups. 
-proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} { - global fixed_len - global alphabet - source ./include.tcl - - set orig_fixed_len $fixed_len - set args [convert_args $method $args] - set encargs "" - set args [split_encargs $args encargs] - set omethod [convert_method $method] - - puts "Test096: $method db truncate method test" - set pgindex [lsearch -exact $args "-pagesize"] - if { $pgindex != -1 } { - puts "Test096: Skipping for specific pagesizes" - return - } - - # Create the database and open the dictionary - set eindex [lsearch -exact $args "-env"] - set testfile test096.db - if { $eindex != -1 } { - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 0 } { - puts "Environment w/o txns specified; skipping." - return - } - if { $nentries == 1000 } { - set nentries 100 - } - reduce_dups nentries ndups - set testdir [get_home $env] - set closeenv 0 - } else { - env_cleanup $testdir - - # We need an env for exclusive-use testing. Since we are - # using txns, we need at least 1 lock per record for queue. 
- set lockmax [expr $nentries * 2] - set env [eval {berkdb_env -create -home $testdir \ - -lock_max $lockmax -txn} $encargs] - error_check_good env_create [is_valid_env $env] TRUE - set closeenv 1 - } - - set t1 $testdir/t1 - - puts "\tTest096.a: Create database with $nentries entries" - set db [eval {berkdb_open -create -auto_commit \ - -env $env $omethod -mode 0644} $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - t96_populate $db $omethod $env $nentries - error_check_good dbclose [$db close] 0 - - puts "\tTest096.b: Truncate database" - set dbtr [eval {berkdb_open -create -auto_commit \ - -env $env $omethod -mode 0644} $args $testfile] - error_check_good db_open [is_valid_db $dbtr] TRUE - - set ret [$dbtr truncate -auto_commit] - error_check_good dbtrunc $ret $nentries - error_check_good db_close [$dbtr close] 0 - - set db [eval {berkdb_open -env $env} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set number [number_of_entries $db $method] - error_check_good number_of_entries $number 0 - error_check_good dbclose [$db close] 0 - error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0 - - # Remove and recreate database. 
- puts "\tTest096.d: Recreate database with $nentries entries" - set db [eval {berkdb_open -create -auto_commit \ - -env $env $omethod -mode 0644} $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - t96_populate $db $omethod $env $nentries - error_check_good dbclose [$db close] 0 - - puts "\tTest096.e: Truncate and write in a txn, then abort" - txn_truncate $env $omethod $testfile $nentries abort 1 - - set db [eval {berkdb_open -env $env} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Database should have original contents since both the truncate - # and the write were aborted - set number [number_of_entries $db $method] - error_check_good number_of_entries $number $nentries - error_check_good dbclose [$db close] 0 - - error_check_good dbverify [verify_dir $testdir "\tTest096.f: "] 0 - - puts "\tTest096.g: Truncate and write in a txn, then commit" - txn_truncate $env $omethod $testfile $nentries commit 1 - - set db [eval {berkdb_open -env $env} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Database should contain only the new items - set number [number_of_entries $db $method] - error_check_good number_of_entries $number [expr $nentries / 2] - error_check_good dbclose [$db close] 0 - error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0 - - puts "\tTest096.i: Check proper handling of overflow pages." - # Large keys and data compared to page size guarantee - # overflow pages. - if { [is_fixed_length $method] == 1 } { - puts "Skipping overflow test for fixed-length method." 
- } else { - set overflowfile overflow096.db - set data [repeat $alphabet 600] - set db [eval {berkdb_open -create -auto_commit -pagesize 512 \ - -env $env $omethod -mode 0644} $args $overflowfile] - error_check_good db_open [is_valid_db $db] TRUE - - set noverflows 100 - for { set i 1 } { $i <= $noverflows } { incr i } { - set ret [eval {$db put} -auto_commit \ - $i [chop_data $method "$i$data"]] - } - - set stat [$db stat] - error_check_bad stat:overflow [is_substr $stat \ - "{{Overflow pages} 0}"] 1 - - error_check_good overflow_truncate [$db truncate] $noverflows - error_check_good overflow_close [$db close] 0 - } - - # Remove database and create a new one with dups. Skip - # the rest of the test for methods not supporting dups. - if { [is_record_based $method] == 1 || \ - [is_rbtree $method] == 1 } { - puts "Skipping remainder of test096 for method $method" - if { $closeenv == 1 } { - error_check_good envclose [$env close] 0 - } - return - } - set ret [berkdb dbremove -env $env -auto_commit $testfile] - set ret [berkdb dbremove -env $env -auto_commit $overflowfile] - - puts "\tTest096.j: Create $nentries entries with $ndups duplicates" - set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \ - -create -env $env $omethod -mode 0644} $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - t96_populate $db $omethod $env $nentries $ndups - - set dlist "" - for { set i 1 } {$i <= $ndups} {incr i} { - lappend dlist $i - } - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - dup_check $db $txn $t1 $dlist - error_check_good txn [$t commit] 0 - puts "\tTest096.k: Verify off page duplicates status" - set stat [$db stat] - error_check_bad stat:offpage [is_substr $stat \ - "{{Duplicate pages} 0}"] 1 - - set recs [expr $ndups * $nentries] - error_check_good dbclose [$db close] 0 - - puts "\tTest096.l: Truncate database in a txn then abort" - txn_truncate $env $omethod $testfile $recs abort - - set db [eval 
{berkdb_open -auto_commit -env $env} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - - set number [number_of_entries $db $method] - error_check_good number_of_entries $number $recs - error_check_good dbclose [$db close] 0 - - puts "\tTest096.m: Truncate database in a txn then commit" - txn_truncate $env $omethod $testfile $recs commit - - set db [berkdb_open -auto_commit -env $env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set number [number_of_entries $db $method] - error_check_good number_of_entries $number 0 - error_check_good dbclose [$db close] 0 - - set testdir [get_home $env] - error_check_good dbverify [verify_dir $testdir "\tTest096.n: "] 0 - - # Remove database, and create a new one with dups. Test - # truncate + write within a transaction. - puts "\tTest096.o: Create $nentries entries with $ndups duplicates" - set ret [berkdb dbremove -env $env -auto_commit $testfile] - set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \ - -create -env $env $omethod -mode 0644} $args $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - t96_populate $db $omethod $env $nentries $ndups - - set dlist "" - for { set i 1 } {$i <= $ndups} {incr i} { - lappend dlist $i - } - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - dup_check $db $txn $t1 $dlist - error_check_good txn [$t commit] 0 - puts "\tTest096.p: Verify off page duplicates status" - set stat [$db stat] - error_check_bad stat:offpage [is_substr $stat \ - "{{Duplicate pages} 0}"] 1 - - set recs [expr $ndups * $nentries] - error_check_good dbclose [$db close] 0 - - puts "\tTest096.q: Truncate and write in a txn, then abort" - txn_truncate $env $omethod $testfile $recs abort 1 - - set db [eval {berkdb_open -auto_commit -env $env} $args $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set number [number_of_entries $db $method] - error_check_good number_of_entries $number $recs - error_check_good dbclose 
[$db close] 0 - - puts "\tTest096.r: Truncate and write in a txn, then commit" - txn_truncate $env $omethod $testfile $recs commit 1 - - set db [berkdb_open -auto_commit -env $env $testfile] - error_check_good dbopen [is_valid_db $db] TRUE - set number [number_of_entries $db $method] - error_check_good number_of_entries $number [expr $recs / 2] - error_check_good dbclose [$db close] 0 - - puts "\tTest096.s: Check overflow pages with dups." - set ndups 3 - set db [eval {berkdb_open -create -auto_commit -pagesize 512 \ - -env $env $omethod -dup -mode 0644} $args $overflowfile] - error_check_good db_open [is_valid_db $db] TRUE - - for { set i 1 } { $i <= $noverflows } { incr i } { - for { set j 0 } { $j < $ndups } { incr j } { - set ret [eval {$db put} -auto_commit \ - $i [chop_data $method "$i.$j$data"]] - } - } - - set stat [$db stat] - error_check_bad stat:overflow [is_substr $stat \ - "{{Overflow pages} 0}"] 1 - - set nentries [expr $noverflows * $ndups] - error_check_good overflow_truncate [$db truncate] $nentries - error_check_good overflow_close [$db close] 0 - - set testdir [get_home $env] - error_check_good dbverify [verify_dir $testdir "\tTest096.t: "] 0 - - if { $closeenv == 1 } { - error_check_good envclose [$env close] 0 - } -} - -proc t96_populate {db method env nentries {ndups 1}} { - global datastr - global pad_datastr - source ./include.tcl - - set did [open $dict] - set count 0 - set txn "" - set pflags "" - set gflags "" - - if { [is_record_based $method] == 1 } { - append gflags "-recno" - } - set pad_datastr [pad_data $method $datastr] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - if { $ndups > 1 } { - for { set i 1 } { $i <= $ndups } { incr i } { - set datastr $i:$str - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - set ret [eval {$db put} $txn $pflags \ - {$key [chop_data $method $datastr]}] 
- error_check_good put $ret 0 - error_check_good txn [$t commit] 0 - } - } else { - set datastr [reverse $str] - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - set ret [eval {$db put} \ - $txn $pflags {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - error_check_good txn [$t commit] 0 - } - set ret [eval {$db get} $gflags {$key}] - error_check_good $key:dbget [llength $ret] $ndups - incr count - } - close $did -} - -proc number_of_entries { db method } { - if { [is_record_based $method] == 1 } { - set dbc [$db cursor] - set last [$dbc get -last] - if {[llength $last] == 0} { - set number 0 - } else { - set number [lindex [lindex $last 0] 0] - } - } else { - set ret [$db get -glob *] - set number [llength $ret] - } - return $number -} - -# Open database. Truncate in a transaction, optionally with a write -# included in the transaction as well, then abort or commit. Close database. - -proc txn_truncate { env method testfile nentries op {write 0}} { - set db [eval {berkdb_open -create -auto_commit \ - -env $env $method -mode 0644} $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - set txn [$env txn] - error_check_good txnbegin [is_valid_txn $txn $env] TRUE - - set ret [$db truncate -txn $txn] - error_check_good dbtrunc $ret $nentries - if { $write == 1 } { - for {set i 1} {$i <= [expr $nentries / 2]} {incr i} { - set ret [eval {$db put} -txn $txn \ - {$i [chop_data $method "aaaaaaaaaa"]}] - error_check_good write $ret 0 - } - } - - error_check_good txn$op [$txn $op] 0 - error_check_good db_close [$db close] 0 -} - diff --git a/storage/bdb/test/test097.tcl b/storage/bdb/test/test097.tcl deleted file mode 100644 index 2a6234e00a5..00000000000 --- a/storage/bdb/test/test097.tcl +++ /dev/null @@ -1,188 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test097.tcl,v 11.11 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test097 -# TEST Open up a large set of database files simultaneously. -# TEST Adjust for local file descriptor resource limits. -# TEST Then use the first 1000 entries from the dictionary. -# TEST Insert each with self as key and a fixed, medium length data string; -# TEST retrieve each. After all are entered, retrieve all; compare output -# TEST to original. - -proc test097 { method {ndbs 500} {nentries 400} args } { - global pad_datastr - source ./include.tcl - - set largs [convert_args $method $args] - set encargs "" - set largs [split_encargs $largs encargs] - - # Open an environment, with a 1MB cache. - set eindex [lsearch -exact $largs "-env"] - if { $eindex != -1 } { - incr eindex - set env [lindex $largs $eindex] - puts "Test097: $method: skipping for env $env" - return - } - env_cleanup $testdir - set env [eval {berkdb_env -create \ - -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs] - error_check_good dbenv [is_valid_env $env] TRUE - - # Create the database and open the dictionary - set basename test097 - set t1 $testdir/t1 - set t2 $testdir/t2 - set t3 $testdir/t3 - # - # When running with HAVE_MUTEX_SYSTEM_RESOURCES, - # we can run out of mutex lock slots due to the nature of this test. - # So, for this test, increase the number of pages per extent - # to consume fewer resources. - # - if { [is_queueext $method] } { - set numdb [expr $ndbs / 4] - set eindex [lsearch -exact $largs "-extent"] - error_check_bad extent $eindex -1 - incr eindex - set extval [lindex $largs $eindex] - set extval [expr $extval * 4] - set largs [lreplace $largs $eindex $eindex $extval] - } - puts -nonewline "Test097: $method ($largs) " - puts "$nentries entries in at most $ndbs simultaneous databases" - - puts "\tTest097.a: Simultaneous open" - set numdb [test097_open tdb $ndbs $method $env $basename $largs] - if { $numdb == 0 } { - puts "\tTest097: Insufficient resources available -- skipping." 
- error_check_good envclose [$env close] 0 - return - } - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 0 - - # Here is the loop where we put and get each key/data pair - if { [is_record_based $method] == 1 } { - append gflags "-recno" - } - puts "\tTest097.b: put/get on $numdb databases" - set datastr "abcdefghij" - set pad_datastr [pad_data $method $datastr] - while { [gets $did str] != -1 && $count < $nentries } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } else { - set key $str - } - for { set i 1 } { $i <= $numdb } { incr i } { - set ret [eval {$tdb($i) put} $txn $pflags \ - {$key [chop_data $method $datastr]}] - error_check_good put $ret 0 - set ret [eval {$tdb($i) get} $gflags {$key}] - error_check_good get $ret [list [list $key \ - [pad_data $method $datastr]]] - } - incr count - } - close $did - - # Now we will get each key from the DB and compare the results - # to the original. - puts "\tTest097.c: dump and check files" - for { set j 1 } { $j <= $numdb } { incr j } { - dump_file $tdb($j) $txn $t1 test097.check - error_check_good db_close [$tdb($j) close] 0 - - # Now compare the keys to see if they match the dictionary - if { [is_record_based $method] == 1 } { - set oid [open $t2 w] - for {set i 1} {$i <= $nentries} {set i [incr i]} { - puts $oid $i - } - close $oid - filesort $t2 $t3 - file rename -force $t3 $t2 - } else { - set q q - filehead $nentries $dict $t3 - filesort $t3 $t2 - } - filesort $t1 $t3 - - error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0 - } - error_check_good envclose [$env close] 0 -} - -# Check function for test097; data should be fixed are identical -proc test097.check { key data } { - global pad_datastr - error_check_good "data mismatch for key $key" $data $pad_datastr -} - -proc test097_open { tdb ndbs method env basename largs } { - global errorCode - upvar $tdb db - - set j 0 - set numdb $ndbs - if { [is_queueext $method] } { - set numdb [expr $ndbs / 
4] - } - set omethod [convert_method $method] - for { set i 1 } {$i <= $numdb } { incr i } { - set stat [catch {eval {berkdb_open -env $env \ - -pagesize 512 -create -mode 0644} \ - $largs {$omethod $basename.$i.db}} db($i)] - # - # Check if we've reached our limit - # - if { $stat == 1 } { - set min 20 - set em [is_substr $errorCode EMFILE] - set en [is_substr $errorCode ENFILE] - error_check_good open_ret [expr $em || $en] 1 - puts \ - "\tTest097.a.1 Encountered resource limits opening $i files, adjusting" - if { [is_queueext $method] } { - set end [expr $j / 4] - set min 10 - } else { - set end [expr $j - 10] - } - # - # If we cannot open even $min files, then this test is - # not very useful. Close up shop and go back. - # - if { $end < $min } { - test097_close db 1 $j - return 0 - } - test097_close db [expr $end + 1] $j - return $end - } else { - error_check_good dbopen [is_valid_db $db($i)] TRUE - set j $i - } - } - return $j -} - -proc test097_close { tdb start end } { - upvar $tdb db - - for { set i $start } { $i <= $end } { incr i } { - error_check_good db($i)close [$db($i) close] 0 - } -} diff --git a/storage/bdb/test/test098.tcl b/storage/bdb/test/test098.tcl deleted file mode 100644 index af6b6a6c607..00000000000 --- a/storage/bdb/test/test098.tcl +++ /dev/null @@ -1,91 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2002-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test098.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test098 -# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and -# TEST a secondary, and do a normal cursor get followed by a get_recno. -# TEST (This is a smoke test for "Bug #1" in [#5811].) - -proc test098 { method args } { - source ./include.tcl - - set omethod [convert_method $method] - set args [convert_args $method $args] - - puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices." 
- - if { [is_rbtree $method] != 1 } { - puts "\tTest098: Skipping for method $method." - return - } - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - set txn "" - set auto "" - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. - if { $eindex == -1 } { - set base $testdir/test098 - set env NULL - } else { - set base test098 - incr eindex - set env [lindex $args $eindex] - set rpcenv [is_rpcenv $env] - if { $rpcenv == 1 } { - puts "Test098: Skipping for RPC" - return - } - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - set auto " -auto_commit " - } - set testdir [get_home $env] - } - cleanup $testdir $env - - puts "\tTest098.a: Set up databases." - - set adb [eval {berkdb_open} $omethod $args $auto \ - {-create} $base-primary.db] - error_check_good adb_create [is_valid_db $adb] TRUE - - set bdb [eval {berkdb_open} $omethod $args $auto \ - {-create} $base-secondary.db] - error_check_good bdb_create [is_valid_db $bdb] TRUE - - set ret [eval $adb associate $auto [callback_n 0] $bdb] - error_check_good associate $ret 0 - - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set ret [eval {$adb put} $txn aaa data1] - error_check_good put $ret 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - - set bc [$bdb cursor] - error_check_good cursor [is_valid_cursor $bc $bdb] TRUE - - puts "\tTest098.b: c_get(DB_FIRST) on the secondary." - error_check_good get_first [$bc get -first] \ - [list [list [[callback_n 0] aaa data1] data1]] - - puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary." 
- error_check_good get_recno [$bc get -get_recno] 1 - - error_check_good c_close [$bc close] 0 - - error_check_good bdb_close [$bdb close] 0 - error_check_good adb_close [$adb close] 0 -} diff --git a/storage/bdb/test/test099.tcl b/storage/bdb/test/test099.tcl deleted file mode 100644 index 9bdc0d7af42..00000000000 --- a/storage/bdb/test/test099.tcl +++ /dev/null @@ -1,276 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test099.tcl,v 1.7 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test099 -# TEST -# TEST Test of DB->get and DBC->c_get with set_recno and get_recno. -# TEST -# TEST Populate a small btree -recnum database. -# TEST After all are entered, retrieve each using -recno with DB->get. -# TEST Open a cursor and do the same for DBC->c_get with set_recno. -# TEST Verify that set_recno sets the record number position properly. -# TEST Verify that get_recno returns the correct record numbers. -# TEST -# TEST Using the same database, open 3 cursors and position one at -# TEST the beginning, one in the middle, and one at the end. Delete -# TEST by cursor and check that record renumbering is done properly. -# -proc test099 { method {nentries 10000} args } { - source ./include.tcl - - set args [convert_args $method $args] - set omethod [convert_method $method] - - puts "Test099: Test of set_recno and get_recno in DBC->c_get." - if { [is_rbtree $method] != 1 } { - puts "Test099: skipping for method $method." - return - } - - set txnenv 0 - set eindex [lsearch -exact $args "-env"] - # - # If we are using an env, then testfile should just be the db name. - # Otherwise it is the test directory and the name. 
- if { $eindex == -1 } { - set testfile $testdir/test099.db - set env NULL - } else { - set testfile test099.db - incr eindex - set env [lindex $args $eindex] - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append args " -auto_commit " - # - # If we are using txns and running with the - # default, set the default down a bit. - # - if { $nentries == 10000 } { - set nentries 100 - } - } - set testdir [get_home $env] - } - set t1 $testdir/t1 - cleanup $testdir $env - - # Create the database and open the dictionary - set db [eval {berkdb_open \ - -create -mode 0644} $args {$omethod $testfile}] - error_check_good dbopen [is_valid_db $db] TRUE - - set did [open $dict] - - set pflags "" - set gflags "" - set txn "" - set count 1 - - append gflags " -recno" - - puts "\tTest099.a: put loop" - # Here is the loop where we put each key/data pair - while { [gets $did str] != -1 && $count <= $nentries } { - set key $str - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set r [eval {$db put} \ - $txn $pflags {$key [chop_data $method $str]}] - error_check_good db_put $r 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - incr count - } - close $did - - puts "\tTest099.b: dump file" - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - dump_file $db $txn $t1 test099.check - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - - puts "\tTest099.c: Test set_recno then get_recno" - set db [eval {berkdb_open -rdonly} $args $omethod $testfile ] - error_check_good dbopen [is_valid_db $db] TRUE - - # Open a cursor - if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE - - set did [open $t1] - set recno 1 - - # Create 
key(recno) array to use for later comparison - while { [gets $did str] != -1 } { - set kvals($recno) $str - incr recno - } - - set recno 1 - set ret [$dbc get -first] - error_check_bad dbc_get_first [llength $ret] 0 - - # First walk forward through the database .... - while { $recno < $count } { - # Test set_recno: verify it sets the record number properly. - set current [$dbc get -current] - set r [$dbc get -set_recno $recno] - error_check_good set_recno $current $r - # Test set_recno: verify that we find the expected key - # at the current record number position. - set k [lindex [lindex $r 0] 0] - error_check_good set_recno $kvals($recno) $k - - # Test get_recno: verify that the return from - # get_recno matches the record number just set. - set g [$dbc get -get_recno] - error_check_good get_recno $recno $g - set ret [$dbc get -next] - incr recno - } - - # ... and then backward. - set recno [expr $count - 1] - while { $recno > 0 } { - # Test set_recno: verify that we find the expected key - # at the current record number position. - set r [$dbc get -set_recno $recno] - set k [lindex [lindex $r 0] 0] - error_check_good set_recno $kvals($recno) $k - - # Test get_recno: verify that the return from - # get_recno matches the record number just set. - set g [$dbc get -get_recno] - error_check_good get_recno $recno $g - set recno [expr $recno - 1] - } - - error_check_good cursor_close [$dbc close] 0 - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 - close $did - - puts "\tTest099.d: Test record renumbering with cursor deletes." - # Reopen the database, this time with write permission. - set db [eval {berkdb_open} $args $omethod $testfile ] - error_check_good dbopen [is_valid_db $db] TRUE - - # Open three cursors. 
- if { $txnenv == 1 } { - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - set dbc0 [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc0 $db] TRUE - set dbc1 [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc1 $db] TRUE - set dbc2 [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE - - # Initialize cursor positions. Set dbc0 at the beginning, - # dbc1 at the middle, and dbc2 at the end. - set ret [$dbc0 get -first] - error_check_bad dbc0_get_first [llength $ret] 0 - - set middle [expr $nentries / 2 + 1] - set ret [$dbc1 get -set_recno $middle] - error_check_bad dbc1_get_middle [llength $ret] 0 - - set ret [$dbc2 get -last] - error_check_bad dbc2_get_last [llength $ret] 0 - - # At each iteration, delete the first entry, delete the middle - # entry, and check the record number for beginning, middle and end. - set count 1 - while { $count <= [expr $nentries / 2] } { - # Delete first item. - error_check_good dbc0_del [$dbc0 del] 0 - - # For non-txn env's, check that db_stat is recalculating - # to adjust for items marked for deletion. We can't do this - # in txn env's because the live txn will cause deadlock. - if { $txnenv == 0 } { - set nkeys [expr $nentries - [expr $count * 2] + 1] - set stat [$db stat] - error_check_good keys_after_delete [is_substr $stat \ - "{Number of keys} $nkeys"] 1 - error_check_good records_after_delete [is_substr $stat \ - "{Number of records} $nkeys"] 1 - - # Now delete the same entry again (which should not - # change the database) and make sure db->stat returns - # the same number of keys and records as before. 
- catch {[$dbc0 del]} result - - set stat [$db stat] - error_check_good keys_after_baddelete [is_substr $stat \ - "{Number of keys} $nkeys"] 1 - error_check_good recs_after_baddelete [is_substr $stat \ - "{Number of records} $nkeys"] 1 - } - - # Reposition cursor to new first item, check that record number - # is 1. - set ret0 [$dbc0 get -next] - error_check_good beginning_recno [$dbc0 get -get_recno] 1 - - # Calculate the current middle recno and compare to actual. - set middle [$dbc1 get -get_recno] - set calcmiddle [expr [expr $nentries / 2] - $count + 1] - error_check_good middle_recno $middle $calcmiddle - - # Delete middle item, reposition cursor to next item. - error_check_good dbc1_del [$dbc1 del] 0 - set ret1 [$dbc1 get -next] - - # Calculate the expected end recno and compare to actual. - set end [$dbc2 get -get_recno] - set calcend [expr $nentries - [expr $count * 2]] - # On the last iteration, all items have been deleted so - # there is no recno. - if { $calcend == 0 } { - error_check_good end_recno $end "" - } else { - error_check_good end_recno $end $calcend - } - incr count - } - - # Close all three cursors. - error_check_good cursor_close [$dbc0 close] 0 - error_check_good cursor_close [$dbc1 close] 0 - error_check_good cursor_close [$dbc2 close] 0 - - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} - -# Check function for dumped file; data should be fixed are identical -proc test099.check { key data } { - error_check_good "data mismatch for key $key" $key $data -} diff --git a/storage/bdb/test/test100.tcl b/storage/bdb/test/test100.tcl deleted file mode 100644 index 9d87331dc08..00000000000 --- a/storage/bdb/test/test100.tcl +++ /dev/null @@ -1,17 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: test100.tcl,v 11.5 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test100 -# TEST Test for functionality near the end of the queue -# TEST using test025 (DB_APPEND). -proc test100 { method {nentries 10000} {tnum "100"} args} { - if { [is_queueext $method ] == 0 } { - puts "Skipping test$tnum for $method." - return; - } - eval {test025 $method $nentries 4294967000 $tnum} $args -} diff --git a/storage/bdb/test/test101.tcl b/storage/bdb/test/test101.tcl deleted file mode 100644 index 63384dd1776..00000000000 --- a/storage/bdb/test/test101.tcl +++ /dev/null @@ -1,17 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: test101.tcl,v 11.6 2004/01/28 03:36:32 bostic Exp $ -# -# TEST test101 -# TEST Test for functionality near the end of the queue -# TEST using test070 (DB_CONSUME). -proc test101 { method {nentries 1000} {txn -txn} {tnum "101"} args} { - if { [is_queueext $method ] == 0 } { - puts "Skipping test$tnum for $method." - return; - } - eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args -} diff --git a/storage/bdb/test/testparams.tcl b/storage/bdb/test/testparams.tcl deleted file mode 100644 index 16ef7c9b0b0..00000000000 --- a/storage/bdb/test/testparams.tcl +++ /dev/null @@ -1,336 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: testparams.tcl,v 11.200 2004/10/12 16:22:14 sue Exp $ - -global one_test -global serial_tests -set serial_tests {rep002 rep005} - -set subs {bigfile dead env fop lock log memp mutex recd rep rpc rsrc\ - sdb sdbtest sec si test txn} - -set test_names(bigfile) [list bigfile001 bigfile002] -set test_names(dead) [list dead001 dead002 dead003 dead004 dead005 dead006 \ - dead007] -set test_names(elect) [list rep002 rep005 rep016 rep020 rep022] -set test_names(env) [list env001 env002 env003 env004 env005 env006 \ - env007 env008 env009 env010 env011] -set test_names(fop) [list fop001 fop002 fop003 fop004 fop005 fop006] -set test_names(lock) [list lock001 lock002 lock003 lock004 lock005 lock006] -set test_names(log) [list log001 log002 log003 log004 log005 log006] -set test_names(memp) [list memp001 memp002 memp003 memp004] -set test_names(mutex) [list mutex001 mutex002 mutex003] -set test_names(recd) [list recd001 recd002 recd003 recd004 recd005 recd006 \ - recd007 recd008 recd009 recd010 recd011 recd012 recd013 recd014 recd015 \ - recd016 recd017 recd018 recd019 recd020 ] -set test_names(rep) [list rep001 rep002 rep003 rep005 rep006 rep007 \ - rep008 rep009 rep010 rep011 rep012 rep013 rep014 rep015 rep016 rep017 \ - rep018 rep019 rep020 rep021 rep022 rep023 rep024 rep026 rep027 rep028 \ - rep029 rep030 rep031 rep032 rep033 rep034 rep035 rep036 rep037] -set test_names(rpc) [list rpc001 rpc002 rpc003 rpc004 rpc005 rpc006] -set test_names(rsrc) [list rsrc001 rsrc002 rsrc003 rsrc004] -set test_names(sdb) [list sdb001 sdb002 sdb003 sdb004 sdb005 sdb006 \ - sdb007 sdb008 sdb009 sdb010 sdb011 sdb012] -set test_names(sdbtest) [list sdbtest001 sdbtest002] -set test_names(sec) [list sec001 sec002] -set test_names(si) [list si001 si002 si003 si004 si005] -set test_names(test) [list test001 test002 test003 test004 test005 \ - test006 test007 test008 test009 test010 test011 test012 test013 test014 \ - test015 test016 test017 test018 test019 test020 test021 test022 test023 
\ - test024 test025 test026 test027 test028 test029 test030 test031 test032 \ - test033 test034 test035 test036 test037 test038 test039 test040 test041 \ - test042 test043 test044 test045 test046 test047 test048 test049 test050 \ - test051 test052 test053 test054 test055 test056 test057 test058 test059 \ - test060 test061 test062 test063 test064 test065 test066 test067 test068 \ - test069 test070 test071 test072 test073 test074 test076 test077 \ - test078 test079 test081 test082 test083 test084 test085 test086 \ - test087 test088 test089 test090 test091 test092 test093 test094 test095 \ - test096 test097 test098 test099 test100 test101 test102 test103 test107 \ - test109 ] -set test_names(txn) [list txn001 txn002 txn003 txn004 txn005 txn006 \ - txn007 txn008 txn009 txn010 txn011] - -set rpc_tests(berkeley_db_svc) [concat $test_names(test) $test_names(sdb)] -set rpc_tests(berkeley_db_cxxsvc) $test_names(test) -set rpc_tests(berkeley_db_javasvc) $test_names(test) - -# JE tests are a subset of regular RPC tests -- exclude these ones. -# be fixable by modifying tests dealing with unsorted duplicates, second line -# will probably never work unless certain features are added to JE (record -# numbers, bulk get, etc.). -set je_exclude {(?x) # Turn on extended syntax - test(010|026|027|028|030|031|032|033|034| # These should be fixable by - 035|039|041|046|047|054|056|057|062| # modifying tests to avoid - 066|073|081|085)| # unsorted dups, etc. - - test(011|017|018|022|023|024|029|040|049| # Not expected to work with - 062|083|095) # JE until / unless features - # are added to JE (record - # numbers, bulk gets, etc.) -} -set rpc_tests(berkeley_dbje_svc) [lsearch -all -inline -not -regexp \ - $rpc_tests(berkeley_db_svc) $je_exclude] - -# Source all the tests, whether we're running one or many. -foreach sub $subs { - foreach test $test_names($sub) { - source $test_path/$test.tcl - } -} - -# Reset test_names if we're running only one test. 
-if { $one_test != "ALL" } { - foreach sub $subs { - set test_names($sub) "" - } - set type [string trim $one_test 0123456789] - set test_names($type) [list $one_test] -} - -source $test_path/archive.tcl -source $test_path/byteorder.tcl -source $test_path/dbm.tcl -source $test_path/foputils.tcl -source $test_path/hsearch.tcl -source $test_path/join.tcl -source $test_path/logtrack.tcl -source $test_path/ndbm.tcl -source $test_path/parallel.tcl -source $test_path/reputils.tcl -source $test_path/sdbutils.tcl -source $test_path/shelltest.tcl -source $test_path/sijointest.tcl -source $test_path/siutils.tcl -source $test_path/testutils.tcl -source $test_path/upgrade.tcl - -set parms(recd001) 0 -set parms(recd002) 0 -set parms(recd003) 0 -set parms(recd004) 0 -set parms(recd005) "" -set parms(recd006) 0 -set parms(recd007) "" -set parms(recd008) {4 4} -set parms(recd009) 0 -set parms(recd010) 0 -set parms(recd011) {200 15 1} -set parms(recd012) {0 49 25 100 5} -set parms(recd013) 100 -set parms(recd014) "" -set parms(recd015) "" -set parms(recd016) "" -set parms(recd017) 0 -set parms(recd018) 10 -set parms(recd019) 50 -set parms(recd020) "" -set parms(rep001) {1000 "001"} -set parms(rep002) {10 3 "002"} -set parms(rep003) "003" -set parms(rep005) "" -set parms(rep006) {1000 "006"} -set parms(rep007) {10 "007"} -set parms(rep008) {10 "008"} -set parms(rep009) {10 "009"} -set parms(rep010) {100 "010"} -set parms(rep011) "011" -set parms(rep012) {10 "012"} -set parms(rep013) {10 "013"} -set parms(rep014) {10 "014"} -set parms(rep015) {100 "015" 3} -set parms(rep016) "" -set parms(rep017) {10 "017"} -set parms(rep018) {10 "018"} -set parms(rep019) {3 "019"} -set parms(rep020) "" -set parms(rep021) {3 "021"} -set parms(rep022) "" -set parms(rep023) {10 "023"} -set parms(rep024) {1000 "024"} -set parms(rep026) "" -set parms(rep027) {1000 "027"} -set parms(rep028) {100 "028"} -set parms(rep029) {200 "029"} -set parms(rep030) {500 "030"} -set parms(rep031) {200 "031"} -set 
parms(rep032) {200 "032"} -set parms(rep033) {200 "033"} -set parms(rep034) {2 "034"} -set parms(rep035) {100 "035"} -set parms(rep036) {200 "036"} -set parms(subdb001) "" -set parms(subdb002) 10000 -set parms(subdb003) 1000 -set parms(subdb004) "" -set parms(subdb005) 100 -set parms(subdb006) 100 -set parms(subdb007) "" -set parms(subdb008) "" -set parms(subdb009) "" -set parms(subdb010) "" -set parms(subdb011) {13 10} -set parms(subdb012) "" -set parms(sdb001) "" -set parms(sdb002) 10000 -set parms(sdb003) 1000 -set parms(sdb004) "" -set parms(sdb005) 100 -set parms(sdb006) 100 -set parms(sdb007) "" -set parms(sdb008) "" -set parms(sdb009) "" -set parms(sdb010) "" -set parms(sdb011) {13 10} -set parms(sdb012) "" -set parms(si001) {200 1} -set parms(si002) {200 2} -set parms(si003) {200 3} -set parms(si004) {200 4} -set parms(si005) {200 5} -set parms(test001) {10000 0 0 "001"} -set parms(test002) 10000 -set parms(test003) "" -set parms(test004) {10000 "004" 0} -set parms(test005) 10000 -set parms(test006) {10000 0 "006" 5} -set parms(test007) {10000 "007" 5} -set parms(test008) {"008" 0} -set parms(test009) "" -set parms(test010) {10000 5 "010"} -set parms(test011) {10000 5 "011"} -set parms(test012) "" -set parms(test013) 10000 -set parms(test014) 10000 -set parms(test015) {7500 0} -set parms(test016) 10000 -set parms(test017) {0 19 "017"} -set parms(test018) 10000 -set parms(test019) 10000 -set parms(test020) 10000 -set parms(test021) 10000 -set parms(test022) "" -set parms(test023) "" -set parms(test024) 10000 -set parms(test025) {10000 0 "025"} -set parms(test026) {2000 5 "026"} -set parms(test027) {100} -set parms(test028) "" -set parms(test029) 10000 -set parms(test030) 10000 -set parms(test031) {10000 5 "031"} -set parms(test032) {10000 5 "032"} -set parms(test033) {10000 5 "033"} -set parms(test034) 10000 -set parms(test035) 10000 -set parms(test036) 10000 -set parms(test037) 100 -set parms(test038) {10000 5 "038"} -set parms(test039) {10000 5 "039"} -set 
parms(test040) 10000 -set parms(test041) 10000 -set parms(test042) 1000 -set parms(test043) 10000 -set parms(test044) {5 10 0} -set parms(test045) 1000 -set parms(test046) "" -set parms(test047) "" -set parms(test048) "" -set parms(test049) "" -set parms(test050) "" -set parms(test051) "" -set parms(test052) "" -set parms(test053) "" -set parms(test054) "" -set parms(test055) "" -set parms(test056) "" -set parms(test057) "" -set parms(test058) "" -set parms(test059) "" -set parms(test060) "" -set parms(test061) "" -set parms(test062) {200 200 "062"} -set parms(test063) "" -set parms(test064) "" -set parms(test065) "" -set parms(test066) "" -set parms(test067) {1000 "067"} -set parms(test068) "" -set parms(test069) {50 "069"} -set parms(test070) {4 2 1000 CONSUME 0 -txn "070"} -set parms(test071) {1 1 10000 CONSUME 0 -txn "071"} -set parms(test072) {512 20 "072"} -set parms(test073) {512 50 "073"} -set parms(test074) {-nextnodup 100 "074"} -set parms(test076) {1000 "076"} -set parms(test077) {1000 "077"} -set parms(test078) {100 512 "078"} -set parms(test079) {10000 512 "079" 20} -set parms(test081) {13 "081"} -set parms(test082) {-prevnodup 100 "082"} -set parms(test083) {512 5000 2} -set parms(test084) {10000 "084" 65536} -set parms(test085) {512 3 10 "085"} -set parms(test086) "" -set parms(test087) {512 50 "087"} -set parms(test088) "" -set parms(test089) 1000 -set parms(test090) {10000 "090"} -set parms(test091) {4 2 1000 0 "091"} -set parms(test092) {1000} -set parms(test093) {10000 "093"} -set parms(test094) {10000 10 "094"} -set parms(test095) {"095"} -set parms(test096) {512 1000 19} -set parms(test097) {500 400} -set parms(test098) "" -set parms(test099) 10000 -set parms(test100) {10000 "100"} -set parms(test101) {1000 -txn "101"} -set parms(test102) {1000 "102"} -set parms(test103) {100 4294967250 "103"} -set parms(test107) "" -set parms(test109) {"109"} - -# RPC server executables. Each of these is tested (if it exists) -# when running the RPC tests. 
-set svc_list { berkeley_db_svc berkeley_db_cxxsvc \ - berkeley_db_javasvc berkeley_dbje_svc } -set rpc_svc berkeley_db_svc - -# Shell script tests. Each list entry is a {directory filename} pair, -# invoked with "/bin/sh filename". -set shelltest_list { - { scr001 chk.code } - { scr002 chk.def } - { scr003 chk.define } - { scr004 chk.javafiles } - { scr005 chk.nl } - { scr006 chk.offt } - { scr007 chk.proto } - { scr008 chk.pubdef } - { scr009 chk.srcfiles } - { scr010 chk.str } - { scr011 chk.tags } - { scr012 chk.vx_code } - { scr013 chk.stats } - { scr014 chk.err } - { scr015 chk.cxxtests } - { scr016 chk.javatests } - { scr017 chk.db185 } - { scr018 chk.comma } - { scr019 chk.include } - { scr020 chk.inc } - { scr021 chk.flags } - { scr022 chk.rr } - { scr023 chk.q } - { scr024 chk.bdb } - { scr025 chk.cxxmulti } - { scr026 chk.method } - { scr027 chk.javas } - { scr028 chk.rtc } - { scr029 chk.get } - { scr030 chk.build } -} diff --git a/storage/bdb/test/testutils.tcl b/storage/bdb/test/testutils.tcl deleted file mode 100644 index ded9bc5ce7d..00000000000 --- a/storage/bdb/test/testutils.tcl +++ /dev/null @@ -1,3458 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: testutils.tcl,v 11.198 2004/09/28 15:02:18 carol Exp $ -# -# Test system utilities -# -# Timestamp -- print time along with elapsed time since last invocation -# of timestamp. -proc timestamp {{opt ""}} { - global __timestamp_start - - set now [clock seconds] - - # -c accurate to the click, instead of the second. - # -r seconds since the Epoch - # -t current time in the format expected by db_recover -t. - # -w wallclock time - # else wallclock plus elapsed time. 
- if {[string compare $opt "-r"] == 0} { - return $now - } elseif {[string compare $opt "-t"] == 0} { - return [clock format $now -format "%y%m%d%H%M.%S"] - } elseif {[string compare $opt "-w"] == 0} { - return [clock format $now -format "%c"] - } else { - if {[string compare $opt "-c"] == 0} { - set printclicks 1 - } else { - set printclicks 0 - } - - if {[catch {set start $__timestamp_start}] != 0} { - set __timestamp_start $now - } - set start $__timestamp_start - - set elapsed [expr $now - $start] - set the_time [clock format $now -format ""] - set __timestamp_start $now - - if { $printclicks == 1 } { - set pc_print [format ".%08u" [__fix_num [clock clicks]]] - } else { - set pc_print "" - } - - format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \ - [__fix_num [clock format $now -format "%H"]] \ - [__fix_num [clock format $now -format "%M"]] \ - [__fix_num [clock format $now -format "%S"]] \ - [expr $elapsed / 3600] \ - [expr ($elapsed % 3600) / 60] \ - [expr ($elapsed % 3600) % 60] - } -} - -proc __fix_num { num } { - set num [string trimleft $num "0"] - if {[string length $num] == 0} { - set num "0" - } - return $num -} - -# Add a {key,data} pair to the specified database where -# key=filename and data=file contents. -proc put_file { db txn flags file } { - source ./include.tcl - - set fid [open $file r] - fconfigure $fid -translation binary - set data [read $fid] - close $fid - - set ret [eval {$db put} $txn $flags {$file $data}] - error_check_good put_file $ret 0 -} - -# Get a {key,data} pair from the specified database where -# key=filename and data=file contents and then write the -# data to the specified file. 
-proc get_file { db txn flags file outfile } { - source ./include.tcl - - set fid [open $outfile w] - fconfigure $fid -translation binary - if [catch {eval {$db get} $txn $flags {$file}} data] { - puts -nonewline $fid $data - } else { - # Data looks like {{key data}} - set data [lindex [lindex $data 0] 1] - puts -nonewline $fid $data - } - close $fid -} - -# Add a {key,data} pair to the specified database where -# key=file contents and data=file name. -proc put_file_as_key { db txn flags file } { - source ./include.tcl - - set fid [open $file r] - fconfigure $fid -translation binary - set filecont [read $fid] - close $fid - - # Use not the file contents, but the file name concatenated - # before the file contents, as a key, to ensure uniqueness. - set data $file$filecont - - set ret [eval {$db put} $txn $flags {$data $file}] - error_check_good put_file $ret 0 -} - -# Get a {key,data} pair from the specified database where -# key=file contents and data=file name -proc get_file_as_key { db txn flags file} { - source ./include.tcl - - set fid [open $file r] - fconfigure $fid -translation binary - set filecont [read $fid] - close $fid - - set data $file$filecont - - return [eval {$db get} $txn $flags {$data}] -} - -# open file and call dump_file to dumpkeys to tempfile -proc open_and_dump_file { - dbname env outfile checkfunc dump_func beg cont } { - global encrypt - global passwd - source ./include.tcl - - set encarg "" - if { $encrypt > 0 && $env == "NULL" } { - set encarg "-encryptany $passwd" - } - set envarg "" - set txn "" - set txnenv 0 - if { $env != "NULL" } { - append envarg " -env $env " - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append envarg " -auto_commit " - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - } - set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname] - error_check_good dbopen [is_valid_db $db] TRUE - $dump_func $db $txn $outfile $checkfunc $beg $cont - if { $txnenv == 1 
} { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} - -# open file and call dump_file to dumpkeys to tempfile -proc open_and_dump_subfile { - dbname env outfile checkfunc dump_func beg cont subdb} { - global encrypt - global passwd - source ./include.tcl - - set encarg "" - if { $encrypt > 0 && $env == "NULL" } { - set encarg "-encryptany $passwd" - } - set envarg "" - set txn "" - set txnenv 0 - if { $env != "NULL" } { - append envarg "-env $env" - set txnenv [is_txnenv $env] - if { $txnenv == 1 } { - append envarg " -auto_commit " - set t [$env txn] - error_check_good txn [is_valid_txn $t $env] TRUE - set txn "-txn $t" - } - } - set db [eval {berkdb open -rdonly -unknown} \ - $envarg $encarg {$dbname $subdb}] - error_check_good dbopen [is_valid_db $db] TRUE - $dump_func $db $txn $outfile $checkfunc $beg $cont - if { $txnenv == 1 } { - error_check_good txn [$t commit] 0 - } - error_check_good db_close [$db close] 0 -} - -# Sequentially read a file and call checkfunc on each key/data pair. -# Dump the keys out to the file specified by outfile. 
-proc dump_file { db txn outfile checkfunc } { - source ./include.tcl - - dump_file_direction $db $txn $outfile $checkfunc "-first" "-next" -} - -proc dump_file_direction { db txn outfile checkfunc start continue } { - source ./include.tcl - - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - dump_file_walk $c $outfile $checkfunc $start $continue - error_check_good curs_close [$c close] 0 -} - -proc dump_file_walk { c outfile checkfunc start continue {flag ""} } { - set outf [open $outfile w] - for {set d [eval {$c get} $flag $start] } \ - { [llength $d] != 0 } \ - {set d [eval {$c get} $flag $continue] } { - set kd [lindex $d 0] - set k [lindex $kd 0] - set d2 [lindex $kd 1] - $checkfunc $k $d2 - puts $outf $k - # XXX: Geoff Mainland - # puts $outf "$k $d2" - } - close $outf -} - -proc dump_binkey_file { db txn outfile checkfunc } { - source ./include.tcl - - dump_binkey_file_direction $db $txn $outfile $checkfunc \ - "-first" "-next" -} -proc dump_bin_file { db txn outfile checkfunc } { - source ./include.tcl - - dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next" -} - -# Note: the following procedure assumes that the binary-file-as-keys were -# inserted into the database by put_file_as_key, and consist of the file -# name followed by the file contents as key, to ensure uniqueness. 
-proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } { - source ./include.tcl - - set d1 $testdir/d1 - - set outf [open $outfile w] - - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - error_check_good db_cursor [is_valid_cursor $c $db] TRUE - - set inf $d1 - for {set d [$c get $begin] } { [llength $d] != 0 } \ - {set d [$c get $cont] } { - set kd [lindex $d 0] - set keyfile [lindex $kd 0] - set data [lindex $kd 1] - - set ofid [open $d1 w] - fconfigure $ofid -translation binary - - # Chop off the first few bytes--that's the file name, - # added for uniqueness in put_file_as_key, which we don't - # want in the regenerated file. - set namelen [string length $data] - set keyfile [string range $keyfile $namelen end] - puts -nonewline $ofid $keyfile - close $ofid - - $checkfunc $data $d1 - puts $outf $data - flush $outf - } - close $outf - error_check_good curs_close [$c close] 0 - fileremove $d1 -} - -proc dump_bin_file_direction { db txn outfile checkfunc begin cont } { - source ./include.tcl - - set d1 $testdir/d1 - - set outf [open $outfile w] - - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - - for {set d [$c get $begin] } \ - { [llength $d] != 0 } {set d [$c get $cont] } { - set k [lindex [lindex $d 0] 0] - set data [lindex [lindex $d 0] 1] - set ofid [open $d1 w] - fconfigure $ofid -translation binary - puts -nonewline $ofid $data - close $ofid - - $checkfunc $k $d1 - puts $outf $k - } - close $outf - error_check_good curs_close [$c close] 0 - fileremove -f $d1 -} - -proc make_data_str { key } { - set datastr "" - for {set i 0} {$i < 10} {incr i} { - append datastr $key - } - return $datastr -} - -proc error_check_bad { func result bad {txn 0}} { - if { [binary_compare $result $bad] == 0 } { - if { $txn != 0 } { - $txn abort - } - flush stdout - flush stderr - error "FAIL:[timestamp] $func returned error value $bad" - } -} - -proc error_check_good { 
func result desired {txn 0} } { - if { [binary_compare $desired $result] != 0 } { - if { $txn != 0 } { - $txn abort - } - flush stdout - flush stderr - error "FAIL:[timestamp]\ - $func: expected $desired, got $result" - } -} - -# Locks have the prefix of their manager. -proc is_substr { str sub } { - if { [string first $sub $str] == -1 } { - return 0 - } else { - return 1 - } -} - -proc is_serial { str } { - global serial_tests - - foreach test $serial_tests { - if { [is_substr $str $test] == 1 } { - return 1 - } - } - return 0 -} - -proc release_list { l } { - - # Now release all the locks - foreach el $l { - catch { $el put } ret - error_check_good lock_put $ret 0 - } -} - -proc debug { {stop 0} } { - global __debug_on - global __debug_print - global __debug_test - - set __debug_on 1 - set __debug_print 1 - set __debug_test $stop -} - -# Check if each key appears exactly [llength dlist] times in the file with -# the duplicate tags matching those that appear in dlist. -proc dup_check { db txn tmpfile dlist {extra 0}} { - source ./include.tcl - - set outf [open $tmpfile w] - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - set lastkey "" - set done 0 - while { $done != 1} { - foreach did $dlist { - set rec [$c get "-next"] - if { [string length $rec] == 0 } { - set done 1 - break - } - set key [lindex [lindex $rec 0] 0] - set fulldata [lindex [lindex $rec 0] 1] - set id [id_of $fulldata] - set d [data_of $fulldata] - if { [string compare $key $lastkey] != 0 && \ - $id != [lindex $dlist 0] } { - set e [lindex $dlist 0] - error "FAIL: \tKey \ - $key, expected dup id $e, got $id" - } - error_check_good dupget.data $d $key - error_check_good dupget.id $id $did - set lastkey $key - } - # - # Some tests add an extra dup (like overflow entries) - # Check id if it exists. 
- if { $extra != 0} { - set okey $key - set rec [$c get "-next"] - if { [string length $rec] != 0 } { - set key [lindex [lindex $rec 0] 0] - # - # If this key has no extras, go back for - # next iteration. - if { [string compare $key $lastkey] != 0 } { - set key $okey - set rec [$c get "-prev"] - } else { - set fulldata [lindex [lindex $rec 0] 1] - set id [id_of $fulldata] - set d [data_of $fulldata] - error_check_bad dupget.data1 $d $key - error_check_good dupget.id1 $id $extra - } - } - } - if { $done != 1 } { - puts $outf $key - } - } - close $outf - error_check_good curs_close [$c close] 0 -} - -# Check if each key appears exactly [llength dlist] times in the file with -# the duplicate tags matching those that appear in dlist. -proc dup_file_check { db txn tmpfile dlist } { - source ./include.tcl - - set outf [open $tmpfile w] - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - set lastkey "" - set done 0 - while { $done != 1} { - foreach did $dlist { - set rec [$c get "-next"] - if { [string length $rec] == 0 } { - set done 1 - break - } - set key [lindex [lindex $rec 0] 0] - if { [string compare $key $lastkey] != 0 } { - # - # If we changed files read in new contents. - # - set fid [open $key r] - fconfigure $fid -translation binary - set filecont [read $fid] - close $fid - } - set fulldata [lindex [lindex $rec 0] 1] - set id [id_of $fulldata] - set d [data_of $fulldata] - if { [string compare $key $lastkey] != 0 && \ - $id != [lindex $dlist 0] } { - set e [lindex $dlist 0] - error "FAIL: \tKey \ - $key, expected dup id $e, got $id" - } - error_check_good dupget.data $d $filecont - error_check_good dupget.id $id $did - set lastkey $key - } - if { $done != 1 } { - puts $outf $key - } - } - close $outf - error_check_good curs_close [$c close] 0 -} - -# Parse duplicate data entries of the form N:data. 
Data_of returns -# the data part; id_of returns the numerical part -proc data_of {str} { - set ndx [string first ":" $str] - if { $ndx == -1 } { - return "" - } - return [ string range $str [expr $ndx + 1] end] -} - -proc id_of {str} { - set ndx [string first ":" $str] - if { $ndx == -1 } { - return "" - } - - return [ string range $str 0 [expr $ndx - 1]] -} - -proc nop { {args} } { - return -} - -# Partial put test procedure. -# Munges a data val through three different partial puts. Stores -# the final munged string in the dvals array so that you can check -# it later (dvals should be global). We take the characters that -# are being replaced, make them capitals and then replicate them -# some number of times (n_add). We do this at the beginning of the -# data, at the middle and at the end. The parameters are: -# db, txn, key -- as per usual. Data is the original data element -# from which we are starting. n_replace is the number of characters -# that we will replace. n_add is the number of times we will add -# the replaced string back in. -proc partial_put { method db txn gflags key data n_replace n_add } { - global dvals - source ./include.tcl - - # Here is the loop where we put and get each key/data pair - # We will do the initial put and then three Partial Puts - # for the beginning, middle and end of the string. - - eval {$db put} $txn {$key [chop_data $method $data]} - - # Beginning change - set s [string range $data 0 [ expr $n_replace - 1 ] ] - set repl [ replicate [string toupper $s] $n_add ] - - # This is gross, but necessary: if this is a fixed-length - # method, and the chopped length of $repl is zero, - # it's because the original string was zero-length and our data item - # is all nulls. Set repl to something non-NULL. - if { [is_fixed_length $method] && \ - [string length [chop_data $method $repl]] == 0 } { - set repl [replicate "." 
$n_add] - } - - set newstr [chop_data $method $repl[string range $data $n_replace end]] - set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \ - $key [chop_data $method $repl]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags $txn {$key}] - error_check_good get $ret [list [list $key [pad_data $method $newstr]]] - - # End Change - set len [string length $newstr] - set spl [expr $len - $n_replace] - # Handle case where $n_replace > $len - if { $spl < 0 } { - set spl 0 - } - - set s [string range $newstr [ expr $len - $n_replace ] end ] - # Handle zero-length keys - if { [string length $s] == 0 } { set s "A" } - - set repl [ replicate [string toupper $s] $n_add ] - set newstr [chop_data $method \ - [string range $newstr 0 [expr $spl - 1 ] ]$repl] - - set ret [eval {$db put} $txn \ - {-partial [list $spl $n_replace] $key [chop_data $method $repl]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags $txn {$key}] - error_check_good get $ret [list [list $key [pad_data $method $newstr]]] - - # Middle Change - set len [string length $newstr] - set mid [expr $len / 2 ] - set beg [expr $mid - [expr $n_replace / 2] ] - set end [expr $beg + $n_replace - 1] - set s [string range $newstr $beg $end] - set repl [ replicate [string toupper $s] $n_add ] - set newstr [chop_data $method [string range $newstr 0 \ - [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]] - - set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \ - $key [chop_data $method $repl]}] - error_check_good put $ret 0 - - set ret [eval {$db get} $gflags $txn {$key}] - error_check_good get $ret [list [list $key [pad_data $method $newstr]]] - - set dvals($key) [pad_data $method $newstr] -} - -proc replicate { str times } { - set res $str - for { set i 1 } { $i < $times } { set i [expr $i * 2] } { - append res $res - } - return $res -} - -proc repeat { str n } { - set ret "" - while { $n > 0 } { - set ret $str$ret - incr n -1 - } - return $ret -} - -proc 
isqrt { l } { - set s [expr sqrt($l)] - set ndx [expr [string first "." $s] - 1] - return [string range $s 0 $ndx] -} - -# If we run watch_procs multiple times without an intervening -# testdir cleanup, it's possible that old sentinel files will confuse -# us. Make sure they're wiped out before we spawn any other processes. -proc sentinel_init { } { - source ./include.tcl - - set filelist {} - set ret [catch {glob $testdir/begin.*} result] - if { $ret == 0 } { - set filelist $result - } - - set ret [catch {glob $testdir/end.*} result] - if { $ret == 0 } { - set filelist [concat $filelist $result] - } - - foreach f $filelist { - fileremove $f - } -} - -proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } { - source ./include.tcl - - set elapsed 0 - - # Don't start watching the processes until a sentinel - # file has been created for each one. - foreach pid $pidlist { - while { [file exists $testdir/begin.$pid] == 0 } { - tclsleep $delay - incr elapsed $delay - # If pids haven't been created in one-tenth - # of the time allowed for the whole test, - # there's a problem. Report an error and fail. - if { $elapsed > [expr {$max / 10}] } { - puts "FAIL: begin.pid not created" - break - } - } - } - - while { 1 } { - - tclsleep $delay - incr elapsed $delay - - # Find the list of processes with outstanding sentinel - # files (i.e. a begin.pid and no end.pid). - set beginlist {} - set endlist {} - set ret [catch {glob $testdir/begin.*} result] - if { $ret == 0 } { - set beginlist $result - } - set ret [catch {glob $testdir/end.*} result] - if { $ret == 0 } { - set endlist $result - } - - set bpids {} - catch {unset epids} - foreach begfile $beginlist { - lappend bpids [string range $begfile \ - [string length $testdir/begin.] end] - } - foreach endfile $endlist { - set epids([string range $endfile \ - [string length $testdir/end.] 
end]) 1 - } - - # The set of processes that we still want to watch, $l, - # is the set of pids that have begun but not ended - # according to their sentinel files. - set l {} - foreach p $bpids { - if { [info exists epids($p)] == 0 } { - lappend l $p - } - } - - set rlist {} - foreach i $l { - set r [ catch { exec $KILL -0 $i } result ] - if { $r == 0 } { - lappend rlist $i - } - } - if { [ llength $rlist] == 0 } { - break - } else { - puts "[timestamp] processes running: $rlist" - } - - if { $elapsed > $max } { - # We have exceeded the limit; kill processes - # and report an error - foreach i $l { - tclkill $i - } - } - } - if { $quiet == 0 } { - puts "All processes have exited." - } -} - -# These routines are all used from within the dbscript.tcl tester. -proc db_init { dbp do_data } { - global a_keys - global l_keys - source ./include.tcl - - set txn "" - set nk 0 - set lastkey "" - - set a_keys() BLANK - set l_keys "" - - set c [$dbp cursor] - for {set d [$c get -first] } { [llength $d] != 0 } { - set d [$c get -next] } { - set k [lindex [lindex $d 0] 0] - set d2 [lindex [lindex $d 0] 1] - incr nk - if { $do_data == 1 } { - if { [info exists a_keys($k)] } { - lappend a_keys($k) $d2] - } else { - set a_keys($k) $d2 - } - } - - lappend l_keys $k - } - error_check_good curs_close [$c close] 0 - - return $nk -} - -proc pick_op { min max n } { - if { $n == 0 } { - return add - } - - set x [berkdb random_int 1 12] - if {$n < $min} { - if { $x <= 4 } { - return put - } elseif { $x <= 8} { - return get - } else { - return add - } - } elseif {$n > $max} { - if { $x <= 4 } { - return put - } elseif { $x <= 8 } { - return get - } else { - return del - } - - } elseif { $x <= 3 } { - return del - } elseif { $x <= 6 } { - return get - } elseif { $x <= 9 } { - return put - } else { - return add - } -} - -# random_data: Generate a string of random characters. -# If recno is 0 - Use average to pick a length between 1 and 2 * avg. 
-# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2), -# that will fit into a 32-bit integer. -# If the unique flag is 1, then make sure that the string is unique -# in the array "where". -proc random_data { avg unique where {recno 0} } { - upvar #0 $where arr - global debug_on - set min 1 - set max [expr $avg+$avg-1] - if { $recno } { - # - # Tcl seems to have problems with values > 30. - # - if { $max > 30 } { - set max 30 - } - set maxnum [expr int(pow(2, $max))] - } - while {1} { - set len [berkdb random_int $min $max] - set s "" - if {$recno} { - set s [berkdb random_int 1 $maxnum] - } else { - for {set i 0} {$i < $len} {incr i} { - append s [int_to_char [berkdb random_int 0 25]] - } - } - - if { $unique == 0 || [info exists arr($s)] == 0 } { - break - } - } - - return $s -} - -proc random_key { } { - global l_keys - global nkeys - set x [berkdb random_int 0 [expr $nkeys - 1]] - return [lindex $l_keys $x] -} - -proc is_err { desired } { - set x [berkdb random_int 1 100] - if { $x <= $desired } { - return 1 - } else { - return 0 - } -} - -proc pick_cursput { } { - set x [berkdb random_int 1 4] - switch $x { - 1 { return "-keylast" } - 2 { return "-keyfirst" } - 3 { return "-before" } - 4 { return "-after" } - } -} - -proc random_cursor { curslist } { - global l_keys - global nkeys - - set x [berkdb random_int 0 [expr [llength $curslist] - 1]] - set dbc [lindex $curslist $x] - - # We want to randomly set the cursor. Pick a key. - set k [random_key] - set r [$dbc get "-set" $k] - error_check_good cursor_get:$k [is_substr Error $r] 0 - - # Now move forward or backward some hops to randomly - # position the cursor. 
- set dist [berkdb random_int -10 10] - - set dir "-next" - set boundary "-first" - if { $dist < 0 } { - set dir "-prev" - set boundary "-last" - set dist [expr 0 - $dist] - } - - for { set i 0 } { $i < $dist } { incr i } { - set r [ record $dbc get $dir $k ] - if { [llength $d] == 0 } { - set r [ record $dbc get $k $boundary ] - } - error_check_bad dbcget [llength $r] 0 - } - return { [linsert r 0 $dbc] } -} - -proc record { args } { -# Recording every operation makes tests ridiculously slow on -# NT, so we are commenting this out; for debugging purposes, -# it will undoubtedly be useful to uncomment this. -# puts $args -# flush stdout - return [eval $args] -} - -proc newpair { k data } { - global l_keys - global a_keys - global nkeys - - set a_keys($k) $data - lappend l_keys $k - incr nkeys -} - -proc rempair { k } { - global l_keys - global a_keys - global nkeys - - unset a_keys($k) - set n [lsearch $l_keys $k] - error_check_bad rempair:$k $n -1 - set l_keys [lreplace $l_keys $n $n] - incr nkeys -1 -} - -proc changepair { k data } { - global l_keys - global a_keys - global nkeys - - set a_keys($k) $data -} - -proc changedup { k olddata newdata } { - global l_keys - global a_keys - global nkeys - - set d $a_keys($k) - error_check_bad changedup:$k [llength $d] 0 - - set n [lsearch $d $olddata] - error_check_bad changedup:$k $n -1 - - set a_keys($k) [lreplace $a_keys($k) $n $n $newdata] -} - -# Insert a dup into the a_keys array with DB_KEYFIRST. 
-proc adddup { k olddata newdata } { - global l_keys - global a_keys - global nkeys - - set d $a_keys($k) - if { [llength $d] == 0 } { - lappend l_keys $k - incr nkeys - set a_keys($k) { $newdata } - } - - set ndx 0 - - set d [linsert d $ndx $newdata] - set a_keys($k) $d -} - -proc remdup { k data } { - global l_keys - global a_keys - global nkeys - - set d [$a_keys($k)] - error_check_bad changedup:$k [llength $d] 0 - - set n [lsearch $d $olddata] - error_check_bad changedup:$k $n -1 - - set a_keys($k) [lreplace $a_keys($k) $n $n] -} - -proc dump_full_file { db txn outfile checkfunc start continue } { - source ./include.tcl - - set outf [open $outfile w] - # Now we will get each key from the DB and dump to outfile - set c [eval {$db cursor} $txn] - error_check_good dbcursor [is_valid_cursor $c $db] TRUE - - for {set d [$c get $start] } { [string length $d] != 0 } { - set d [$c get $continue] } { - set k [lindex [lindex $d 0] 0] - set d2 [lindex [lindex $d 0] 1] - $checkfunc $k $d2 - puts $outf "$k\t$d2" - } - close $outf - error_check_good curs_close [$c close] 0 -} - -proc int_to_char { i } { - global alphabet - - return [string index $alphabet $i] -} - -proc dbcheck { key data } { - global l_keys - global a_keys - global nkeys - global check_array - - if { [lsearch $l_keys $key] == -1 } { - error "FAIL: Key |$key| not in list of valid keys" - } - - set d $a_keys($key) - - if { [info exists check_array($key) ] } { - set check $check_array($key) - } else { - set check {} - } - - if { [llength $d] > 1 } { - if { [llength $check] != [llength $d] } { - # Make the check array the right length - for { set i [llength $check] } { $i < [llength $d] } \ - {incr i} { - lappend check 0 - } - set check_array($key) $check - } - - # Find this data's index - set ndx [lsearch $d $data] - if { $ndx == -1 } { - error "FAIL: \ - Data |$data| not found for key $key. 
Found |$d|" - } - - # Set the bit in the check array - set check_array($key) [lreplace $check_array($key) $ndx $ndx 1] - } elseif { [string compare $d $data] != 0 } { - error "FAIL: \ - Invalid data |$data| for key |$key|. Expected |$d|." - } else { - set check_array($key) 1 - } -} - -# Dump out the file and verify it -proc filecheck { file txn } { - global check_array - global l_keys - global nkeys - global a_keys - source ./include.tcl - - if { [info exists check_array] == 1 } { - unset check_array - } - - open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \ - "-first" "-next" - - # Check that everything we checked had all its data - foreach i [array names check_array] { - set count 0 - foreach j $check_array($i) { - if { $j != 1 } { - puts -nonewline "Key |$i| never found datum" - puts " [lindex $a_keys($i) $count]" - } - incr count - } - } - - # Check that all keys appeared in the checked array - set count 0 - foreach k $l_keys { - if { [info exists check_array($k)] == 0 } { - puts "filecheck: key |$k| not found. Data: $a_keys($k)" - } - incr count - } - - if { $count != $nkeys } { - puts "filecheck: Got $count keys; expected $nkeys" - } -} - -proc cleanup { dir env { quiet 0 } } { - global gen_upgrade - global is_qnx_test - global is_je_test - global old_encrypt - global passwd - source ./include.tcl - - if { $gen_upgrade == 1 } { - save_upgrade_files $dir - } - -# check_handles - set remfiles {} - set ret [catch { glob $dir/* } result] - if { $ret == 0 } { - foreach fileorig $result { - # - # We: - # - Ignore any env-related files, which are - # those that have __db.* or log.* if we are - # running in an env. Also ignore files whose - # names start with REPDIR_; these are replication - # subdirectories. - # - Call 'dbremove' on any databases. - # Remove any remaining temp files. 
- # - switch -glob -- $fileorig { - */DIR_* - - */__db.* - - */log.* - - */*.jdb { - if { $env != "NULL" } { - continue - } else { - if { $is_qnx_test } { - catch {berkdb envremove -force \ - -home $dir} r - } - lappend remfiles $fileorig - } - } - *.db { - set envargs "" - set encarg "" - # - # If in an env, it should be open crypto - # or not already. - # - if { $env != "NULL"} { - set file [file tail $fileorig] - set envargs " -env $env " - if { [is_txnenv $env] } { - append envargs " -auto_commit " - } - } else { - if { $old_encrypt != 0 } { - set encarg "-encryptany $passwd" - } - set file $fileorig - } - - # If a database is left in a corrupt - # state, dbremove might not be able to handle - # it (it does an open before the remove). - # Be prepared for this, and if necessary, - # just forcibly remove the file with a warning - # message. - set ret [catch \ - {eval {berkdb dbremove} $envargs $encarg \ - $file} res] - # If dbremove failed and we're not in an env, - # note that we don't have 100% certainty - # about whether the previous run used - # encryption. Try to remove with crypto if - # we tried without, and vice versa. - if { $ret != 0 } { - if { $env == "NULL" && \ - $old_encrypt == 0} { - set ret [catch \ - {eval {berkdb dbremove} \ - -encryptany $passwd \ - $file} res] - } - if { $env == "NULL" && \ - $old_encrypt == 1 } { - set ret [catch \ - {eval {berkdb dbremove} \ - $file} res] - } - if { $ret != 0 } { - if { $quiet == 0 } { - puts \ - "FAIL: dbremove in cleanup failed: $res" - } - set file $fileorig - lappend remfiles $file - } - } - } - default { - lappend remfiles $fileorig - } - } - } - if {[llength $remfiles] > 0} { - # - # In the HFS file system there are cases where not - # all files are removed on the first attempt. If - # it fails, try again a few times. 
- # - set count 0 - while { [catch {eval fileremove -f $remfiles}] == 1 \ - && $count < 5 } { - incr count - } - } - - if { $is_je_test } { - set rval [catch {eval {exec \ - $util_path/db_dump} -h $dir -l } res] - if { $rval == 0 } { - set envargs " -env $env " - if { [is_txnenv $env] } { - append envargs " -auto_commit " - } - - foreach db $res { - set ret [catch {eval \ - {berkdb dbremove} $envargs $db } res] - } - } - } - } -} - -proc log_cleanup { dir } { - source ./include.tcl - global gen_upgrade_log - - if { $gen_upgrade_log == 1 } { - save_upgrade_files $dir - } - - set files [glob -nocomplain $dir/log.*] - if { [llength $files] != 0} { - foreach f $files { - fileremove -f $f - } - } -} - -proc env_cleanup { dir } { - global old_encrypt - global passwd - source ./include.tcl - - set encarg "" - if { $old_encrypt != 0 } { - set encarg "-encryptany $passwd" - } - set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret] - # - # If something failed and we are left with a region entry - # in /dev/shmem that is zero-length, the envremove will - # succeed, and the shm_unlink will succeed, but it will not - # remove the zero-length entry from /dev/shmem. Remove it - # using fileremove or else all other tests using an env - # will immediately fail. - # - if { $is_qnx_test == 1 } { - set region_files [glob -nocomplain /dev/shmem/$dir*] - if { [llength $region_files] != 0 } { - foreach f $region_files { - fileremove -f $f - } - } - } - log_cleanup $dir - cleanup $dir NULL -} - -# Start an RPC server. Don't return to caller until the -# server is up. Wait up to $maxwait seconds. 
-proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } { - source ./include.tcl - global rpc_svc - global passwd - - set encargs "" - if { $encrypted == 1 } { - set encargs " -P $passwd " - } - - if { [string compare $rpc_server "localhost"] == 0 } { - set dpid [eval {exec $util_path/$rpc_svc \ - -h $rpc_testdir} $args $encargs &] - } else { - set dpid [eval {exec rsh $rpc_server \ - $rpc_path/$rpc_svc -h $rpc_testdir $args} &] - } - - # Wait a couple of seconds before we start looking for - # the server. - tclsleep 2 - set home [file tail $rpc_testdir] - if { $encrypted == 1 } { - set encargs " -encryptaes $passwd " - } - for { set i 0 } { $i < $maxwait } { incr i } { - # Try an operation -- while it fails with NOSERVER, sleep for - # a second and retry. - if {[catch {berkdb envremove -force -home "$home.FAIL" \ - -server $rpc_server} res] && \ - [is_substr $res DB_NOSERVER:]} { - tclsleep 1 - } else { - # Server is up, clean up and return to caller - break - } - if { $i >= $maxwait } { - puts "FAIL: RPC server\ - not started after $maxwait seconds" - } - } - return $dpid -} - -proc remote_cleanup { server dir localdir } { - set home [file tail $dir] - error_check_good cleanup:remove [berkdb envremove -home $home \ - -server $server] 0 - catch {exec rsh $server rm -f $dir/*} ret - cleanup $localdir NULL -} - -proc help { cmd } { - if { [info command $cmd] == $cmd } { - set is_proc [lsearch [info procs $cmd] $cmd] - if { $is_proc == -1 } { - # Not a procedure; must be a C command - # Let's hope that it takes some parameters - # and that it prints out a message - puts "Usage: [eval $cmd]" - } else { - # It is a tcl procedure - puts -nonewline "Usage: $cmd" - set args [info args $cmd] - foreach a $args { - set is_def [info default $cmd $a val] - if { $is_def != 0 } { - # Default value - puts -nonewline " $a=$val" - } elseif {$a == "args"} { - # Print out flag values - puts " options" - args - } else { - # No default value - puts -nonewline " $a" - } - 
} - puts "" - } - } else { - puts "$cmd is not a command" - } -} - -# Run a recovery test for a particular operation -# Notice that we catch the return from CP and do not do anything with it. -# This is because Solaris CP seems to exit non-zero on occasion, but -# everything else seems to run just fine. -# -# We split it into two functions so that the preparation and command -# could be executed in a different process than the recovery. -# -proc op_codeparse { encodedop op } { - set op1 "" - set op2 "" - switch $encodedop { - "abort" { - set op1 $encodedop - set op2 "" - } - "commit" { - set op1 $encodedop - set op2 "" - } - "prepare-abort" { - set op1 "prepare" - set op2 "abort" - } - "prepare-commit" { - set op1 "prepare" - set op2 "commit" - } - "prepare-discard" { - set op1 "prepare" - set op2 "discard" - } - } - - if { $op == "op" } { - return $op1 - } else { - return $op2 - } -} - -proc op_recover { encodedop dir env_cmd dbfile cmd msg } { - source ./include.tcl - - set op [op_codeparse $encodedop "op"] - set op2 [op_codeparse $encodedop "sub"] - puts "\t$msg $encodedop" - set gidf "" - if { $op == "prepare" } { - sentinel_init - - # Fork off a child to run the cmd - # We append the gid, so start here making sure - # we don't have old gid's around. 
- set outfile $testdir/childlog - fileremove -f $testdir/gidfile - set gidf $testdir/gidfile - set pidlist {} - # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \ - # $op $dir $env_cmd $dbfile $gidf $cmd" - set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \ - $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &] - lappend pidlist $p - watch_procs $pidlist 5 - set f1 [open $testdir/recdout r] - set r [read $f1] - puts -nonewline $r - close $f1 - fileremove -f $testdir/recdout - } else { - op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd - } - op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf -} - -proc op_recover_prep { op dir env_cmd dbfile gidf cmd } { - global log_log_record_types - global recd_debug - global recd_id - global recd_op - source ./include.tcl - - #puts "op_recover: $op $dir $env $dbfile $cmd" - - set init_file $dir/t1 - set afterop_file $dir/t2 - set final_file $dir/t3 - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - - # Save the initial file and open the environment and the file - catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res - copy_extent_file $dir $dbfile init - - convert_encrypt $env_cmd - set env [eval $env_cmd] - error_check_good envopen [is_valid_env $env] TRUE - - set db [berkdb open -auto_commit -env $env $dbfile] - error_check_good dbopen [is_valid_db $db] TRUE - - # Dump out file contents for initial case - open_and_dump_file $dbfile $env $init_file nop \ - dump_file_direction "-first" "-next" - - set t [$env txn] - error_check_bad txn_begin $t NULL - error_check_good txn_begin [is_substr $t "txn"] 1 - - # Now fill in the db, tmgr, and the txnid in the command - set exec_cmd $cmd - - set i [lsearch $cmd ENV] - if { $i != -1 } { - set exec_cmd [lreplace $exec_cmd $i $i $env] - } - - set i [lsearch $cmd TXNID] - if { $i != -1 } { - set exec_cmd [lreplace $exec_cmd $i $i $t] - } - - set i [lsearch $exec_cmd DB] - if { $i != -1 } { - 
set exec_cmd [lreplace $exec_cmd $i $i $db] - } - - # To test DB_CONSUME, we need to expect a record return, not "0". - set i [lsearch $exec_cmd "-consume"] - if { $i != -1 } { - set record_exec_cmd_ret 1 - } else { - set record_exec_cmd_ret 0 - } - - # For the DB_APPEND test, we need to expect a return other than - # 0; set this flag to be more lenient in the error_check_good. - set i [lsearch $exec_cmd "-append"] - if { $i != -1 } { - set lenient_exec_cmd_ret 1 - } else { - set lenient_exec_cmd_ret 0 - } - - # Execute command and commit/abort it. - set ret [eval $exec_cmd] - if { $record_exec_cmd_ret == 1 } { - error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2 - } elseif { $lenient_exec_cmd_ret == 1 } { - error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1 - } else { - error_check_good "\"$exec_cmd\"" $ret 0 - } - - set record_exec_cmd_ret 0 - set lenient_exec_cmd_ret 0 - - # Sync the file so that we can capture a snapshot to test recovery. - error_check_good sync:$db [$db sync] 0 - - catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res - copy_extent_file $dir $dbfile afterop - open_and_dump_file $dir/$dbfile.afterop NULL \ - $afterop_file nop dump_file_direction "-first" "-next" - - #puts "\t\t\tExecuting txn_$op:$t" - if { $op == "prepare" } { - set gid [make_gid global:$t] - set gfd [open $gidf w+] - puts $gfd $gid - close $gfd - error_check_good txn_$op:$t [$t $op $gid] 0 - } else { - error_check_good txn_$op:$t [$t $op] 0 - } - - switch $op { - "commit" { puts "\t\tCommand executed and committed." } - "abort" { puts "\t\tCommand executed and aborted." } - "prepare" { puts "\t\tCommand executed and prepared." } - } - - # Sync the file so that we can capture a snapshot to test recovery. 
- error_check_good sync:$db [$db sync] 0 - - catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res - copy_extent_file $dir $dbfile final - open_and_dump_file $dir/$dbfile.final NULL \ - $final_file nop dump_file_direction "-first" "-next" - - # If this is an abort or prepare-abort, it should match the - # original file. - # If this was a commit or prepare-commit, then this file should - # match the afterop file. - # If this was a prepare without an abort or commit, we still - # have transactions active, and peering at the database from - # another environment will show data from uncommitted transactions. - # Thus we just skip this in the prepare-only case; what - # we care about are the results of a prepare followed by a - # recovery, which we test later. - if { $op == "commit" } { - filesort $afterop_file $afterop_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(post-$op,pre-commit):diff($afterop_file,$final_file) \ - [filecmp $afterop_file.sort $final_file.sort] 0 - } elseif { $op == "abort" } { - filesort $init_file $init_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(initial,post-$op):diff($init_file,$final_file) \ - [filecmp $init_file.sort $final_file.sort] 0 - } else { - # Make sure this really is one of the prepare tests - error_check_good assert:prepare-test $op "prepare" - } - - # Running recovery on this database should not do anything. - # Flush all data to disk, close the environment and save the - # file. - # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared, - # you really have an active transaction and you're not allowed - # to close files that are being acted upon by in-process - # transactions. - if { $op != "prepare" } { - error_check_good close:$db [$db close] 0 - } - - # - # If we are running 'prepare' don't close the env with an - # active transaction. Leave it alone so the close won't - # quietly abort it on us. 
- if { [is_substr $op "prepare"] != 1 } { - error_check_good envclose [$env close] 0 - } - return -} - -proc op_recover_rec { op op2 dir env_cmd dbfile gidf} { - global log_log_record_types - global recd_debug - global recd_id - global recd_op - global encrypt - global passwd - source ./include.tcl - - #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf" - - set init_file $dir/t1 - set afterop_file $dir/t2 - set final_file $dir/t3 - - # Keep track of the log types we've seen - if { $log_log_record_types == 1} { - logtrack_read $dir - } - - berkdb debug_check - puts -nonewline "\t\top_recover_rec: Running recovery ... " - flush stdout - - set recargs "-h $dir -c " - if { $encrypt > 0 } { - append recargs " -P $passwd " - } - set stat [catch {eval exec $util_path/db_recover -e $recargs} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - } - puts -nonewline "complete ... " - - # - # We cannot run db_recover here because that will open an env, run - # recovery, then close it, which will abort the outstanding txns. - # We want to do it ourselves. - # - set env [eval $env_cmd] - error_check_good dbenv [is_valid_widget $env env] TRUE - - error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0 - puts "verified" - - # If we left a txn as prepared, but not aborted or committed, - # we need to do a txn_recover. Make sure we have the same - # number of txns we want. - if { $op == "prepare"} { - set txns [$env txn_recover] - error_check_bad txnrecover [llength $txns] 0 - set gfd [open $gidf r] - set origgid [read -nonewline $gfd] - close $gfd - set txnlist [lindex $txns 0] - set t [lindex $txnlist 0] - set gid [lindex $txnlist 1] - error_check_good gidcompare $gid $origgid - puts "\t\t\tExecuting txn_$op2:$t" - error_check_good txn_$op2:$t [$t $op2] 0 - # - # If we are testing discard, we do need to resolve - # the txn, so get the list again and now abort it. 
- # - if { $op2 == "discard" } { - set txns [$env txn_recover] - error_check_bad txnrecover [llength $txns] 0 - set txnlist [lindex $txns 0] - set t [lindex $txnlist 0] - set gid [lindex $txnlist 1] - error_check_good gidcompare $gid $origgid - puts "\t\t\tExecuting txn_abort:$t" - error_check_good disc_txn_abort:$t [$t abort] 0 - } - } - - open_and_dump_file $dir/$dbfile NULL $final_file nop \ - dump_file_direction "-first" "-next" - if { $op == "commit" || $op2 == "commit" } { - filesort $afterop_file $afterop_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(post-$op,pre-commit):diff($afterop_file,$final_file) \ - [filecmp $afterop_file.sort $final_file.sort] 0 - } else { - filesort $init_file $init_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(initial,post-$op):diff($init_file,$final_file) \ - [filecmp $init_file.sort $final_file.sort] 0 - } - - # Now close the environment, substitute a file that will need - # recovery and try running recovery again. - reset_env $env - if { $op == "commit" || $op2 == "commit" } { - catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res - move_file_extent $dir $dbfile init copy - } else { - catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res - move_file_extent $dir $dbfile afterop copy - } - - berkdb debug_check - puts -nonewline "\t\tRunning recovery on pre-op database ... " - flush stdout - - set stat [catch {eval exec $util_path/db_recover $recargs} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." - } - puts -nonewline "complete ... 
" - - error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0 - - puts "verified" - - set env [eval $env_cmd] - - open_and_dump_file $dir/$dbfile NULL $final_file nop \ - dump_file_direction "-first" "-next" - if { $op == "commit" || $op2 == "commit" } { - filesort $final_file $final_file.sort - filesort $afterop_file $afterop_file.sort - error_check_good \ - diff(post-$op,recovered):diff($afterop_file,$final_file) \ - [filecmp $afterop_file.sort $final_file.sort] 0 - } else { - filesort $init_file $init_file.sort - filesort $final_file $final_file.sort - error_check_good \ - diff(initial,post-$op):diff($init_file,$final_file) \ - [filecmp $init_file.sort $final_file.sort] 0 - } - - # This should just close the environment, not blow it away. - reset_env $env -} - -proc populate { db method txn n dups bigdata } { - source ./include.tcl - - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $n } { - if { [is_record_based $method] == 1 } { - set key [expr $count + 1] - } elseif { $dups == 1 } { - set key duplicate_key - } else { - set key $str - } - if { $bigdata == 1 && [berkdb random_int 1 3] == 1} { - set str [replicate $str 1000] - } - - set ret [$db put -txn $txn $key $str] - error_check_good db_put:$key $ret 0 - incr count - } - close $did - return 0 -} - -proc big_populate { db txn n } { - source ./include.tcl - - set did [open $dict] - set count 0 - while { [gets $did str] != -1 && $count < $n } { - set key [replicate $str 50] - set ret [$db put -txn $txn $key $str] - error_check_good db_put:$key $ret 0 - incr count - } - close $did - return 0 -} - -proc unpopulate { db txn num } { - source ./include.tcl - - set c [eval {$db cursor} "-txn $txn"] - error_check_bad $db:cursor $c NULL - error_check_good $db:cursor [is_substr $c $db] 1 - - set i 0 - for {set d [$c get -first] } { [llength $d] != 0 } { - set d [$c get -next] } { - $c del - incr i - if { $num != 0 && $ >= $num } { - break - } - } - error_check_good 
cursor_close [$c close] 0 - return 0 -} - -proc reset_env { env } { - error_check_good env_close [$env close] 0 -} - -proc maxlocks { myenv locker_id obj_id num } { - return [countlocks $myenv $locker_id $obj_id $num ] -} - -proc maxwrites { myenv locker_id obj_id num } { - return [countlocks $myenv $locker_id $obj_id $num ] -} - -proc minlocks { myenv locker_id obj_id num } { - return [countlocks $myenv $locker_id $obj_id $num ] -} - -proc minwrites { myenv locker_id obj_id num } { - return [countlocks $myenv $locker_id $obj_id $num ] -} - -proc countlocks { myenv locker_id obj_id num } { - set locklist "" - for { set i 0} {$i < [expr $obj_id * 4]} { incr i } { - set r [catch {$myenv lock_get read $locker_id \ - [expr $obj_id * 1000 + $i]} l ] - if { $r != 0 } { - puts $l - return ERROR - } else { - error_check_good lockget:$obj_id [is_substr $l $myenv] 1 - lappend locklist $l - } - } - - # Now acquire one write lock, except for obj_id 1, which doesn't - # acquire any. We'll use obj_id 1 to test minwrites. - if { $obj_id != 1 } { - set r [catch {$myenv lock_get write $locker_id \ - [expr $obj_id * 1000 + 10]} l ] - if { $r != 0 } { - puts $l - return ERROR - } else { - error_check_good lockget:$obj_id [is_substr $l $myenv] 1 - lappend locklist $l - } - } - - # Get one extra write lock for obj_id 2. We'll use - # obj_id 2 to test maxwrites. - # - if { $obj_id == 2 } { - set extra [catch {$myenv lock_get write \ - $locker_id [expr $obj_id * 1000 + 11]} l ] - if { $extra != 0 } { - puts $l - return ERROR - } else { - error_check_good lockget:$obj_id [is_substr $l $myenv] 1 - lappend locklist $l - } - } - - set ret [ring $myenv $locker_id $obj_id $num] - - foreach l $locklist { - error_check_good lockput:$l [$l put] 0 - } - - return $ret -} - -# This routine will let us obtain a ring of deadlocks. -# Each locker will get a lock on obj_id, then sleep, and -# then try to lock (obj_id + 1) % num. 
-# When the lock is finally granted, we release our locks and -# return 1 if we got both locks and DEADLOCK if we deadlocked. -# The results here should be that 1 locker deadlocks and the -# rest all finish successfully. -proc ring { myenv locker_id obj_id num } { - source ./include.tcl - - if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} { - puts $lock1 - return ERROR - } else { - error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1 - } - - tclsleep 30 - set nextobj [expr ($obj_id + 1) % $num] - set ret 1 - if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} { - if {[string match "*DEADLOCK*" $lock2] == 1} { - set ret DEADLOCK - } else { - if {[string match "*NOTGRANTED*" $lock2] == 1} { - set ret DEADLOCK - } else { - puts $lock2 - set ret ERROR - } - } - } else { - error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1 - } - - # Now release the first lock - error_check_good lockput:$lock1 [$lock1 put] 0 - - if {$ret == 1} { - error_check_bad lockget:$obj_id $lock2 NULL - error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1 - error_check_good lockput:$lock2 [$lock2 put] 0 - } - return $ret -} - -# This routine will create massive deadlocks. -# Each locker will get a readlock on obj_id, then sleep, and -# then try to upgrade the readlock to a write lock. -# When the lock is finally granted, we release our first lock and -# return 1 if we got both locks and DEADLOCK if we deadlocked. -# The results here should be that 1 locker succeeds in getting all -# the locks and everyone else deadlocks. 
-proc clump { myenv locker_id obj_id num } { - source ./include.tcl - - set obj_id 10 - if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} { - puts $lock1 - return ERROR - } else { - error_check_good lockget:$obj_id \ - [is_valid_lock $lock1 $myenv] TRUE - } - - tclsleep 30 - set ret 1 - if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} { - if {[string match "*DEADLOCK*" $lock2] == 1} { - set ret DEADLOCK - } else { - if {[string match "*NOTGRANTED*" $lock2] == 1} { - set ret DEADLOCK - } else { - puts $lock2 - set ret ERROR - } - } - } else { - error_check_good \ - lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE - } - - # Now release the first lock - error_check_good lockput:$lock1 [$lock1 put] 0 - - if {$ret == 1} { - error_check_good \ - lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE - error_check_good lockput:$lock2 [$lock2 put] 0 - } - return $ret -} - -proc dead_check { t procs timeout dead clean other } { - error_check_good $t:$procs:other $other 0 - switch $t { - ring { - # With timeouts the number of deadlocks is - # unpredictable: test for at least one deadlock. - if { $timeout != 0 && $dead > 1 } { - set clean [ expr $clean + $dead - 1] - set dead 1 - } - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - clump { - # With timeouts the number of deadlocks is - # unpredictable: test for no more than one - # successful lock. 
- if { $timeout != 0 && $dead == $procs } { - set clean 1 - set dead [expr $procs - 1] - } - error_check_good $t:$procs:deadlocks $dead \ - [expr $procs - 1] - error_check_good $t:$procs:success $clean 1 - } - oldyoung { - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - maxlocks { - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - maxwrites { - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - minlocks { - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - minwrites { - error_check_good $t:$procs:deadlocks $dead 1 - error_check_good $t:$procs:success $clean \ - [expr $procs - 1] - } - default { - error "Test $t not implemented" - } - } -} - -proc rdebug { id op where } { - global recd_debug - global recd_id - global recd_op - - set recd_debug $where - set recd_id $id - set recd_op $op -} - -proc rtag { msg id } { - set tag [lindex $msg 0] - set tail [expr [string length $tag] - 2] - set tag [string range $tag $tail $tail] - if { $id == $tag } { - return 1 - } else { - return 0 - } -} - -proc zero_list { n } { - set ret "" - while { $n > 0 } { - lappend ret 0 - incr n -1 - } - return $ret -} - -proc check_dump { k d } { - puts "key: $k data: $d" -} - -proc reverse { s } { - set res "" - for { set i 0 } { $i < [string length $s] } { incr i } { - set res "[string index $s $i]$res" - } - - return $res -} - -# -# This is a internal only proc. All tests should use 'is_valid_db' etc. 
-# -proc is_valid_widget { w expected } { - # First N characters must match "expected" - set l [string length $expected] - incr l -1 - if { [string compare [string range $w 0 $l] $expected] != 0 } { - return $w - } - - # Remaining characters must be digits - incr l 1 - for { set i $l } { $i < [string length $w] } { incr i} { - set c [string index $w $i] - if { $c < "0" || $c > "9" } { - return $w - } - } - - return TRUE -} - -proc is_valid_db { db } { - return [is_valid_widget $db db] -} - -proc is_valid_env { env } { - return [is_valid_widget $env env] -} - -proc is_valid_cursor { dbc db } { - return [is_valid_widget $dbc $db.c] -} - -proc is_valid_lock { lock env } { - return [is_valid_widget $lock $env.lock] -} - -proc is_valid_logc { logc env } { - return [is_valid_widget $logc $env.logc] -} - -proc is_valid_mpool { mpool env } { - return [is_valid_widget $mpool $env.mp] -} - -proc is_valid_page { page mpool } { - return [is_valid_widget $page $mpool.pg] -} - -proc is_valid_txn { txn env } { - return [is_valid_widget $txn $env.txn] -} - -proc is_valid_mutex { m env } { - return [is_valid_widget $m $env.mutex] -} - -proc is_valid_lock {l env} { - return [is_valid_widget $l $env.lock] -} - -proc is_valid_locker {l } { - return [is_valid_widget $l ""] -} - -proc is_valid_seq { seq } { - return [is_valid_widget $seq seq] -} - -proc send_cmd { fd cmd {sleep 2}} { - source ./include.tcl - - puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \ - puts \"FAIL: \$ret\" \ - }" - puts $fd "flush stdout" - flush $fd - berkdb debug_check - tclsleep $sleep - - set r [rcv_result $fd] - return $r -} - -proc rcv_result { fd } { - set r [gets $fd result] - error_check_bad remote_read $r -1 - - return $result -} - -proc send_timed_cmd { fd rcv_too cmd } { - set c1 "set start \[timestamp -r\]; " - set c2 "puts \[expr \[timestamp -r\] - \$start\]" - set full_cmd [concat $c1 $cmd ";" $c2] - - puts $fd $full_cmd - puts $fd "flush stdout" - flush $fd - return 0 -} - -# -# The 
rationale behind why we have *two* "data padding" routines is outlined -# below: -# -# Both pad_data and chop_data truncate data that is too long. However, -# pad_data also adds the pad character to pad data out to the fixed length -# record length. -# -# Which routine you call does not depend on the length of the data you're -# using, but on whether you're doing a put or a get. When we do a put, we -# have to make sure the data isn't longer than the size of a record because -# otherwise we'll get an error (use chop_data). When we do a get, we want to -# check that db padded everything correctly (use pad_data on the value against -# which we are comparing). -# -# We don't want to just use the pad_data routine for both purposes, because -# we want to be able to test whether or not db is padding correctly. For -# example, the queue access method had a bug where when a record was -# overwritten (*not* a partial put), only the first n bytes of the new entry -# were written, n being the new entry's (unpadded) length. So, if we did -# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get -# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would -# have gotten the "correct" result, but we wouldn't have found this bug. 
-proc chop_data {method data} { - global fixed_len - - if {[is_fixed_length $method] == 1 && \ - [string length $data] > $fixed_len} { - return [eval {binary format a$fixed_len $data}] - } else { - return $data - } -} - -proc pad_data {method data} { - global fixed_len - - if {[is_fixed_length $method] == 1} { - return [eval {binary format a$fixed_len $data}] - } else { - return $data - } -} - -proc make_fixed_length {method data {pad 0}} { - global fixed_len - - if {[is_fixed_length $method] == 1} { - if {[string length $data] > $fixed_len } { - error_check_bad make_fixed_len:TOO_LONG 1 1 - } - while { [string length $data] < $fixed_len } { - set data [format $data%c $pad] - } - } - return $data -} - -proc make_gid {data} { - while { [string length $data] < 128 } { - set data [format ${data}0] - } - return $data -} - -# shift data for partial -# pad with fixed pad (which is NULL) -proc partial_shift { data offset direction} { - global fixed_len - - set len [expr $fixed_len - 1] - - if { [string compare $direction "right"] == 0 } { - for { set i 1} { $i <= $offset } {incr i} { - set data [binary format x1a$len $data] - } - } elseif { [string compare $direction "left"] == 0 } { - for { set i 1} { $i <= $offset } {incr i} { - set data [string range $data 1 end] - set data [binary format a$len $data] - } - } - return $data -} - -# string compare does not always work to compare -# this data, nor does expr (==) -# specialized routine for comparison -# (for use in fixed len recno and q) -proc binary_compare { data1 data2 } { - if { [string length $data1] != [string length $data2] || \ - [string compare -length \ - [string length $data1] $data1 $data2] != 0 } { - return 1 - } else { - return 0 - } -} - -# This is a comparison function used with the lsort command. -# It treats its inputs as 32 bit signed integers for comparison, -# and is coded to work with both 32 bit and 64 bit versions of tclsh. 
-proc int32_compare { val1 val2 } { - # Big is set to 2^32 on a 64 bit machine, or 0 on 32 bit machine. - set big [expr 0xffffffff + 1] - if { $val1 >= 0x80000000 } { - set val1 [expr $val1 - $big] - } - if { $val2 >= 0x80000000 } { - set val2 [expr $val2 - $big] - } - return [expr $val1 - $val2] -} - -proc convert_method { method } { - switch -- $method { - -btree - - -dbtree - - dbtree - - -ddbtree - - ddbtree - - -rbtree - - BTREE - - DB_BTREE - - DB_RBTREE - - RBTREE - - bt - - btree - - db_btree - - db_rbtree - - rbt - - rbtree { return "-btree" } - - -dhash - - -ddhash - - -hash - - DB_HASH - - HASH - - dhash - - ddhash - - db_hash - - h - - hash { return "-hash" } - - -queue - - DB_QUEUE - - QUEUE - - db_queue - - q - - qam - - queue - - -iqueue - - DB_IQUEUE - - IQUEUE - - db_iqueue - - iq - - iqam - - iqueue { return "-queue" } - - -queueextent - - QUEUEEXTENT - - qe - - qamext - - -queueext - - queueextent - - queueext - - -iqueueextent - - IQUEUEEXTENT - - iqe - - iqamext - - -iqueueext - - iqueueextent - - iqueueext { return "-queue" } - - -frecno - - -recno - - -rrecno - - DB_FRECNO - - DB_RECNO - - DB_RRECNO - - FRECNO - - RECNO - - RRECNO - - db_frecno - - db_recno - - db_rrecno - - frec - - frecno - - rec - - recno - - rrec - - rrecno { return "-recno" } - - default { error "FAIL:[timestamp] $method: unknown method" } - } -} - -proc split_encargs { largs encargsp } { - global encrypt - upvar $encargsp e - set eindex [lsearch $largs "-encrypta*"] - if { $eindex == -1 } { - set e "" - set newl $largs - } else { - set eend [expr $eindex + 1] - set e [lrange $largs $eindex $eend] - set newl [lreplace $largs $eindex $eend "-encrypt"] - } - return $newl -} - -proc convert_encrypt { largs } { - global encrypt - global old_encrypt - - set old_encrypt $encrypt - set encrypt 0 - if { [lsearch $largs "-encrypt*"] != -1 } { - set encrypt 1 - } -} - -# If recno-with-renumbering or btree-with-renumbering is specified, then -# fix the arguments to specify the 
DB_RENUMBER/DB_RECNUM option for the -# -flags argument. -proc convert_args { method {largs ""} } { - global fixed_len - global gen_upgrade - global upgrade_be - source ./include.tcl - - if { [string first - $largs] == -1 &&\ - [string compare $largs ""] != 0 &&\ - [string compare $largs {{}}] != 0 } { - set errstring "args must contain a hyphen; does this test\ - have no numeric args?" - puts "FAIL:[timestamp] $errstring (largs was $largs)" - return -code return - } - - convert_encrypt $largs - if { $gen_upgrade == 1 && $upgrade_be == 1 } { - append largs " -lorder 4321 " - } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } { - append largs " -lorder 1234 " - } - - if { [is_rrecno $method] == 1 } { - append largs " -renumber " - } elseif { [is_rbtree $method] == 1 } { - append largs " -recnum " - } elseif { [is_dbtree $method] == 1 } { - append largs " -dup " - } elseif { [is_ddbtree $method] == 1 } { - append largs " -dup " - append largs " -dupsort " - } elseif { [is_dhash $method] == 1 } { - append largs " -dup " - } elseif { [is_ddhash $method] == 1 } { - append largs " -dup " - append largs " -dupsort " - } elseif { [is_queueext $method] == 1 } { - append largs " -extent 4 " - } - - if { [is_iqueue $method] == 1 || [is_iqueueext $method] == 1 } { - append largs " -inorder " - } - - # Default padding character is ASCII nul. 
- set fixed_pad 0 - if {[is_fixed_length $method] == 1} { - append largs " -len $fixed_len -pad $fixed_pad " - } - return $largs -} - -proc is_btree { method } { - set names { -btree BTREE DB_BTREE bt btree } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_dbtree { method } { - set names { -dbtree dbtree } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_ddbtree { method } { - set names { -ddbtree ddbtree } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_rbtree { method } { - set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_recno { method } { - set names { -recno DB_RECNO RECNO db_recno rec recno} - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_rrecno { method } { - set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_frecno { method } { - set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO} - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_hash { method } { - set names { -hash DB_HASH HASH db_hash h hash } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_dhash { method } { - set names { -dhash dhash } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_ddhash { method } { - set names { -ddhash ddhash } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_queue { method } { - if { [is_queueext $method] == 1 || [is_iqueue $method] == 1 || \ - [is_iqueueext $method] == 1 } { - return 1 - } - - set names { -queue DB_QUEUE QUEUE db_queue q queue qam } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} 
- -proc is_queueext { method } { - if { [is_iqueueext $method] == 1 } { - return 1 - } - - set names { -queueextent queueextent QUEUEEXTENT qe qamext \ - queueext -queueext } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_iqueue { method } { - if { [is_iqueueext $method] == 1 } { - return 1 - } - - set names { -iqueue DB_IQUEUE IQUEUE db_iqueue iq iqueue iqam } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_iqueueext { method } { - set names { -iqueueextent iqueueextent IQUEUEEXTENT iqe iqamext \ - iqueueext -iqueueext } - if { [lsearch $names $method] >= 0 } { - return 1 - } else { - return 0 - } -} - -proc is_record_based { method } { - if { [is_recno $method] || [is_frecno $method] || - [is_rrecno $method] || [is_queue $method] } { - return 1 - } else { - return 0 - } -} - -proc is_fixed_length { method } { - if { [is_queue $method] || [is_frecno $method] } { - return 1 - } else { - return 0 - } -} - -# Sort lines in file $in and write results to file $out. -# This is a more portable alternative to execing the sort command, -# which has assorted issues on NT [#1576]. -# The addition of a "-n" argument will sort numerically. -proc filesort { in out { arg "" } } { - set i [open $in r] - - set ilines {} - while { [gets $i line] >= 0 } { - lappend ilines $line - } - - if { [string compare $arg "-n"] == 0 } { - set olines [lsort -integer $ilines] - } else { - set olines [lsort $ilines] - } - - close $i - - set o [open $out w] - foreach line $olines { - puts $o $line - } - - close $o -} - -# Print lines up to the nth line of infile out to outfile, inclusive. -# The optional beg argument tells us where to start. -proc filehead { n infile outfile { beg 0 } } { - set in [open $infile r] - set out [open $outfile w] - - # Sed uses 1-based line numbers, and so we do too. 
- for { set i 1 } { $i < $beg } { incr i } { - if { [gets $in junk] < 0 } { - break - } - } - - for { } { $i <= $n } { incr i } { - if { [gets $in line] < 0 } { - break - } - puts $out $line - } - - close $in - close $out -} - -# Remove file (this replaces $RM). -# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf. -proc fileremove { args } { - set forceflag "" - foreach a $args { - if { [string first - $a] == 0 } { - # It's a flag. Better be f. - if { [string first f $a] != 1 } { - return -code error "bad flag to fileremove" - } else { - set forceflag "-force" - } - } else { - eval {file delete $forceflag $a} - } - } -} - -proc findfail { args } { - set errstring {} - foreach a $args { - if { [file exists $a] == 0 } { - continue - } - set f [open $a r] - while { [gets $f line] >= 0 } { - if { [string first FAIL $line] == 0 } { - lappend errstring $a:$line - } - } - close $f - } - return $errstring -} - -# Sleep for s seconds. -proc tclsleep { s } { - # On Windows, the system time-of-day clock may update as much - # as 55 ms late due to interrupt timing. Don't take any - # chances; sleep extra-long so that when tclsleep 1 returns, - # it's guaranteed to be a new second. - after [expr $s * 1000 + 56] -} - -# Kill a process. -proc tclkill { id } { - source ./include.tcl - - while { [ catch {exec $KILL -0 $id} ] == 0 } { - catch {exec $KILL -9 $id} - tclsleep 5 - } -} - -# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical. -proc filecmp { file_a file_b } { - set fda [open $file_a r] - set fdb [open $file_b r] - - set nra 0 - set nrb 0 - - # The gets can't be in the while condition because we'll - # get short-circuit evaluated. 
- while { $nra >= 0 && $nrb >= 0 } { - set nra [gets $fda aline] - set nrb [gets $fdb bline] - - if { $nra != $nrb || [string compare $aline $bline] != 0} { - close $fda - close $fdb - return 1 - } - } - - close $fda - close $fdb - return 0 -} - -# Give two SORTED files, one of which is a complete superset of the other, -# extract out the unique portions of the superset and put them in -# the given outfile. -proc fileextract { superset subset outfile } { - set sup [open $superset r] - set sub [open $subset r] - set outf [open $outfile w] - - # The gets can't be in the while condition because we'll - # get short-circuit evaluated. - set nrp [gets $sup pline] - set nrb [gets $sub bline] - while { $nrp >= 0 } { - if { $nrp != $nrb || [string compare $pline $bline] != 0} { - puts $outf $pline - } else { - set nrb [gets $sub bline] - } - set nrp [gets $sup pline] - } - - close $sup - close $sub - close $outf - return 0 -} - -# Verify all .db files in the specified directory. -proc verify_dir { {directory $testdir} { pref "" } \ - { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } } { - global encrypt - global passwd - - # If we're doing database verification between tests, we don't - # want to do verification twice without an intervening cleanup--some - # test was skipped. Always verify by default (noredo == 0) so - # that explicit calls to verify_dir during tests don't require - # cleanup commands. - if { $noredo == 1 } { - if { [file exists $directory/NOREVERIFY] == 1 } { - if { $quiet == 0 } { - puts "Skipping verification." - } - return 0 - } - set f [open $directory/NOREVERIFY w] - close $f - } - - if { [catch {glob $directory/*.db} dbs] != 0 } { - # No files matched - return 0 - } - set errfilearg "-errfile /dev/stderr " - set errpfxarg {-errpfx "FAIL: verify" } - set errarg $errfilearg$errpfxarg - set ret 0 - - # Open an env, so that we have a large enough cache. Pick - # a fairly generous default if we haven't specified something else. 
- - if { $cachesize == 0 } { - set cachesize [expr 1024 * 1024] - } - set encarg "" - if { $encrypt != 0 } { - set encarg "-encryptaes $passwd" - } - - set env [eval {berkdb_env -create -private} $encarg \ - {-cachesize [list 0 $cachesize 0]}] - set earg " -env $env $errarg " - - # The 'unref' flag means that we report unreferenced pages - # at all times. This is the default behavior. - # If we have a test which leaves unreferenced pages on systems - # where HAVE_FTRUNCATE is not on, then we call verify_dir with - # unref == 0. - set uflag "-unref" - if { $unref == 0 } { - set uflag "" - } - - foreach db $dbs { - if { [catch \ - {eval {berkdb dbverify} $uflag $earg $db} res] != 0 } { - puts $res - puts "FAIL:[timestamp] Verification of $db failed." - set ret 1 - continue - } else { - error_check_good verify:$db $res 0 - if { $quiet == 0 } { - puts "${pref}Verification of $db succeeded." - } - } - - # Skip the dump if it's dangerous to do it. - if { $nodump == 0 } { - if { [catch {eval dumploadtest $db} res] != 0 } { - puts $res - puts "FAIL:[timestamp] Dump/load of $db failed." - set ret 1 - continue - } else { - error_check_good dumpload:$db $res 0 - if { $quiet == 0 } { - puts \ - "${pref}Dump/load of $db succeeded." - } - } - } - } - - error_check_good vrfyenv_close [$env close] 0 - - return $ret -} - -# Is the database handle in $db a master database containing subdbs? -proc check_for_subdbs { db } { - set stat [$db stat] - for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } { - set elem [lindex $stat $i] - if { [string compare [lindex $elem 0] Flags] == 0 } { - # This is the list of flags; look for - # "subdatabases". - if { [is_substr [lindex $elem 1] subdatabases] } { - return 1 - } - } - } - return 0 -} - -proc db_compare { olddb newdb olddbname newdbname } { - # Walk through olddb and newdb and make sure their contents - # are identical. 
- set oc [$olddb cursor] - set nc [$newdb cursor] - error_check_good orig_cursor($olddbname) \ - [is_valid_cursor $oc $olddb] TRUE - error_check_good new_cursor($olddbname) \ - [is_valid_cursor $nc $newdb] TRUE - - for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \ - { set odbt [$oc get -next] } { - set ndbt [$nc get -get_both \ - [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]] - error_check_good db_compare($olddbname/$newdbname) $ndbt $odbt - } - - for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \ - { set ndbt [$nc get -next] } { - set odbt [$oc get -get_both \ - [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]] - error_check_good db_compare_back($olddbname) $odbt $ndbt - } - - error_check_good orig_cursor_close($olddbname) [$oc close] 0 - error_check_good new_cursor_close($newdbname) [$nc close] 0 - - return 0 -} - -proc dumploadtest { db } { - global util_path - global encrypt - global passwd - - set newdbname $db-dumpload.db - - set dbarg "" - set utilflag "" - if { $encrypt != 0 } { - set dbarg "-encryptany $passwd" - set utilflag "-P $passwd" - } - - # Dump/load the whole file, including all subdbs. - set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \ - $db | $util_path/db_load $utilflag $newdbname} res] - error_check_good db_dump/db_load($db:$res) $rval 0 - - # If the old file was empty, there's no new file and we're done. - if { [file exists $newdbname] == 0 } { - return 0 - } - - # Open original database. - set olddb [eval {berkdb_open -rdonly} $dbarg $db] - error_check_good olddb($db) [is_valid_db $olddb] TRUE - - if { [check_for_subdbs $olddb] } { - # If $db has subdatabases, compare each one separately. 
- set oc [$olddb cursor] - error_check_good orig_cursor($db) \ - [is_valid_cursor $oc $olddb] TRUE - - for { set dbt [$oc get -first] } \ - { [llength $dbt] > 0 } \ - { set dbt [$oc get -next] } { - set subdb [lindex [lindex $dbt 0] 0] - - set oldsubdb \ - [eval {berkdb_open -rdonly} $dbarg {$db $subdb}] - error_check_good olddb($db) [is_valid_db $oldsubdb] TRUE - - # Open the new database. - set newdb \ - [eval {berkdb_open -rdonly} $dbarg {$newdbname $subdb}] - error_check_good newdb($db) [is_valid_db $newdb] TRUE - - db_compare $oldsubdb $newdb $db $newdbname - error_check_good new_db_close($db) [$newdb close] 0 - error_check_good old_subdb_close($oldsubdb) [$oldsubdb close] 0 - } - - error_check_good oldcclose [$oc close] 0 - } else { - # Open the new database. - set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname] - error_check_good newdb($db) [is_valid_db $newdb] TRUE - - db_compare $olddb $newdb $db $newdbname - error_check_good new_db_close($db) [$newdb close] 0 - } - - error_check_good orig_db_close($db) [$olddb close] 0 - eval berkdb dbremove $dbarg $newdbname -} - -# Generate randomly ordered, guaranteed-unique four-character strings that can -# be used to differentiate duplicates without creating duplicate duplicates. -# (test031 & test032) randstring_init is required before the first call to -# randstring and initializes things for up to $i distinct strings; randstring -# gets the next string. -proc randstring_init { i } { - global rs_int_list alphabet - - # Fail if we can't generate sufficient unique strings. - if { $i > [expr 26 * 26 * 26 * 26] } { - set errstring\ - "Duplicate set too large for random string generator" - puts "FAIL:[timestamp] $errstring" - return -code return $errstring - } - - set rs_int_list {} - - # generate alphabet array - for { set j 0 } { $j < 26 } { incr j } { - set a($j) [string index $alphabet $j] - } - - # Generate a list with $i elements, { aaaa, aaab, ... 
aaaz, aaba ...} - for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } { - for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } { - for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } { - for { set d4 0 } { $d4 < 26 && $j < $i } \ - { incr d4 } { - lappend rs_int_list \ - $a($d1)$a($d2)$a($d3)$a($d4) - incr j - } - } - } - } - - # Randomize the list. - set rs_int_list [randomize_list $rs_int_list] -} - -# Randomize a list. Returns a randomly-reordered copy of l. -proc randomize_list { l } { - set i [llength $l] - - for { set j 0 } { $j < $i } { incr j } { - # Pick a random element from $j to the end - set k [berkdb random_int $j [expr $i - 1]] - - # Swap it with element $j - set t1 [lindex $l $j] - set t2 [lindex $l $k] - - set l [lreplace $l $j $j $t2] - set l [lreplace $l $k $k $t1] - } - - return $l -} - -proc randstring {} { - global rs_int_list - - if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } { - set errstring "randstring uninitialized or used too often" - puts "FAIL:[timestamp] $errstring" - return -code return $errstring - } - - set item [lindex $rs_int_list 0] - set rs_int_list [lreplace $rs_int_list 0 0] - - return $item -} - -# Takes a variable-length arg list, and returns a list containing the list of -# the non-hyphenated-flag arguments, followed by a list of each alphanumeric -# flag it finds. -proc extractflags { args } { - set inflags 1 - set flags {} - while { $inflags == 1 } { - set curarg [lindex $args 0] - if { [string first "-" $curarg] == 0 } { - set i 1 - while {[string length [set f \ - [string index $curarg $i]]] > 0 } { - incr i - if { [string compare $f "-"] == 0 } { - set inflags 0 - break - } else { - lappend flags $f - } - } - set args [lrange $args 1 end] - } else { - set inflags 0 - } - } - return [list $args $flags] -} - -# Wrapper for berkdb open, used throughout the test suite so that we can -# set an errfile/errpfx as appropriate. 
-proc berkdb_open { args } { - global is_envmethod - - if { [info exists is_envmethod] == 0 } { - set is_envmethod 0 - } - - set errargs {} - if { $is_envmethod == 0 } { - append errargs " -errfile /dev/stderr " - append errargs " -errpfx \\F\\A\\I\\L" - } - - eval {berkdb open} $errargs $args -} - -# Version without errpfx/errfile, used when we're expecting a failure. -proc berkdb_open_noerr { args } { - eval {berkdb open} $args -} - -# Wrapper for berkdb env, used throughout the test suite so that we can -# set an errfile/errpfx as appropriate. -proc berkdb_env { args } { - global is_envmethod - - if { [info exists is_envmethod] == 0 } { - set is_envmethod 0 - } - - set errargs {} - if { $is_envmethod == 0 } { - append errargs " -errfile /dev/stderr " - append errargs " -errpfx \\F\\A\\I\\L" - } - - eval {berkdb env} $errargs $args -} - -# Version without errpfx/errfile, used when we're expecting a failure. -proc berkdb_env_noerr { args } { - eval {berkdb env} $args -} - -proc check_handles { {outf stdout} } { - global ohandles - - set handles [berkdb handles] - if {[llength $handles] != [llength $ohandles]} { - puts $outf "WARNING: Open handles during cleanup: $handles" - } - set ohandles $handles -} - -proc open_handles { } { - return [llength [berkdb handles]] -} - -proc move_file_extent { dir dbfile tag op } { - set curfiles [get_extfiles $dir $dbfile ""] - set tagfiles [get_extfiles $dir $dbfile $tag] - # - # We want to copy or rename only those that have been saved, - # so delete all the current extent files so that we don't - # end up with extra ones we didn't restore from our saved ones. - foreach extfile $curfiles { - file delete -force $extfile - } - foreach extfile $tagfiles { - set i [string last "." 
$extfile] - incr i - set extnum [string range $extfile $i end] - set dbq [make_ext_filename $dir $dbfile $extnum] - # - # We can either copy or rename - # - file $op -force $extfile $dbq - } -} - -proc copy_extent_file { dir dbfile tag { op copy } } { - set files [get_extfiles $dir $dbfile ""] - foreach extfile $files { - set i [string last "." $extfile] - incr i - set extnum [string range $extfile $i end] - file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum - } -} - -proc get_extfiles { dir dbfile tag } { - if { $tag == "" } { - set filepat $dir/__dbq.$dbfile.\[0-9\]* - } else { - set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]* - } - return [glob -nocomplain -- $filepat] -} - -proc make_ext_filename { dir dbfile extnum } { - return $dir/__dbq.$dbfile.$extnum -} - -# All pids for Windows 9X are negative values. When we want to have -# unsigned int values, unique to the process, we'll take the absolute -# value of the pid. This avoids unsigned/signed mistakes, yet -# guarantees uniqueness, since each system has pids that are all -# either positive or negative. -# -proc sanitized_pid { } { - set mypid [pid] - if { $mypid < 0 } { - set mypid [expr - $mypid] - } - puts "PID: [pid] $mypid\n" - return $mypid -} - -# -# Extract the page size field from a stat record. Return -1 if -# none is found. -# -proc get_pagesize { stat } { - foreach field $stat { - set title [lindex $field 0] - if {[string compare $title "Page size"] == 0} { - return [lindex $field 1] - } - } - return -1 -} - -# Get a globbed list of source files and executables to use as large -# data items in overflow page tests. -proc get_file_list { {small 0} } { - global is_windows_test - global is_qnx_test - global is_je_test - global src_root - - # Skip libraries if we have a debug build. 
- if { $is_qnx_test || $is_je_test || [is_debug] == 1 } { - set small 1 - } - - if { $small && $is_windows_test } { - set templist [glob $src_root/*/*.c */env*.obj] - } elseif { $small } { - set templist [glob $src_root/*/*.c ./env*.o] - } elseif { $is_windows_test } { - set templist \ - [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll] - } else { - set templist [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?] - } - - # We don't want a huge number of files, but we do want a nice - # variety. If there are more than 200 files, pick out a list - # by taking every other, or every third, or every nth file. - set filelist {} - set nfiles 200 - if { [llength $templist] > $nfiles } { - set skip \ - [expr [llength $templist] / [expr [expr $nfiles / 3] * 2]] - set i $skip - while { $i < [llength $templist] } { - lappend filelist [lindex $templist $i] - incr i $skip - } - } else { - set filelist $templist - } - return $filelist -} - -proc is_cdbenv { env } { - set sys [$env attributes] - if { [lsearch $sys -cdb] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_lockenv { env } { - set sys [$env attributes] - if { [lsearch $sys -lock] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_logenv { env } { - set sys [$env attributes] - if { [lsearch $sys -log] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_mpoolenv { env } { - set sys [$env attributes] - if { [lsearch $sys -mpool] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_repenv { env } { - set sys [$env attributes] - if { [lsearch $sys -rep] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_rpcenv { env } { - set sys [$env attributes] - if { [lsearch $sys -rpc] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_secenv { env } { - set sys [$env attributes] - if { [lsearch $sys -crypto] != -1 } { - return 1 - } else { - return 0 - } -} - -proc is_txnenv { env } { - set sys [$env attributes] - if { [lsearch $sys -txn] != -1 } { - return 1 - } else { 
- return 0 - } -} - -proc get_home { env } { - set sys [$env attributes] - set h [lsearch $sys -home] - if { $h == -1 } { - return NULL - } - incr h - return [lindex $sys $h] -} - -proc reduce_dups { nent ndp } { - upvar $nent nentries - upvar $ndp ndups - - # If we are using a txnenv, assume it is using - # the default maximum number of locks, cut back - # so that we don't run out of locks. Reduce - # by 25% until we fit. - # - while { [expr $nentries * $ndups] > 5000 } { - set nentries [expr ($nentries / 4) * 3] - set ndups [expr ($ndups / 4) * 3] - } -} - -proc getstats { statlist field } { - foreach pair $statlist { - set txt [lindex $pair 0] - if { [string equal $txt $field] == 1 } { - return [lindex $pair 1] - } - } - return -1 -} - -# Return the value for a particular field in a set of statistics. -# Works for regular db stat as well as env stats (log_stat, -# lock_stat, txn_stat, rep_stat, etc.). -proc stat_field { handle which_stat field } { - set stat [$handle $which_stat] - return [getstats $stat $field ] -} - -proc big_endian { } { - global tcl_platform - set e $tcl_platform(byteOrder) - if { [string compare $e littleEndian] == 0 } { - return 0 - } elseif { [string compare $e bigEndian] == 0 } { - return 1 - } else { - error "FAIL: Unknown endianness $e" - } -} - -# Search logs to find if we have debug records. -proc log_has_debug_records { dir } { - source ./include.tcl - global encrypt - - set tmpfile $dir/printlog.out - set stat [catch \ - {exec $util_path/db_printlog -h $dir > $tmpfile} ret] - error_check_good db_printlog $stat 0 - - set f [open $tmpfile r] - while { [gets $f record] >= 0 } { - set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] - if { $r == 1 && [string match *_debug $name] != 1 } { - close $f - fileremove $tmpfile - return 1 - } - } - close $f - fileremove $tmpfile - return 0 -} - -# Set up a temporary database to check if this is a debug build. 
-proc is_debug { } { - source ./include.tcl - - set tempdir $testdir/temp - file mkdir $tempdir - set env [berkdb_env -create -log -home $testdir/temp] - error_check_good temp_env_open [is_valid_env $env] TRUE - - set file temp.db - set db [berkdb_open -create -env $env -btree $file] - error_check_good temp_db_open [is_valid_db $db] TRUE - - set key KEY - set data DATA - error_check_good temp_db_put [$db put $key $data] 0 - set ret [$db get $key] - error_check_good get_key [lindex [lindex $ret 0] 0] $key - error_check_good get_data [lindex [lindex $ret 0] 1] $data - error_check_good temp_db_close [$db close] 0 - error_check_good temp_db_remove [$env dbremove $file] 0 - error_check_good temp_env_close [$env close] 0 - - if { [log_has_debug_records $tempdir] == 1 } { - return 1 - } - return 0 -} - -proc adjust_logargs { logtype } { - if { $logtype == "in-memory" } { - set lbuf [expr 8 * [expr 1024 * 1024]] - set logargs " -log_inmemory -log_buffer $lbuf " - } elseif { $logtype == "on-disk" } { - set logargs "" - } else { - puts "FAIL: unrecognized log type $logtype" - } - return $logargs -} - -proc adjust_txnargs { logtype } { - if { $logtype == "in-memory" } { - set txnargs " -txn " - } elseif { $logtype == "on-disk" } { - set txnargs " -txn nosync " - } else { - puts "FAIL: unrecognized log type $logtype" - } - return $txnargs -} - diff --git a/storage/bdb/test/txn001.tcl b/storage/bdb/test/txn001.tcl deleted file mode 100644 index 583b7f21240..00000000000 --- a/storage/bdb/test/txn001.tcl +++ /dev/null @@ -1,116 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn001.tcl,v 11.38 2004/01/28 03:36:32 bostic Exp $ -# - -# TEST txn001 -# TEST Begin, commit, abort testing. 
-proc txn001 { {tnum "001"} { max 1024 } { ntxns 50 } } { - source ./include.tcl - global txn_curid - global txn_maxid - - puts -nonewline "Txn$tnum: Basic begin, commit, abort" - - if { $tnum != "001"} { - puts " (with ID wrap)" - } else { - puts "" - } - - # Open environment - env_cleanup $testdir - - set env [eval {berkdb_env -create -mode 0644 -txn \ - -txn_max $max -home $testdir}] - error_check_good evn_open [is_valid_env $env] TRUE - error_check_good txn_id_set \ - [ $env txn_id_set $txn_curid $txn_maxid ] 0 - txn001_suba $ntxns $env $tnum - txn001_subb $ntxns $env $tnum - txn001_subc $ntxns $env $tnum - # Close and unlink the file - error_check_good env_close:$env [$env close] 0 -} - -proc txn001_suba { ntxns env tnum } { - source ./include.tcl - - # We will create a bunch of transactions and commit them. - set txn_list {} - set tid_list {} - puts "\tTxn$tnum.a: Beginning/Committing $ntxns Transactions in $env" - for { set i 0 } { $i < $ntxns } { incr i } { - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - - lappend txn_list $txn - - set tid [$txn id] - error_check_good tid_check [lsearch $tid_list $tid] -1 - - lappend tid_list $tid - } - - # Now commit them all - foreach t $txn_list { - error_check_good txn_commit:$t [$t commit] 0 - } -} - -proc txn001_subb { ntxns env tnum } { - # We will create a bunch of transactions and abort them. - set txn_list {} - set tid_list {} - puts "\tTxn$tnum.b: Beginning/Aborting Transactions" - for { set i 0 } { $i < $ntxns } { incr i } { - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - - lappend txn_list $txn - - set tid [$txn id] - error_check_good tid_check [lsearch $tid_list $tid] -1 - - lappend tid_list $tid - } - - # Now abort them all - foreach t $txn_list { - error_check_good txn_abort:$t [$t abort] 0 - } -} - -proc txn001_subc { ntxns env tnum } { - # We will create a bunch of transactions and commit them. 
- set txn_list {} - set tid_list {} - puts "\tTxn$tnum.c: Beginning/Prepare/Committing Transactions" - for { set i 0 } { $i < $ntxns } { incr i } { - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - - lappend txn_list $txn - - set tid [$txn id] - error_check_good tid_check [lsearch $tid_list $tid] -1 - - lappend tid_list $tid - } - - # Now prepare them all - foreach t $txn_list { - error_check_good txn_prepare:$t \ - [$t prepare [make_gid global:$t]] 0 - } - - # Now commit them all - foreach t $txn_list { - error_check_good txn_commit:$t [$t commit] 0 - } - -} - diff --git a/storage/bdb/test/txn002.tcl b/storage/bdb/test/txn002.tcl deleted file mode 100644 index 1ecbf9df9df..00000000000 --- a/storage/bdb/test/txn002.tcl +++ /dev/null @@ -1,91 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn002.tcl,v 11.41 2004/01/28 03:36:32 bostic Exp $ -# - -# TEST txn002 -# TEST Verify that read-only transactions do not write log records. -proc txn002 { {tnum "002" } { max 1024 } { ntxns 50 } } { - source ./include.tcl - global txn_curid - global txn_maxid - - puts -nonewline "Txn$tnum: Read-only transaction test ($max) ($ntxns)" - - if { $tnum != "002" } { - puts " (with ID wrap)" - } else { - puts "" - } - - env_cleanup $testdir - set env [berkdb \ - env -create -mode 0644 -txn -txn_max $max -home $testdir] - error_check_good dbenv [is_valid_env $env] TRUE - error_check_good txn_id_set \ - [$env txn_id_set $txn_curid $txn_maxid ] 0 - - # Save the current bytes in the log. - set off_start [txn002_logoff $env] - - # We will create a bunch of transactions and commit them. 
- set txn_list {} - set tid_list {} - puts "\tTxn$tnum.a: Beginning/Committing Transactions" - for { set i 0 } { $i < $ntxns } { incr i } { - set txn [$env txn] - error_check_good txn_begin [is_valid_txn $txn $env] TRUE - - lappend txn_list $txn - - set tid [$txn id] - error_check_good tid_check [lsearch $tid_list $tid] -1 - - lappend tid_list $tid - } - foreach t $txn_list { - error_check_good txn_commit:$t [$t commit] 0 - } - - # Make sure we haven't written any new log records except - # potentially some recycle records if we were wrapping txnids. - set off_stop [txn002_logoff $env] - if { $off_stop != $off_start } { - txn002_recycle_only $testdir - } - - error_check_good env_close [$env close] 0 -} - -proc txn002_logoff { env } { - set stat [$env log_stat] - foreach i $stat { - foreach {txt val} $i {break} - if { [string compare \ - $txt {Current log file offset}] == 0 } { - return $val - } - } -} - -# Make sure that the only log records found are txn_recycle records -proc txn002_recycle_only { dir } { - global util_path - - set tmpfile $dir/printlog.out - set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret] - error_check_good db_printlog $stat 0 - - set f [open $tmpfile r] - while { [gets $f record] >= 0 } { - set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] - if { $r == 1 } { - error_check_good record_type __txn_recycle $name - } - } - close $f - fileremove $tmpfile -} diff --git a/storage/bdb/test/txn003.tcl b/storage/bdb/test/txn003.tcl deleted file mode 100644 index e6a6d6d14b6..00000000000 --- a/storage/bdb/test/txn003.tcl +++ /dev/null @@ -1,238 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn003.tcl,v 11.43 2004/01/28 03:36:33 bostic Exp $ -# - -# TEST txn003 -# TEST Test abort/commit/prepare of txns with outstanding child txns. 
-proc txn003 { {tnum "003"} } { - source ./include.tcl - global txn_curid - global txn_maxid - - puts -nonewline "Txn$tnum: Outstanding child transaction test" - - if { $tnum != "003" } { - puts " (with ID wrap)" - } else { - puts "" - } - env_cleanup $testdir - set testfile txn003.db - - set env_cmd "berkdb_env_noerr -create -txn -home $testdir" - set env [eval $env_cmd] - error_check_good dbenv [is_valid_env $env] TRUE - error_check_good txn_id_set \ - [$env txn_id_set $txn_curid $txn_maxid] 0 - - set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile} - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - # - # Put some data so that we can check commit or abort of child - # - set key 1 - set origdata some_data - set newdata this_is_new_data - set newdata2 some_other_new_data - - error_check_good db_put [$db put -auto_commit $key $origdata] 0 - error_check_good dbclose [$db close] 0 - - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - txn003_check $db $key "Origdata" $origdata - - puts "\tTxn$tnum.a: Parent abort" - set parent [$env txn] - error_check_good txn_begin [is_valid_txn $parent $env] TRUE - set child [$env txn -parent $parent] - error_check_good txn_begin [is_valid_txn $child $env] TRUE - error_check_good db_put [$db put -txn $child $key $newdata] 0 - error_check_good parent_abort [$parent abort] 0 - txn003_check $db $key "parent_abort" $origdata - # Check child handle is invalid - set stat [catch {$child abort} ret] - error_check_good child_handle $stat 1 - error_check_good child_h2 [is_substr $ret "invalid command name"] 1 - - puts "\tTxn$tnum.b: Parent commit" - set parent [$env txn] - error_check_good txn_begin [is_valid_txn $parent $env] TRUE - set child [$env txn -parent $parent] - error_check_good txn_begin [is_valid_txn $child $env] TRUE - error_check_good db_put [$db put -txn $child $key $newdata] 0 - error_check_good parent_commit [$parent commit] 0 - 
txn003_check $db $key "parent_commit" $newdata - # Check child handle is invalid - set stat [catch {$child abort} ret] - error_check_good child_handle $stat 1 - error_check_good child_h2 [is_substr $ret "invalid command name"] 1 - error_check_good dbclose [$db close] 0 - error_check_good env_close [$env close] 0 - - # - # Since the data check assumes what has come before, the 'commit' - # operation must be last. - # - set hdr "\tTxn$tnum" - set rlist { - {begin ".c"} - {prepare ".d"} - {abort ".e"} - {commit ".f"} - } - set count 0 - foreach pair $rlist { - incr count - set op [lindex $pair 0] - set msg [lindex $pair 1] - set msg $hdr$msg - txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op - set env [eval $env_cmd] - error_check_good dbenv [is_valid_env $env] TRUE - - berkdb debug_check - set db [eval {berkdb_open} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - # - # For prepare we'll then just - # end up aborting after we test what we need to. - # So set gooddata to the same as abort. - switch $op { - abort { - set gooddata $newdata - } - begin { - set gooddata $newdata - } - commit { - set gooddata $newdata2 - } - prepare { - set gooddata $newdata - } - } - txn003_check $db $key "parent_$op" $gooddata - error_check_good dbclose [$db close] 0 - error_check_good env_close [$env close] 0 - } - - # We can't do the attempted child discard on Windows - # because it will leave open files that can't be removed. - # Skip the remainder of the test for Windows. 
- if { $is_windows_test == 1 } { - puts "Skipping remainder of test for Windows" - return - } - puts "\tTxn$tnum.g: Attempt child prepare" - set env [eval $env_cmd] - error_check_good dbenv [is_valid_env $env] TRUE - berkdb debug_check - set db [eval {berkdb_open_noerr} $oflags] - error_check_good db_open [is_valid_db $db] TRUE - - set parent [$env txn] - error_check_good txn_begin [is_valid_txn $parent $env] TRUE - set child [$env txn -parent $parent] - error_check_good txn_begin [is_valid_txn $child $env] TRUE - error_check_good db_put [$db put -txn $child $key $newdata] 0 - set gid [make_gid child_prepare:$child] - set stat [catch {$child prepare $gid} ret] - error_check_good child_prepare $stat 1 - error_check_good child_prep_err [is_substr $ret "txn prepare"] 1 - - puts "\tTxn$tnum.h: Attempt child discard" - set stat [catch {$child discard} ret] - error_check_good child_discard $stat 1 - - # We just panic'd the region, so the next operations will fail. - # No matter, we still have to clean up all the handles. 
- - set stat [catch {$parent commit} ret] - error_check_good parent_commit $stat 1 - error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1 - - set stat [catch {$db close} ret] - error_check_good db_close $stat 1 - error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1 - - set stat [catch {$env close} ret] - error_check_good env_close $stat 1 - error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1 -} - -proc txn003_body { env_cmd testfile dir key newdata2 msg op } { - source ./include.tcl - - berkdb debug_check - sentinel_init - set gidf $dir/gidfile - fileremove -f $gidf - set pidlist {} - puts "$msg.0: Executing child script to prepare txns" - berkdb debug_check - set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \ - $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &] - lappend pidlist $p - watch_procs $pidlist 5 - set f1 [open $testdir/txnout r] - set r [read $f1] - puts $r - close $f1 - fileremove -f $testdir/txnout - - berkdb debug_check - puts -nonewline "$msg.1: Running recovery ... 
" - flush stdout - berkdb debug_check - set env [eval $env_cmd "-recover"] - error_check_good dbenv-recover [is_valid_env $env] TRUE - puts "complete" - - puts "$msg.2: getting txns from txn_recover" - set txnlist [$env txn_recover] - error_check_good txnlist_len [llength $txnlist] 1 - set tpair [lindex $txnlist 0] - - set gfd [open $gidf r] - set ret [gets $gfd parentgid] - close $gfd - set txn [lindex $tpair 0] - set gid [lindex $tpair 1] - if { $op == "begin" } { - puts "$msg.2: $op new txn" - } else { - puts "$msg.2: $op parent" - } - error_check_good gidcompare $gid $parentgid - if { $op == "prepare" } { - set gid [make_gid prepare_recover:$txn] - set stat [catch {$txn $op $gid} ret] - error_check_good prep_error $stat 1 - error_check_good prep_err \ - [is_substr $ret "transaction already prepared"] 1 - error_check_good txn:prep_abort [$txn abort] 0 - } elseif { $op == "begin" } { - set stat [catch {$env txn} ret] - error_check_good begin_error $stat 1 - error_check_good begin_err \ - [is_substr $ret "not yet committed transactions is incomplete"] 1 - error_check_good txn:prep_abort [$txn abort] 0 - } else { - error_check_good txn:$op [$txn $op] 0 - } - error_check_good envclose [$env close] 0 -} - -proc txn003_check { db key msg gooddata } { - set kd [$db get $key] - set data [lindex [lindex $kd 0] 1] - error_check_good $msg $data $gooddata -} diff --git a/storage/bdb/test/txn004.tcl b/storage/bdb/test/txn004.tcl deleted file mode 100644 index c7accddd9b5..00000000000 --- a/storage/bdb/test/txn004.tcl +++ /dev/null @@ -1,62 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: txn004.tcl,v 11.42 2004/01/28 03:36:33 bostic Exp $ -# - -# TEST txn004 -# TEST Test of wraparound txnids (txn001) -proc txn004 { } { - source ./include.tcl - global txn_curid - global txn_maxid - - set orig_curid $txn_curid - set orig_maxid $txn_maxid - puts "\tTxn004.1: wraparound txnids" - set txn_curid [expr $txn_maxid - 2] - txn001 "004.1" - puts "\tTxn004.2: closer wraparound txnids" - set txn_curid [expr $txn_maxid - 3] - set txn_maxid [expr $txn_maxid - 2] - txn001 "004.2" - - puts "\tTxn004.3: test wraparound txnids" - txn_idwrap_check $testdir - set txn_curid $orig_curid - set txn_maxid $orig_maxid - return -} - -proc txn_idwrap_check { testdir } { - global txn_curid - global txn_maxid - - env_cleanup $testdir - - # Open/create the txn region - set e [berkdb_env -create -txn -home $testdir] - error_check_good env_open [is_substr $e env] 1 - - set txn1 [$e txn] - error_check_good txn1 [is_valid_txn $txn1 $e] TRUE - error_check_good txn_id_set \ - [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0 - - set txn2 [$e txn] - error_check_good txn2 [is_valid_txn $txn2 $e] TRUE - - # txn3 will require a wraparound txnid - # XXX How can we test it has a wrapped id? - set txn3 [$e txn] - error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE - - error_check_good free_txn1 [$txn1 commit] 0 - error_check_good free_txn2 [$txn2 commit] 0 - error_check_good free_txn3 [$txn3 commit] 0 - - error_check_good close [$e close] 0 -} - diff --git a/storage/bdb/test/txn005.tcl b/storage/bdb/test/txn005.tcl deleted file mode 100644 index e22581cd129..00000000000 --- a/storage/bdb/test/txn005.tcl +++ /dev/null @@ -1,75 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn005.tcl,v 11.37 2004/01/28 03:36:33 bostic Exp $ -# - -# TEST txn005 -# TEST Test transaction ID wraparound and recovery. 
-proc txn005 {} { - source ./include.tcl - global txn_curid - global txn_maxid - - env_cleanup $testdir - puts "Txn005: Test transaction wraparound recovery" - - # Open/create the txn region - puts "\tTxn005.a: Create environment" - set e [berkdb_env -create -txn -home $testdir] - error_check_good env_open [is_valid_env $e] TRUE - - set txn1 [$e txn] - error_check_good txn1 [is_valid_txn $txn1 $e] TRUE - - set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db] - error_check_good db [is_valid_db $db] TRUE - error_check_good txn1_commit [$txn1 commit] 0 - - puts "\tTxn005.b: Set txn ids" - error_check_good txn_id_set \ - [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0 - - # txn2 and txn3 will require a wraparound txnid - set txn2 [$e txn] - error_check_good txn2 [is_valid_txn $txn2 $e] TRUE - - error_check_good put [$db put -txn $txn2 "a" ""] 0 - error_check_good txn2_commit [$txn2 commit] 0 - - error_check_good get_a [$db get "a"] "{a {}}" - - error_check_good close [$db close] 0 - - set txn3 [$e txn] - error_check_good txn3 [is_valid_txn $txn3 $e] TRUE - - set db [berkdb_open -env $e -txn $txn3 -btree txn005.db] - error_check_good db [is_valid_db $db] TRUE - - error_check_good put2 [$db put -txn $txn3 "b" ""] 0 - error_check_good sync [$db sync] 0 - error_check_good txn3_abort [$txn3 abort] 0 - error_check_good dbclose [$db close] 0 - error_check_good eclose [$e close] 0 - - puts "\tTxn005.c: Run recovery" - set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result] - if { $stat == 1 } { - error "FAIL: Recovery error: $result." 
- } - - puts "\tTxn005.d: Check data" - set e [berkdb_env -txn -home $testdir] - error_check_good env_open [is_valid_env $e] TRUE - - set db [berkdb_open -env $e -auto_commit -btree txn005.db] - error_check_good db [is_valid_db $db] TRUE - - error_check_good get_a [$db get "a"] "{a {}}" - error_check_bad get_b [$db get "b"] "{b {}}" - error_check_good dbclose [$db close] 0 - error_check_good eclose [$e close] 0 -} diff --git a/storage/bdb/test/txn006.tcl b/storage/bdb/test/txn006.tcl deleted file mode 100644 index 14ada718e58..00000000000 --- a/storage/bdb/test/txn006.tcl +++ /dev/null @@ -1,47 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn006.tcl,v 1.7 2004/01/28 03:36:33 bostic Exp $ -# -# -#TEST txn006 -#TEST Test dump/load in transactional environment. -proc txn006 { { iter 50 } } { - source ./include.tcl - set testfile txn006.db - - puts "Txn006: Test dump/load in transaction environment" - env_cleanup $testdir - - puts "\tTxn006.a: Create environment and database" - # Open/create the txn region - set e [berkdb_env -create -home $testdir -txn] - error_check_good env_open [is_valid_env $e] TRUE - - # Open/create database - set db [berkdb_open -auto_commit -env $e \ - -create -btree -dup $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - # Start a transaction - set txn [$e txn] - error_check_good txn [is_valid_txn $txn $e] TRUE - - puts "\tTxn006.b: Put data" - # Put some data - for { set i 1 } { $i < $iter } { incr i } { - error_check_good put [$db put -txn $txn key$i data$i] 0 - } - - # End transaction, close db - error_check_good txn_commit [$txn commit] 0 - error_check_good db_close [$db close] 0 - error_check_good env_close [$e close] 0 - - puts "\tTxn006.c: dump/load" - # Dump and load - exec $util_path/db_dump -p -h $testdir $testfile | \ - $util_path/db_load -h $testdir $testfile -} diff --git a/storage/bdb/test/txn007.tcl 
b/storage/bdb/test/txn007.tcl deleted file mode 100644 index 2ef382b9714..00000000000 --- a/storage/bdb/test/txn007.tcl +++ /dev/null @@ -1,57 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn007.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $ -# -#TEST txn007 -#TEST Test of DB_TXN_WRITE_NOSYNC -proc txn007 { { iter 50 } } { - source ./include.tcl - set testfile txn007.db - - puts "Txn007: DB_TXN_WRITE_NOSYNC" - env_cleanup $testdir - - # Open/create the txn region - puts "\tTxn007.a: Create env and database with -wrnosync" - set e [berkdb_env -create -home $testdir -txn -wrnosync] - error_check_good env_open [is_valid_env $e] TRUE - - # Open/create database - set db [berkdb open -auto_commit -env $e \ - -create -btree -dup $testfile] - error_check_good db_open [is_valid_db $db] TRUE - - # Put some data - puts "\tTxn007.b: Put $iter data items in individual transactions" - for { set i 1 } { $i < $iter } { incr i } { - # Start a transaction - set txn [$e txn] - error_check_good txn [is_valid_txn $txn $e] TRUE - $db put -txn $txn key$i data$i - error_check_good txn_commit [$txn commit] 0 - } - set stat [$e log_stat] - puts "\tTxn007.c: Check log stats" - foreach i $stat { - set txt [lindex $i 0] - if { [string equal $txt {Times log written}] == 1 } { - set wrval [lindex $i 1] - } - if { [string equal $txt {Times log flushed}] == 1 } { - set syncval [lindex $i 1] - } - } - error_check_good wrval [expr $wrval >= $iter] 1 - # - # We should have written at least 'iter' number of times, - # but not synced on any of those. 
- # - set val [expr $wrval - $iter] - error_check_good syncval [expr $syncval <= $val] 1 - - error_check_good db_close [$db close] 0 - error_check_good env_close [$e close] 0 -} diff --git a/storage/bdb/test/txn008.tcl b/storage/bdb/test/txn008.tcl deleted file mode 100644 index 8c89296f449..00000000000 --- a/storage/bdb/test/txn008.tcl +++ /dev/null @@ -1,32 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txn008.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $ -# - -# TEST txn008 -# TEST Test of wraparound txnids (txn002) -proc txn008 { } { - source ./include.tcl - global txn_curid - global txn_maxid - - set orig_curid $txn_curid - set orig_maxid $txn_maxid - puts "\tTxn008.1: wraparound txnids" - set txn_curid [expr $txn_maxid - 2] - txn002 "008.1" - puts "\tTxn008.2: closer wraparound txnids" - set txn_curid [expr $txn_maxid - 3] - set txn_maxid [expr $txn_maxid - 2] - txn002 "008.2" - - puts "\tTxn008.3: test wraparound txnids" - txn_idwrap_check $testdir - set txn_curid $orig_curid - set txn_maxid $orig_maxid - return -} - diff --git a/storage/bdb/test/txn009.tcl b/storage/bdb/test/txn009.tcl deleted file mode 100644 index b45538d7545..00000000000 --- a/storage/bdb/test/txn009.tcl +++ /dev/null @@ -1,32 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: txn009.tcl,v 11.6 2004/01/28 03:36:33 bostic Exp $ -# - -# TEST txn009 -# TEST Test of wraparound txnids (txn003) -proc txn009 { } { - source ./include.tcl - global txn_curid - global txn_maxid - - set orig_curid $txn_curid - set orig_maxid $txn_maxid - puts "\tTxn009.1: wraparound txnids" - set txn_curid [expr $txn_maxid - 2] - txn003 "009.1" - puts "\tTxn009.2: closer wraparound txnids" - set txn_curid [expr $txn_maxid - 3] - set txn_maxid [expr $txn_maxid - 2] - txn003 "009.2" - - puts "\tTxn009.3: test wraparound txnids" - txn_idwrap_check $testdir - set txn_curid $orig_curid - set txn_maxid $orig_maxid - return -} - diff --git a/storage/bdb/test/txnscript.tcl b/storage/bdb/test/txnscript.tcl deleted file mode 100644 index 980f6ed5118..00000000000 --- a/storage/bdb/test/txnscript.tcl +++ /dev/null @@ -1,67 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1996-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: txnscript.tcl,v 11.5 2004/01/28 03:36:33 bostic Exp $ -# -# Txn003 script - outstanding child prepare script -# Usage: txnscript envcmd dbcmd gidf key data -# envcmd: command to open env -# dbfile: name of database file -# gidf: name of global id file -# key: key to use -# data: new data to use - -source ./include.tcl -source $test_path/test.tcl -source $test_path/testutils.tcl - -set usage "txnscript envcmd dbfile gidfile key data" - -# Verify usage -if { $argc != 5 } { - puts stderr "FAIL:[timestamp] Usage: $usage" - exit -} - -# Initialize arguments -set envcmd [ lindex $argv 0 ] -set dbfile [ lindex $argv 1 ] -set gidfile [ lindex $argv 2 ] -set key [ lindex $argv 3 ] -set data [ lindex $argv 4 ] - -set dbenv [eval $envcmd] -error_check_good envopen [is_valid_env $dbenv] TRUE - -set usedb 1 -set db [berkdb_open -auto_commit -env $dbenv $dbfile] -error_check_good dbopen [is_valid_db $db] TRUE - -puts "\tTxnscript.a: begin parent and child txn" -set parent [$dbenv txn] -error_check_good parent 
[is_valid_txn $parent $dbenv] TRUE -set child [$dbenv txn -parent $parent] -error_check_good parent [is_valid_txn $child $dbenv] TRUE - -puts "\tTxnscript.b: Modify data" -error_check_good db_put [$db put -txn $child $key $data] 0 - -set gfd [open $gidfile w+] -set gid [make_gid txnscript:$parent] -puts $gfd $gid -puts "\tTxnscript.c: Prepare parent only" -error_check_good txn_prepare:$parent [$parent prepare $gid] 0 -close $gfd - -puts "\tTxnscript.d: Check child handle" -set stat [catch {$child abort} ret] -error_check_good child_handle $stat 1 -error_check_good child_h2 [is_substr $ret "invalid command name"] 1 - -# -# We do not close the db or env, but exit with the txns outstanding. -# -puts "\tTxnscript completed successfully" -flush stdout diff --git a/storage/bdb/test/update.tcl b/storage/bdb/test/update.tcl deleted file mode 100644 index 85c1a80071f..00000000000 --- a/storage/bdb/test/update.tcl +++ /dev/null @@ -1,93 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. 
-# -# $Id: update.tcl,v 11.13 2004/01/28 03:36:33 bostic Exp $ - -source ./include.tcl -global update_dir -set update_dir "$test_path/update_test" - -proc update { } { - source ./include.tcl - global update_dir - - foreach version [glob $update_dir/*] { - regexp \[^\/\]*$ $version version - foreach method [glob $update_dir/$version/*] { - regexp \[^\/\]*$ $method method - foreach file [glob $update_dir/$version/$method/*] { - regexp (\[^\/\]*)\.tar\.gz$ $file dummy name - foreach endianness {"le" "be"} { - puts "Update:\ - $version $method $name $endianness" - set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message] - if { $ret != 0 } { - puts $message - } - } - } - } - } -} - -proc _update { source_dir temp_dir \ - version method file endianness do_db_load_test do_update_test } { - source include.tcl - global errorInfo - - cleanup $temp_dir NULL - - exec sh -c \ -"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)" - - if { $do_db_load_test } { - set ret [catch \ - {exec $util_path/db_load -f "$temp_dir/$file.dump" \ - "$temp_dir/update.db"} message] - error_check_good \ - "Update load: $version $method $file $message" $ret 0 - - set ret [catch \ - {exec $util_path/db_dump -f "$temp_dir/update.dump" \ - "$temp_dir/update.db"} message] - error_check_good \ - "Update dump: $version $method $file $message" $ret 0 - - error_check_good "Update diff.1.1: $version $method $file" \ - [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0 - error_check_good \ - "Update diff.1.2: $version $method $file" $ret "" - } - - if { $do_update_test } { - set ret [catch \ - {berkdb open -update "$temp_dir/$file-$endianness.db"} db] - if { $ret == 1 } { - if { ![is_substr $errorInfo "version upgrade"] } { - set fnl [string first "\n" $errorInfo] - set theError \ - [string range $errorInfo 0 [expr $fnl - 1]] - error $theError - } - } else { - error_check_good dbopen [is_valid_db $db] TRUE - error_check_good dbclose [$db 
close] 0 - - set ret [catch \ - {exec $util_path/db_dump -f \ - "$temp_dir/update.dump" \ - "$temp_dir/$file-$endianness.db"} message] - error_check_good "Update\ - dump: $version $method $file $message" $ret 0 - - error_check_good \ - "Update diff.2: $version $method $file" \ - [filecmp "$temp_dir/$file.dump" \ - "$temp_dir/update.dump"] 0 - error_check_good \ - "Update diff.2: $version $method $file" $ret "" - } - } -} diff --git a/storage/bdb/test/upgrade.tcl b/storage/bdb/test/upgrade.tcl deleted file mode 100644 index 0043c353afc..00000000000 --- a/storage/bdb/test/upgrade.tcl +++ /dev/null @@ -1,745 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 1999-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: upgrade.tcl,v 11.37 2004/10/27 20:29:29 carol Exp $ - -source ./include.tcl - -global upgrade_dir -# set upgrade_dir "$test_path/upgrade_test" -set upgrade_dir "$test_path/upgrade/databases" - -global gen_upgrade -set gen_upgrade 0 -global gen_chksum -set gen_chksum 0 -global gen_upgrade_log -set gen_upgrade_log 0 - -global upgrade_dir -global upgrade_be -global upgrade_method -global upgrade_name - -proc upgrade { { archived_test_loc "DEFAULT" } } { - source ./include.tcl - global upgrade_dir - global tcl_platform - global saved_logvers - - set saved_upgrade_dir $upgrade_dir - - # Identify endianness of the machine running upgrade. 
- if { [big_endian] == 1 } { - set myendianness be - } else { - set myendianness le - } - set e $tcl_platform(byteOrder) - - if { [file exists $archived_test_loc/logversion] == 1 } { - set fd [open $archived_test_loc/logversion r] - set saved_logvers [read $fd] - close $fd - } else { - puts "Old log version number must be available \ - in $archived_test_loc/logversion" - return - } - - fileremove -f UPGRADE.OUT - set o [open UPGRADE.OUT a] - - puts -nonewline $o "Upgrade test started at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - puts $o [berkdb version -string] - puts $o "Testing $e files" - - puts -nonewline "Upgrade test started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts [berkdb version -string] - puts "Testing $e files" - - if { $archived_test_loc == "DEFAULT" } { - puts $o "Using default archived databases in $upgrade_dir." - puts "Using default archived databases in $upgrade_dir." - } else { - set upgrade_dir $archived_test_loc - puts $o "Using archived databases in $upgrade_dir." - puts "Using archived databases in $upgrade_dir." - } - close $o - - foreach version [glob $upgrade_dir/*] { - if { [string first CVS $version] != -1 } { continue } - regexp \[^\/\]*$ $version version - - # Test only files where the endianness of the db matches - # the endianness of the test platform. These are the - # meaningful tests: - # 1. File generated on le, tested on le - # 2. File generated on be, tested on be - # 3. Byte-swapped file generated on le, tested on be - # 4. Byte-swapped file generated on be, tested on le - # - set dbendianness [string range $version end-1 end] - if { [string compare $myendianness $dbendianness] != 0 } { - puts "Skipping test of $version \ - on $myendianness platform." 
- } else { - set release [string trim $version -lbe] - set o [open UPGRADE.OUT a] - puts $o "Files created on release $release" - close $o - puts "Files created on release $release" - - foreach method [glob $upgrade_dir/$version/*] { - regexp \[^\/\]*$ $method method - set o [open UPGRADE.OUT a] - puts $o "\nTesting $method files" - close $o - puts "\tTesting $method files" - - foreach file [lsort -dictionary \ - [glob -nocomplain \ - $upgrade_dir/$version/$method/*]] { - regexp (\[^\/\]*)\.tar\.gz$ \ - $file dummy name - - cleanup $testdir NULL 1 - set curdir [pwd] - cd $testdir - set tarfd [open "|tar xf -" w] - cd $curdir - - catch {exec gunzip -c \ - "$upgrade_dir/$version/$method/$name.tar.gz" \ - >@$tarfd} - close $tarfd - - set f [open $testdir/$name.tcldump \ - {RDWR CREAT}] - close $f - - # We exec a separate tclsh for each - # separate subtest to keep the - # testing process from consuming a - # tremendous amount of memory. - # - # First we test the .db files. - if { [file exists \ - $testdir/$name-$myendianness.db] } { - if { [catch {exec $tclsh_path \ - << "source \ - $test_path/test.tcl;\ - _upgrade_test $testdir \ - $version $method $name \ - $myendianness" >>& \ - UPGRADE.OUT } message] } { - set o [open \ - UPGRADE.OUT a] - puts $o "FAIL: $message" - close $o - } - if { [catch {exec $tclsh_path\ - << "source \ - $test_path/test.tcl;\ - _db_load_test $testdir \ - $version $method $name" >>&\ - UPGRADE.OUT } message] } { - set o [open \ - UPGRADE.OUT a] - puts $o "FAIL: $message" - close $o - } - } - # Then we test log files. 
- if { [file exists \ - $testdir/$name.prlog] } { - if { [catch {exec $tclsh_path \ - << "source \ - $test_path/test.tcl;\ - global saved_logvers;\ - set saved_logvers \ - $saved_logvers;\ - _log_test $testdir \ - $release $method \ - $name" >>& \ - UPGRADE.OUT } message] } { - set o [open \ - UPGRADE.OUT a] - puts $o "FAIL: $message" - close $o - } - } - } - } - } - } - set upgrade_dir $saved_upgrade_dir - - set o [open UPGRADE.OUT a] - puts -nonewline $o "Completed at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - close $o - - puts -nonewline "Completed at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - - # Don't provide a return value. - return -} - -proc _upgrade_test { temp_dir version method file endianness } { - source include.tcl - global errorInfo - global encrypt - - puts "Upgrade: $version $method $file $endianness" - - # Check whether we're working with an encrypted file. - if { [string match c-* $file] } { - set encrypt 1 - } - set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"] - error_check_good dbupgrade $ret 0 - - error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0 - - upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump" - - error_check_good "Upgrade diff.$endianness: $version $method $file" \ - [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0 -} - -proc _db_load_test { temp_dir version method file } { - source include.tcl - global errorInfo - - puts "Db_load: $version $method $file" - - set ret [catch \ - {exec $util_path/db_load -f "$temp_dir/$file.dump" \ - "$temp_dir/upgrade.db"} message] - error_check_good \ - "Upgrade load: $version $method $file $message" $ret 0 - - upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump" - - error_check_good "Upgrade diff.1.1: $version $method $file" \ - [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0 -} - -proc _log_test { temp_dir release method file } { - source ./include.tcl - global saved_logvers - global 
passwd - puts "Check log file: $temp_dir $release $method $file" - - # Get log version number of current system - set env [berkdb_env -create -log -home $testdir] - error_check_good is_valid_env [is_valid_env $env] TRUE - set current_logvers [get_log_vers $env] - error_check_good env_close [$env close] 0 - error_check_good env_remove [berkdb envremove -home $testdir] 0 - - # Rename recd001-x-log.000000000n to log.000000000n. - set logfiles [glob -nocomplain $temp_dir/*log.0*] - foreach logfile $logfiles { - set logname [string replace $logfile 0 \ - [string last - $logfile]] - file rename -force $logfile $temp_dir/$logname - } - - # Use db_printlog to dump the logs. If the current log file - # version is greater than the saved log file version, the log - # files are expected to be unreadable. If the log file is - # readable, check that the current printlog dump matches the - # archived printlog. - # - set ret [catch {exec $util_path/db_printlog -h $temp_dir \ - > $temp_dir/logs.prlog} message] - if { [is_substr $message "magic number"] } { - # The failure is probably due to encryption, try - # crypto printlog. - set ret [catch {exec $util_path/db_printlog -h $temp_dir \ - -P $passwd > $temp_dir/logs.prlog} message] - if { $ret == 1 } { - # If the failure is because of a historic - # log version, that's okay. 
- if { $current_logvers <= $saved_logvers } { - puts "db_printlog failed: $message" - } - } - } - - if { $current_logvers > $saved_logvers } { - error_check_good historic_log_version \ - [is_substr $message "historic log version"] 1 - } else { - error_check_good db_printlog:$message $ret 0 - # Compare logs.prlog and $file.prlog (should match) - error_check_good "Compare printlogs" [filecmp \ - "$temp_dir/logs.prlog" "$temp_dir/$file.prlog"] 0 - } -} - -proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } { - global gen_upgrade - global gen_upgrade_log - global gen_chksum - global upgrade_dir - global upgrade_be - global upgrade_method - global upgrade_name - global test_names - global parms - global encrypt - global passwd - source ./include.tcl - - set upgrade_dir $dir - env_cleanup $testdir - - fileremove -f GENERATE.OUT - set o [open GENERATE.OUT a] - - puts -nonewline $o "Generating upgrade files. Started at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - puts $o [berkdb version -string] - - puts -nonewline "Generating upgrade files. Started at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - puts [berkdb version -string] - - close $o - - # Create a file that contains the log version number. - # If necessary, create the directory to contain the file. - set env [berkdb_env -create -log -home $testdir] - error_check_good is_valid_env [is_valid_env $env] TRUE - - if { [file exists $dir] == 0 } { - file mkdir $dir - } - set lv [open $dir/logversion w] - puts $lv [get_log_vers $env] - close $lv - - error_check_good env_close [$env close] 0 - - # Generate test databases for each access method and endianness. 
- set gen_upgrade 1 - foreach method \ - "btree rbtree hash recno rrecno frecno queue queueext" { - set o [open GENERATE.OUT a] - puts $o "\nGenerating $method files" - close $o - puts "\tGenerating $method files" - set upgrade_method $method -#set test_names(test) "" - foreach test $test_names(test) { - if { [info exists parms($test)] != 1 } { - continue - } - - set o [open GENERATE.OUT a] - puts $o "\t\tGenerating files for $test" - close $o - puts "\t\tGenerating files for $test" - - if { $save_non_crypto == 1 } { - set encrypt 0 - foreach upgrade_be { 0 1 } { - set upgrade_name $test - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl;\ - global gen_upgrade upgrade_be;\ - global upgrade_method upgrade_name;\ - global encrypt;\ - set encrypt $encrypt;\ - set gen_upgrade 1;\ - set upgrade_be $upgrade_be;\ - set upgrade_method $upgrade_method;\ - set upgrade_name $upgrade_name;\ - run_method -$method $test" \ - >>& GENERATE.OUT} res] { - puts "FAIL: run_method \ - $test $method" - } - cleanup $testdir NULL 1 - } - # Save checksummed files for only one test. - # Checksumming should work in all or no cases. - set gen_chksum 1 - foreach upgrade_be { 0 1 } { - set upgrade_name $test - if { $test == "test001" } { - if { [catch {exec $tclsh_path \ - << "source $test_path/test.tcl;\ - global gen_upgrade;\ - global upgrade_be;\ - global upgrade_method;\ - global upgrade_name;\ - global encrypt gen_chksum;\ - set encrypt $encrypt;\ - set gen_upgrade 1;\ - set gen_chksum 1;\ - set upgrade_be $upgrade_be;\ - set upgrade_method \ - $upgrade_method;\ - set upgrade_name \ - $upgrade_name;\ - run_method -$method $test \ - 0 1 stdout -chksum" \ - >>& GENERATE.OUT} res] } { - puts "FAIL: run_method \ - $test $method \ - -chksum: $res" - } - cleanup $testdir NULL 1 - } - } - set gen_chksum 0 - } - # Save encrypted db's only of native endianness. - # Encrypted files are not portable across endianness. 
- if { $save_crypto == 1 } { - set upgrade_be [big_endian] - set encrypt 1 - set upgrade_name $test - if [catch {exec $tclsh_path \ - << "source $test_path/test.tcl;\ - global gen_upgrade upgrade_be;\ - global upgrade_method upgrade_name;\ - global encrypt passwd;\ - set encrypt $encrypt;\ - set passwd $passwd;\ - set gen_upgrade 1;\ - set upgrade_be $upgrade_be;\ - set upgrade_method $upgrade_method;\ - set upgrade_name $upgrade_name;\ - run_secmethod $method $test" \ - >>& GENERATE.OUT} res] { - puts "FAIL: run_secmethod \ - $test $method" - } - cleanup $testdir NULL 1 - } - } - } - set gen_upgrade 0 - # Set upgrade_be to the native value so log files go to the - # right place. - set upgrade_be [big_endian] - - # Generate log files. - set o [open GENERATE.OUT a] - puts $o "\tGenerating log files" - close $o - puts "\tGenerating log files" - - set gen_upgrade_log 1 - # Pass the global variables and their values to the new tclsh. - if { $save_non_crypto == 1 } { - set encrypt 0 - if [catch {exec $tclsh_path << "source $test_path/test.tcl;\ - global gen_upgrade_log upgrade_be upgrade_dir;\ - global encrypt;\ - set encrypt $encrypt;\ - set gen_upgrade_log $gen_upgrade_log; \ - set upgrade_be $upgrade_be;\ - set upgrade_dir $upgrade_dir;\ - run_recds" >>& GENERATE.OUT} res] { - puts "FAIL: run_recds: $res" - } - } - if { $save_crypto == 1 } { - set encrypt 1 - if [catch {exec $tclsh_path << "source $test_path/test.tcl;\ - global gen_upgrade_log upgrade_be upgrade_dir;\ - global encrypt;\ - set encrypt $encrypt;\ - set gen_upgrade_log $gen_upgrade_log; \ - set upgrade_be $upgrade_be;\ - set upgrade_dir $upgrade_dir;\ - run_recds " >>& GENERATE.OUT} res] { - puts "FAIL: run_recds with crypto: $res" - } - } - set gen_upgrade_log 0 - - set o [open GENERATE.OUT a] - puts -nonewline $o "Completed at: " - puts $o [clock format [clock seconds] -format "%H:%M %D"] - puts -nonewline "Completed at: " - puts [clock format [clock seconds] -format "%H:%M %D"] - close $o -} - -proc 
save_upgrade_files { dir } { - global upgrade_dir - global upgrade_be - global upgrade_method - global upgrade_name - global gen_upgrade - global gen_upgrade_log - global encrypt - global gen_chksum - global passwd - source ./include.tcl - - set vers [berkdb version] - set maj [lindex $vers 0] - set min [lindex $vers 1] - - # Is this machine big or little endian? We want to mark - # the test directories appropriately, since testing - # little-endian databases generated by a big-endian machine, - # and/or vice versa, is interesting. - if { [big_endian] } { - set myendianness be - } else { - set myendianness le - } - - if { $upgrade_be == 1 } { - set version_dir "$myendianness-$maj.${min}be" - set en be - } else { - set version_dir "$myendianness-$maj.${min}le" - set en le - } - - set dest $upgrade_dir/$version_dir/$upgrade_method - exec mkdir -p $dest - - if { $gen_upgrade == 1 } { - # Save db files from test001 - testxxx. - set dbfiles [glob -nocomplain $dir/*.db] - set dumpflag "" - # Encrypted files are identified by the prefix "c-". - if { $encrypt == 1 } { - set upgrade_name c-$upgrade_name - set dumpflag " -P $passwd " - } - # Checksummed files are identified by the prefix "s-". - if { $gen_chksum == 1 } { - set upgrade_name s-$upgrade_name - } - foreach dbfile $dbfiles { - set basename [string range $dbfile \ - [expr [string length $dir] + 1] end-3] - - set newbasename $upgrade_name-$basename - - # db_dump file - if { [catch {eval exec $util_path/db_dump -k $dumpflag \ - $dbfile > $dir/$newbasename.dump} res] } { - puts "FAIL: $res" - } - - # tcl_dump file - upgrade_dump $dbfile $dir/$newbasename.tcldump - - # Rename dbfile and any dbq files. - file rename $dbfile $dir/$newbasename-$en.db - foreach dbq \ - [glob -nocomplain $dir/__dbq.$basename.db.*] { - set s [string length $dir/__dbq.] 
- set newname [string replace $dbq $s \ - [expr [string length $basename] + $s - 1] \ - $newbasename-$en] - file rename $dbq $newname - } - set cwd [pwd] - cd $dir - catch {eval exec tar -cvf $dest/$newbasename.tar \ - [glob $newbasename* __dbq.$newbasename-$en.db.*]} - catch {exec gzip -9v $dest/$newbasename.tar} res - cd $cwd - } - } - - if { $gen_upgrade_log == 1 } { - # Save log files from recd tests. - set logfiles [glob -nocomplain $dir/log.*] - if { [llength $logfiles] > 0 } { - # More than one log.0000000001 file may be produced - # per recd test, so we generate unique names: - # recd001-0-log.0000000001, recd001-1-log.0000000001, - # and so on. - # We may also have log.0000000001, log.0000000002, - # and so on, and they will all be dumped together - # by db_printlog. - set count 0 - while { [file exists \ - $dest/$upgrade_name-$count-log.tar.gz] \ - == 1 } { - incr count - } - set newname $upgrade_name-$count-log - - # Run db_printlog on all the log files - if {[catch {exec $util_path/db_printlog -h $dir > \ - $dir/$newname.prlog} res] != 0} { - puts "Regular printlog failed, try encryption" - eval {exec $util_path/db_printlog} -h $dir \ - -P $passwd > $dir/$newname.prlog - } - - # Rename each log file so we can identify which - # recd test created it. 
- foreach logfile $logfiles { - set lognum [string range $logfile \ - end-9 end] - file rename $logfile $dir/$newname.$lognum - } - - set cwd [pwd] - cd $dir - - catch {eval exec tar -cvf $dest/$newname.tar \ - [glob $newname*]} - catch {exec gzip -9v $dest/$newname.tar} - cd $cwd - } - } -} - -proc upgrade_dump { database file {stripnulls 0} } { - global errorInfo - global encrypt - global passwd - - set encargs "" - if { $encrypt == 1 } { - set encargs " -encryptany $passwd " - } - set db [eval {berkdb open} -rdonly $encargs $database] - set dbc [$db cursor] - - set f [open $file w+] - fconfigure $f -encoding binary -translation binary - - # - # Get a sorted list of keys - # - set key_list "" - set pair [$dbc get -first] - - while { 1 } { - if { [llength $pair] == 0 } { - break - } - set k [lindex [lindex $pair 0] 0] - lappend key_list $k - set pair [$dbc get -next] - } - - # Discard duplicated keys; we now have a key for each - # duplicate, not each unique key, and we don't want to get each - # duplicate multiple times when we iterate over key_list. - set uniq_keys "" - foreach key $key_list { - if { [info exists existence_list($key)] == 0 } { - lappend uniq_keys $key - } - set existence_list($key) 1 - } - set key_list $uniq_keys - - set key_list [lsort -command _comp $key_list] - - # - # Get the data for each key - # - set i 0 - foreach key $key_list { - set pair [$dbc get -set $key] - if { $stripnulls != 0 } { - # the Tcl interface to db versions before 3.X - # added nulls at the end of all keys and data, so - # we provide functionality to strip that out. 
- set key [strip_null $key] - } - set data_list {} - catch { while { [llength $pair] != 0 } { - set data [lindex [lindex $pair 0] 1] - if { $stripnulls != 0 } { - set data [strip_null $data] - } - lappend data_list [list $data] - set pair [$dbc get -nextdup] - } } - #lsort -command _comp data_list - set data_list [lsort -command _comp $data_list] - puts -nonewline $f [binary format i [string length $key]] - puts -nonewline $f $key - puts -nonewline $f [binary format i [llength $data_list]] - for { set j 0 } { $j < [llength $data_list] } { incr j } { - puts -nonewline $f [binary format i [string length \ - [concat [lindex $data_list $j]]]] - puts -nonewline $f [concat [lindex $data_list $j]] - } - if { [llength $data_list] == 0 } { - puts "WARNING: zero-length data list" - } - incr i - } - - close $f - error_check_good upgrade_dump_c_close [$dbc close] 0 - error_check_good upgrade_dump_db_close [$db close] 0 -} - -proc _comp { a b } { - if { 0 } { - # XXX - set a [strip_null [concat $a]] - set b [strip_null [concat $b]] - #return [expr [concat $a] < [concat $b]] - } else { - set an [string first "\0" $a] - set bn [string first "\0" $b] - - if { $an != -1 } { - set a [string range $a 0 [expr $an - 1]] - } - if { $bn != -1 } { - set b [string range $b 0 [expr $bn - 1]] - } - } - #puts "$a $b" - return [string compare $a $b] -} - -proc strip_null { str } { - set len [string length $str] - set last [expr $len - 1] - - set termchar [string range $str $last $last] - if { [string compare $termchar \0] == 0 } { - set ret [string range $str 0 [expr $last - 1]] - } else { - set ret $str - } - - return $ret -} - -proc get_log_vers { env } { - set stat [$env log_stat] - foreach pair $stat { - set msg [lindex $pair 0] - set val [lindex $pair 1] - if { $msg == "Log file Version" } { - return $val - } - } - puts "FAIL: Log file Version not found in log_stat" - return 0 -} - diff --git a/storage/bdb/test/wordlist b/storage/bdb/test/wordlist deleted file mode 100644 index 
03ea15f7277..00000000000 --- a/storage/bdb/test/wordlist +++ /dev/null @@ -1,10001 +0,0 @@ -cooperate -benighted -apologist's -addresser -cataract -colonially -atoned -avow -bathroom -anaesthesia -columnated -bogs -astral -barbed -captives -acclaims -adjutants -affidavits -baptisms -bubbling -classic -allaying -component -battlement -backtrack - -courage -bore -advertisement -attests -bunny's -airlifts -cajole -cataloging -airily -collected -abridged -compel -aftermath -barrow -approve -chillier -bequest -attendant -abjures -adjudication -banished -asymptotes -borrower -caustic -claim -cohabitation -corporacies -buoy -benchmark's -averting -anecdote's -caress -annihilate -cajoles -anywhere -apparitions -coves -bribed -casually -clue's -asserted -architects -abstained -attitude -accumulating -coalesced -angelic -agnostic -breathed -bother -congregating -amatory -caging -countryside -chapel -buttonhole -bartenders -bridging -bombardment -accurately -confirmed -alleviated -acquiring -bruise -antelope -albums -allusive -corker -cavity's -compliment -climb -caterpillar -almond -authenticated -balkan -assembly's -acidity -abases -bonny -been -abbots -abductor's -aerials -cancels -chalked -beeps -affirms -contrariness -clearest -appropriations -critiquing -affluence -bouts -abiding -comprises -brunches -biology -conceptualization's -assaying -abutter -adorable -beatable -appenders -aggressors -agrarian -bottleneck -angled -beholds -bereaved -creation -animated -candied -bar -aeronautics -cousin's -cleaver -alienation -billet -bungler -contention -businessman -braids -assert -boisterous -consolidate -breathing -ballot -averted -conscientiously -bellow -brazenness -coaches -bulldog -classify -checksum -almond's -cornered -caskets -capacitors -beefer -connoisseurs -consisted -adore -circumvented -colonels -addenda -boost -compatibility's -bumblebee -commonest -containment -active -absorption's -creaks -administer -beset -aborted -aforesaid -aridity -broken -azimuths -aerial 
-addition's -aggrieve -anthology -circuitous -checks -alley's -beam -boss -corrupting -absolutes -asteroid's -bandstands -beatitude's -analogue's -busts -confession -bedstead -affairs -blackmailers -collared -buckboard -assassin -accessor -adjudging -binders -constituent's -blister -aromas -approved -absorbent -barbarously -cat's -builder -brandish -assailing -constitute -christening -acutely -amount -blurry -blocks -advertise -chain -brigade's -confusion -beds -arrangers -colonizers -beautifying -bankruptcy -bedazzles -candidates -clearness -admonishment's -behind -abbreviations -basting -ballasts -amateurism -celled -constituted -bonfire -bugled -advisee's -battled -budded -burners -causeway's -calibrate -brambly -befuddles -azure -busiest -admiringly -appropriator -accumulator -cables -abhor -civil -botulinus -creaked -bismuth -astronomical -abscissas -bodice -aunt -cascades -cares -comradeship -assemblages -boater -bellmen -admission's -ambitious -baldness -abortive -controlled -chinked -coded -courtrooms -arteriolar -cooler's -cared -brewer -christians -barbecues -contacts -blackjack's -buzzing -blasters -accords -braziers -allegretto -catered -breveting -cleaning -amicably -bummed -consulted -allegro's -accumulator's -compartmented -condemned -concludes -bitwise -cheered -appropriator's -accessors -casting -carolina's -accompanying -budding -correspond -bach's -angel's -bearing -arresters -biweekly -character -badgering -cantankerous -avalanching -adjudges -barometer -append -continuations -burped -boxtop's -abstention -amp -axiomatized -bimonthlies -aghast -arresting -breakwater's -continuing -bridle -bobbin's -antagonistically -blindly -biochemical -biologically -antifundamentalist -confer -cloudiness -bonded -comfortingly -caption -blackmailed -bidders -breakpoint -brigadier -criminals -coyotes -casserole's -annex -cereals -breadboxes -belgian -conductivity -counterexample -anarchist -couches -atavistic -clipped -button -axiomatic -capping -correcting 
-chase -chastise -angle -burnished -beauteously -antipodes -crippling -crowns -amends -bah -brigadiers -alleged -correctives -bristles -buzzards -barbs -bagel -adaptation -caliber -browner -apprehensions -bonnet -anachronistically -composites -bothered -assurer -arc -chaser -bastards -calmed -bunches -apocalypse -countably -crowned -contrivance -boomerang's -airplane's -boarded -consumption -attuning -blamed -cooing -annihilation -abused -absence -coin -coronaries -applicatively -binomial -ablates -banishes -boating -companions -bilking -captivate -comment -claimants -admonish -ameliorated -bankruptcies -author -cheat -chocolates -botch -averring -beneath -crudely -creeping -acolytes -ass's -cheese's -checksum's -chillers -bracelet -archenemy -assistantship -baroque -butterfly -coolie's -anecdote -coring -cleansing -accreditation -ceaselessly -attitudes -bag -belong -assented -aped -constrains -balalaikas -consent -carpeting -conspiracy -allude -contradictory -adverb's -constitutive -arterial -admirable -begot -affectation -antiquate -attribution -competition's -bovine -commodores -alerters -abatements -corks -battlements -cave -buoys -credible -bowdlerizes -connector -amorphously -boredom -bashing -creams -arthropods -amalgamated -ballets -chafe -autograph -age -aid -colleague's -atrocious -carbonizing -chutes -barbecued -circuits -bandages -corporations -beehive -bandwagon -accommodated -councillor's -belted -airdrop -confrontations -chieftain's -canonicalization -amyl -abjectness -choke -consider -adjuster -crossover's -agreeing -consolations -capitalizers -binges -annihilating -callers -coordinate -banshees -biscuits -absorbency -corollary -corresponded -aristocrat's -banally -cruiser -bathtub's -abbreviated -balkiness -crew -acidulous -air -birdies -canvassing -concretion -blackjacks -controller's -aquarius -charm -clip -awarder -consistently -calibrated -bushwhacking -avaricious -ceaselessness -basically -accolades -adduction -commending -consulates 
-certifiable -admire -bankers -appropriateness -bandlimits -chill -adds -constable -chirping -cologne -cowardice -baklava -amusedly -blackberry -crises -bedeviling -botching -backbend -attaining -continuity -artistry -beginner -cleaner's -adores -commemorating -amusement -burial -bungalow's -abstinence -contractually -advancement's -conjecture -buckling -conferrer -cherub's -belonged -classifications -baseball -carbonation -craved -bans -aphid -arbor -ague -acropolis -applied -aspired -calibrating -abundance -appeased -chanted -ascent -convenes -beep -bottles -aborigines -clips -acquainting -aiming -creditor's -abolitionists -cloves -containments -bungling -bunt -anchors -brazed -communicator's -brew -accumulate -addicting -actively -befog -anachronisms -bumblers -closest -calculators -absurdity -colleagues -college -assesses -conflicted -associational -betide -conceptualization -adjutant -alliances -corresponding -barometers -cot -brooch's -coiled -arboreal -convicted -artless -certificates -bourbon -astonish -bust -correlate -amounts -anal -abstraction's -corns -conqueror's -boldly -bob's -beer -blanks -corpses -contingent -blackly -backed -appearances -cancers -actuating -apprehension's -colorings -anglicanism -armament -armer -bizarre -begotten -actions -archly -capriciously -clue -contractor -contributions -agendas -coached -blamable -annoyers -coupons -brooked -assortment -axes -celebrates -courageously -baroqueness -blasphemous -asserter -contents -correctly -challenged -bulldoze -casement -acknowledge -bitterness -belongs -allotments -chalice's -bequest's -adjacent -consumer's -conservatively -coalition -background's -backache -befouls -brushfire's -analysts -branch -airways -awaiting -breakfast -anoints -baying -contrary -bilge -chasm's -babes -afresh -centerpiece's -barked -coffin -assumed -actresses -accentuating -aching -abet -balancers -consumptively -cagers -backing -angiography -chord's -cheapened -bewailed -arson -begged -convergent -bowlers 
-conflicting -confiscated -bitch -bloody -brushfires -bleach -computation's -choppers -circuitously -chancing -bunker -concept's -alacrity -boyhood -ammo -bobwhites -carter -ardent -bier -airway's -brownies -aura -cannibalizing -confirms -australian -barrage -closures -assertive -abstainer -bicarbonate -clone -back -cipher -crown -cannibalizes -away -crafty -airings -amtrak -comical -burnish -continuum -apparition -apologizing -blot -blacker -characters -built -apparent -applicative -assiduous -attorneys -affectionately -bobbing -baggy -comic's -attempt -appealers -amortize -bonanza -backwards -bowers -anemometer -ambulance's -creeps -abduction's -coal -chiller -adjudications -clogging -ascending -bookkeeper -crawlers -battery's -artifacts -attributions -amusements -aftermost -allophones -bemoaned -comptroller -bugger's -buoyancy -booboo -award -amplifying -certify -bivariate -attunes -asteroidal -chant -collectively -chasteness -chapels -copiousness -benign -armies -competing -buss -awakened -breakpoint's -conceptualizing -cleansers -acorns -conveyance's -bluer -battle -budges -characteristically -be -contour -beguiling -awarding -armhole -airship's -bathtub -breathable -crowded -compiles -certain -brutalizing -bacteria -baronies -abode -blacksmith -brinkmanship -capitalizations -cousin -botany -avionic -companion -consists -connoisseur's -avalanched -claimant's -backstitches -affixes -bikes -atomically -cowed -asleep -becomingly -acorn's -complainers -appreciated -cross -cringed -booting -attitudinal -broadcasting -childishly -breeze's -craven -boll -clause's -burden -appendages -atemporal -allah -carnival's -anchorage -adjures -besought -abounding -crucifying -arrangements -antiquarians -burrows -antipode -canvas -constable's -coopers -ascended -companionship -bakery's -bayonets -conclusively -boasters -beneficiaries -conspicuous -contriver -architecture -breakthroughs -brownie's -blur -academics -antagonist -contemplates -arena -caravan's -administers 
-comprehensively -convey -bigot -blitz -bibliography's -coerced -assail -amazons -banned -alabaster -concluding -bouquet -barks -acquaintances -astonishment -constraint -backpack's -breakthroughes -blocking -accomplishers -catastrophe -bushels -algae -ailment's -anemometers -beginning's -chefs -converse -cornerstone -astound -assuring -adornment -anyone -alumni -club -bestselling -businessmen -constructed -attendee's -cooped -ablute -chronicler -alaska -clam -canonicals -concerned -aligned -creek -burrow -allay -admirals -blackens -compressing -confirm -cows -battleship's -belched -affixing -chalices -choirs -absentee's -baseboard's -apportionment -adheres -accounts -chef -access -clearings -accompanists -concentrating -ado -bathos -bailiff -continuance -ball -bearer -congress -cites -can't -balloon -crams -consults -bungled -bike's -apes -assassinations -colt's -consecrate -ancients -chick -analyst -adsorbing -burntly -accompanist's -apprehensive -bengal -boughs -ankles -anchored -benefits -accommodation -amiss -brink -chewers -blueberry's -chairs -adjoin -bivalve -autobiography's -automated -comparisons -climbed -artists -congruent -cold -atonement -cashier -armageddon -allocations -bereavements -bumblebees -blew -busboys -bottoming -alternations -apprenticed -bestial -cinder's -consumption's -abbey's -amended -continued -birefringent -barbados -ability's -compulsory -antler -centerpieces -accountant's -arrogant -ballads -ascenders -appliers -adjustment's -blabbed -baits -activity's -clod's -adjudicating -bleak -commutes -bumming -beating -cohesiveness -branded -acknowledger -communications -blockhouses -booklets -consenters -creek's -consulting -binary -coaster -ascription -bushwhack -boggles -affidavit's -arrangement's -congressionally -convenient -avoider -abaft -bootlegger's -befriending -ceases -carbonizes -clumps -commented -competence -conversing -butting -astonishing -armful -allegory's -crisis -critiques -concurred -conservative -aristotelian -blizzard's 
-corner -amateur's -compare -affiliations -bestseller -batch -cleanly -assayed -bravos -bowls -conceptualized -babe's -algorithm's -baptist -cheeks -conquerer -bidder's -behaving -briefcase's -analogues -amply -attitude's -apple -crossable -ambushed -besmirches -creditors -bandwagons -continentally -adjuncts -concerns -agers -cop -amoebas -bisected -bombing -appendices -cocking -bused -babied -compounds -asserts -believably -alert -apostate -catalysts -aureomycin -convex -beetle's -banishing -agitating -bystanders -bow -connotes -blanch -charmingly -animal's -baritones -brier -astronomer -company's -balding -actually -aunt's -avalanches -acquisition -base -compilations -bathtubs -actualization -chanced -atom -banged -befuddled -apologized -componentwise -britisher -began -conservationist -actuate -crosser -appended -bitten -ambivalence -acetate -conversions -buzzwords -askance -abolishing -birdied -creeds -anglers -colossal -bereft -chock -apprentice -cooper -besmirching -allocating -antiques -bikini's -bonders -afflictive -augmentation -atheist -bucket -bibliophile -annexes -beguiles -birdbaths -amendments -animators -asymptotically -communally -barber -biographers -arguable -confidant -apologies -adorns -contacting -coarsest -artichokes -arraign -absorbing -alden -commercially -cabbage's -coincides -clumping -cents -alleviater -buzzard -braked -anesthetized -bugling -capitalist -befriended -appreciatively -boomtown's -cozier -critic's -correspondent -bard -attenuator -bake -brings -chews -anechoic -brutal -colder -buckshot -canvassers -analytic -allies -alloys -awake -alienates -bin's -crimes -constructible -classifiers -bulb -cream -banquet -axiomatize -adjourn -converted -auditioned -comfortably -bandwidth -cannibalize -ascensions -bussing -balloons -contenders -commemoration -aspersions -consultation -cashes -belting -augurs -architectural -bluebird's -breastworks -absconded -bullets -bloodstain's -blunder -astronautics -coo -approves -authority -assure 
-amsterdam -acquitted -adversity -celebrate -bred -bridged -bloc's -bullied -affinity -breezes -baptistry's -constitutions -avouch -amazingly -consolation -abnormality -clashes -buttes -buzzard's -breathers -chipmunk -contented -carol's -armers -amazedly -comprehends -canonicalize -breakthrough -arbitrator -butterfat -cases -besiegers -affianced -amelia -bush -airplane -annulled -bike -alternated -attackers -convene -aficionado -anachronism's -crude -carelessness -akin -combated -assisting -clocker -attacked -briefed -antic's -attendants -attracting -cope -allotting -bandwidths -add -assaulting -breakage -climes -arrival's -burp -accelerator -capacitance -arabians -bankruptcy's -archeological -coins -browbeating -chasm -cardinalities -compartmentalize -courter -assess -abreaction -brakes -compatibly -compression -characterizable -briefing's -alto's -classifiable -contrast -correlation -colonial -applying -authorizers -contesters -basely -cherries -clicking -cornfield's -alarmingly -conferences -business's -banker -bloomed -airfield -attracts -building -commutative -atomization -competitions -boatsmen -acquirable -arkansas -command -beings -compactors -anodize -arguments -conforming -adsorption -accustomed -blends -bowstring's -blackout -appender -buggy -bricklaying -chart -calmer -cage -attractive -causation's -athenian -advise -cranks -containers -besotter -beret -attender -cone -bills -aligns -brushlike -brownest -bosom's -berth -accountably -bequeathed -affirmatively -boundless -alleyways -commute -bendable -abhors -calculation -affidavit -answerable -bellicose -counterfeiting -admiral's -chisel -bridesmaids -believers -aggregated -conspicuously -abased -armenian -conspirator -canonical -assignable -barrage's -clearance's -casts -administratively -befoul -chaffer -amazer -colorer -broaching -crevice -aniline -coursing -compassionate -adhesive -bibliographies -corrects -augments -between -causer -amorist -cellist's -acoustical -baseless -cigarettes -astuteness 
-appropriators -convincing -bellhop's -bemoaning -calmingly -chronologically -castles -algebraically -appointees -academic -blunderings -assassins -barrel -accuracy -amortized -ballpark -acrobat's -brazier's -abortively -coarser -airfields -contester -circus's -creased -amorphous -accomplisher -blabs -butchers -crackles -bachelor -aviators -chariot's -circumflex -binocular -alienating -artificially -agreement's -aglow -afghan -abrupt -annihilates -apologetic -barge -betters -algorithms -conjurer -chargeable -brindle -alphabetizes -coder -availing -bandpass -arrogance -convent's -advertiser -connected -basso -breakfaster -comic -congenial -beau -courters -adapters -abruptly -chemicals -bringed -creaming -butterer -attained -actuals -averred -brainwash -centerpiece -blabbermouth -byproduct's -adaptable -automata -art -cheery -beheld -beehive's -claimed -crucial -brokenness -agility -combating -cleft -amenity -after -configuration -contrasting -coarsely -brass -barnstormed -bowel -bridesmaid's -cornfield -crazing -autocracies -adult -conceptualizations -corroboration -bedders -arroyo -alarmist -boatman -chests -burglary -budgets -canary's -arraigning -chin -barnstorms -blamers -brimful -calculate -cellular -contended -challenges -brusque -bikinis -arithmetics -chairpersons -class -aircraft -capably -centralize -awhile -compacting -courteous -archaeologist's -cram -adagio -affronts -amplitude's -bureau's -audaciously -autism -blueberries -an -chips -confiner -chopper's -chronology -breaching -bead -amass -camouflage -compensation -aspect -broker -atrophy -balk -bloodless -barnyard -benefactor's -airdrops -caused -anthem -activist's -bottomless -arrogates -avoided -bouncy -clarified -articulate -almoner -communists -blokes -butternut -clockings -barium -blows -criticism's -associations -brute -bleeds -alliteration's -bluestocking -boxwood -clearer -allegiance -conceptualizes -captivating -bolshevik's -belabored -biographic -contaminates -chanticleer's -adjusted 
-childhood -arguing -cape -conversantly -compensating -collaborations -arraignment's -blasted -charging -aggregation -apprentices -bird -codifiers -ballistic -breve -bells -carolina -chalk -buckles -boyfriend's -adorn -accoutrements -availability -antisymmetry -blades -alluded -asterisks -bookcases -additive -consents -advanced -balalaika -coders -caliph -alundum -are -controllable -blazing -clattered -asiatic -axiomatizes -ace -coining -column -auditor's -carol -concatenated -arrayed -capital -cautioner -clan -beauteous -abbreviate -asteroids -canal's -consolidation -closets -concealer -crevices -abed -complex -conviction's -abide -arrests -begrudges -adolescent -conceals -cells -circles -bravest -compromiser -bagels -areas -afore -allergies -arrangement -attraction's -amulets -abstraction -captured -crouched -brothers -cash -achieving -bastard -compete -boiling -beaching -amphetamines -clerking -congestion -alleviates -angry -bared -comprehended -bloodstain -constituency's -automating -aerial's -counterfeit -besotted -basses -biofeedback -compilation's -band -consulate -appellant -cough -antennae -contend -anniversary -boor -artifactually -aerobics -booths -chubbiest -consumable -assignments -bromide's -confined -breakers -alongside -courtier -boisterously -bilaterally -alternation -auspiciously -arbitrated -condemning -burns -correspondents -composition -cavalierly -coverlets -capacities -clatter -apotheoses -cartography -ceased -capitalized -auditor -appendicitis -chops -barony -anemometry -befouled -briefer -chest -begetting -bloats -bookseller's -commitment -confides -carcass's -battering -altruistically -ballots -adornments -broaden -angularly -coefficient -cataloged -brae -advantage -anthems -calculated -counseling -agitate -accentuated -camel -ambivalent -bedposts -beacons -chubbier -cheerer -assumes -concord -autumns -convention's -alpha -adulterates -arbiters -archaically -criteria -achilles -cheaper -bulling -associators -bloater -brawler -ability 
-adherents -commonwealth -coyote's -centrally -bequeathing -abandonment -circumstantially -courteously -borrow -countermeasure's -capricious -allied -anagram's -absorptive -assuage -asset -booked -aspects -commits -crates -capacitive -condones -assimilates -carriage -competitor's -cocoons -aggravated -caravans -arbitrator's -baked -balanced -annihilated -addressable -autonomous -bandwagon's -contesting -burrowing -coroutines -abjection -correctable -applauded -bragged -code -aggressiveness -cluttered -attacking -chide -am -coasters -blizzard -contentment -altruism -certifier -capturing -combinators -carefree -activate -blindfolding -assassinating -approximate -biplane's -aplenty -arteriosclerosis -concentrates -antisymmetric -assurances -anarchist's -ascend -advancing -atrocities -butt's -bearable -craftiness -categorized -barn -contributor's -arises -bushy -bisque -coasted -bargaining -area's -couples -cabs -barter -bulletin -chisels -broadcasters -contingency -bywords -antimicrobial -coexisted -blinding -arithmetize -coweringly -convince -competed -bauble's -crab -boggling -advocacy -atlas -assembled -ancient -bloodstream -balking -bin -bully -affirm -cruelest -atone -conserved -confession's -bat -captive -aster -blames -colonel's -bones -borderline -cleanses -classified -crudest -contiguity -bailing -ablaze -bender -attendee -clobbers -aliasing -autopilot -coolers -cache -allayed -barnyards -britons -appointment -adaptor -blockers -abridges -bloodiest -betrothal -bombards -bony -bus -canary -antinomy -awash -comrades -ablating -collectible -boats -brand -church -bandy -adhering -barred -ammunition -chime -accompaniment's -battleground's -composing -caveats -armor -amoeba -composure -collides -avowed -banding -counsels -asymmetric -abbreviates -balky -adjudicates -anointing -accursed -copse -action -construction's -accents -ambition's -caressing -cosmetic -accession -clutters -censures -allusions -belittled -armchair -abode's -conception's -ascribe -aliases 
-ancestry -ax -companionable -aright -boxed -brighteners -alloy's -checkable -arraignments -bed -bunkhouses -abbeys -ceasing -companies -cherishing -chunk's -barony's -chinning -burdens -briskness -beggarly -beloved -clambered -constitutionality -beguiled -archers -alleyway -apostle's -consulate's -antiformant -categories -construct -aliments -acquired -blotted -alterations -adolescent's -cranes -bluntest -accusation -chafer -airstrips -abolished -bothersome -churchly -airy -bedded -awareness -alliterative -arose -amputates -civilization's -arenas -certifying -aspirators -carbon's -bunching -aerates -bilked -checking -cloned -administrations -canvasses -colorless -chamber -circumspectly -benedictine -advisedly -classifier -approachable -banners -concurrently -chores -agape -convention -bindings -budget -comedies -ants -ambassadors -chroniclers -carrots -colorful -bulkhead's -coherence -buyer -aggressions -congressional -commoners -cheapen -concealed -columnates -anarchy -actress's -baseboards -creature's -centuries -barbarian -concrete -bicycles -acceptably -acclimating -biceps -bloodhound's -becalmed -apostle -bible -conjunctive -comb -ballers -bickering -adulterous -austrian -applicable -blackberries -creasing -catalogs -avert -asparagus -cambridge -bird's -belgians -admonished -admirations -conscientious -crescent's -connectives -blissful -commenting -bagged -assimilate -abounded -copyright's -advancement -axiom's -compilation -circumlocution's -catheter -chances -concretely -codification -browned -clustering -bum's -clauses -boundlessness -arteriole's -alfresco -begrudged -blustered -anglican -adjoined -bamboo -bathed -consortium -carrot's -cloak -album -bunglers -approbate -colored -aim -cowboy -alienate -cleverest -ambiguous -confrontation's -clear -africa -bowline's -astronauts -belayed -censorship -animation -bedrooms -chasms -compared -cogitated -barbarians -accomplices -columnizes -beaming -busied -counterpointing -aluminum -coconut's -acclamation 
-chokers -biomedicine -basalt -buckwheat -cardinality's -bafflers -arid -chap's -abound -biblical -backbone -anticipation -condemner -angular -advisability -believing -boiler -arclike -abetter -bespeaks -axiomatically -coarse -auditions -bludgeoning -clam's -chief -arrow -cementing -anxiety -aberrations -brushes -cherub -corollary's -bunters -beefers -barbiturate -circumlocution -conjoined -charities -coverage -campaigner -burrowed -barracks -bristling -accomplice -abandoned -bull -caked -century's -bantu -bristled -airer -bench -bevy -chamberlain's -attention -cloning -camouflaging -alder -counter -credibly -approvingly -breakup -artillery -celestially -bail -baker -bullish -canvass -conversationally -bringers -augment -creditably -butterers -botswana -contemptible -bribing -adumbrate -barb -calico -alludes -amplified -chills -cloak's -aver -arthropod's -budgeter -bereavement -cellars -crewing -blackmailer -ayes -bedsteads -breachers -bazaar -centered -celebrity -blameless -abscissa -aerators -awaited -british -adversary -cowslip -buttons -confusing -buggy's -belts -canceled -addresses -bribes -condoning -bonneted -coarsen -amazement -angels -chemise -carbonates -apostolic -bandit's -contending -consummate -counterclockwise -beneficence -benefitted -contradicts -comfortabilities -anemone -conductive -articles -bookcase -burst -baptizes -countless -costs -agonizes -byte -creeper -begs -bunnies -attract -able -calories -baskets -american -brunt -cognition -closing -chef's -backbone's -complicates -cloister -bedsprings -arrays -brigs -archbishop -buckler -clove -catholic's -bellboys -chairmen -clap -clarifications -ambuscade -bight -bellyfull -allowance's -academy's -acquiescence -ambush -catches -at -billion -contact -bees -adopters -approximately -chiseled -attributively -criers -codification's -cowslips -contradictions -buttock's -categorically -counterpart's -confessor -appreciably -adjusts -altitude -construe -cancer -bay -aristocratic -alleviaters -binoculars 
-axiomatizing -changer -bustle -civic -bostonians -crops -authorizations -cogitation -baptize -caressed -abase -ariser -axiomatization -aggravates -confiscation -bowdlerize -backspaced -alters -clarity -blots -bland -belligerent's -burgher -cardinally -bookcase's -buggers -byte's -avarice -crowding -beriberi -allegories -coronets -cell -calculative -adduce -amperes -bladders -adages -contests -cognizant -actuates -ambiguity -brighten -concert -conviction -booty -ashtray -braves -blouses -avoiders -confederate -bombings -couplings -convictions -attractiveness -chronicled -corers -anger -covertly -aural -asynchrony -arrowheads -breakdown's -bulletins -ceremonialness -clipper -bracelets -anthropomorphically -benedict -connecting -bacterium -achievers -abutter's -autocorrelate -coupling -blanketer -continental -assignment -conundrum -arab -besides -cheerful -blowup -bastion -arrive -combines -agar -cookie -astronaut's -constraint's -article's -confiscations -bounded -adjudicate -belligerently -boron -brownness -adept -creep -abduction -accosting -asylum -autographed -clash -chiseler -clumsily -capitally -braking -absenting -bagatelle's -comet -basked -anything -buffeted -absentia -bounty -carols -characteristic's -constructive -comforting -aflame -brainwashed -booby -aspirations -adjudge -behaviorism -computability -assessment -consultations -bowstring -acknowledgment -arranger -chancellor -attest -compresses -concessions -asymmetrically -administering -clamoring -arraigned -archived -admonition -actor's -aimers -colorers -booklet -calibers -affix -bushel's -atomizes -creeks -bleedings -casuals -archives -certainly -animate -cons -affiliate -answered -coyote -coughed -alligator's -antagonized -arousal -assisted -aerated -competently -conquering -acclaimed -assign -announcer -controllers -amalgamation -comfort -antihistorical -availed -balsa -annoyed -basted -asymptomatically -cropped -combinational -barging -conversant -causality -botches -bedspread -considerately 
-bookstores -climate -blessing -accordion's -cdr -bonanza's -construing -bearings -bluster -backspaces -babyish -countermeasure -crime -battered -audit -associating -corps -application -archangel's -aided -breasted -compelled -acrobats -breakfasts -chronologies -beet's -averts -convergence -attributable -adverbial -churns -arrest -breastwork -beefs -brownie -create -contradistinctions -coordinators -abandoning -byline -beatitude -autosuggestibility -bipartite -annals -assents -conceives -amalgams -cleft's -clicked -appointers -bible's -boots -caret -attaches -controversy's -combinatorial -bazaars -cardinals -bored -catering -christian's -ashman -consequence's -austere -clay -birthday's -amongst -arbitrariness -brainstorms -chateaus -coaxer -applause -cautiousness -adorned -compromises -creatures -compliance -apartheid -archiving -amoeba's -communal -comedian's -aggressive -crop -ante -better -chalice -aristocrats -circling -belittle -abortion's -coldly -certification -befriends -courthouse -anesthesia -accorder -athletic -blithe -bedder -abasements -councils -beware -abductor -assonant -clench -aspersion -abortion -abating -birches -breakpoints -acyclic -ablate -canners -cistern -boxtop -composite -cloudless -computation -chastely -abusing -bunker's -compounding -alveolar -chaplains -bias -audiological -capability's -bangle -barren -antidote's -cranking -baptizing -bond -borders -automobile's -allegoric -chargers -baltic -autumn -columns -absolute -connoisseur -cranberry -contiguous -consoled -confirmations -argot -blouse -annotated -callous -astounded -crashed -autonavigators -chivalry -columnating -beefed -convincer -allegorical -bagger -assume -containable -artistically -calibration -architectonic -campaigns -addressability -crazier -buy -brightener -bastion's -blurb -awaits -commands -chocolate -bleaching -antenna -blowers -chorused -composers -assigners -aspires -coils -bid -application's -clamped -bedding -awkwardly -coppers -costumes -borax -caged -candler 
-badges -clutches -consign -apprised -buys -adiabatically -aggregately -canned -abstract -acrimony -coax -analytically -absurd -alluring -contradicted -aspersion's -bribe -boos -chattererz -backache's -complying -continent -cohabitate -causation -astronomer's -cities -bookie -bleating -cracking -bicameral -convoluted -adjustable -ambulance -can -boulders -consideration -announces -briars -antipode's -bartered -ancestor -biplanes -characterize -crested -bum -bridling -consolable -bungles -coffee -buffets -congratulation -commitment's -adequately -clown -capacitor's -broomsticks -agglutinate -activations -asians -canon's -authenticity -complexities -cripple -bracket -counselor's -beatably -bounced -baton's -crankiest -barbell's -caster -casseroles -ballad's -bob -batched -attenuated -beakers -biologist -bleary -condescend -blondes -augustness -boldface -battlefronts -acumen -bolting -articulatory -butyrate -bowel's -backwater's -colonel -creating -authorized -bijection -accruing -admirably -correctness -citadels -clasps -bandlimit -bib -appalachia -contrives -bundle -audiology -circumventing -blinker -choked -bilks -clears -affirmations -arbitrating -bites -bootstraps -capitals -commuters -billeted -authentication -choice -attentively -aggressor -arterioles -crowds -chestnut -backstitched -attachments -assimilating -bewilderment -atrophied -chintz -blackjack -armadillos -bonfire's -ballast -agonies -busier -coefficient's -adventurous -ballet's -coil -chewed -come -bonder -catalogue -coursed -arise -biennium -ceremony's -blanching -appraisers -acolyte -argues -beholden -appanage -astatine -banana's -coons -civilians -bodyguard -archipelago -bug's -candles -antique's -accidently -blighted -belgium -besieged -burned -abuse -asian -chute -awkwardness -abasing -bottler -ardently -blab -breakwater -cavity -cheated -befall -according -chronicle -airframes -bats -choring -authorize -consumed -chatter -annunciated -capers -anomalous -clustered -burner -acquaintance's 
-badger's -basic -affectations -buzzy -coast -attendances -activating -beams -cohesive -attainable -barbecueing -beautiful -acronyms -communion -client -atypical -antagonists -conservations -arguers -agglomerate -antigen -battalion -ambition -countered -assistant -classed -arming -alveoli -buff's -backplanes -busted -bermuda -converting -brutish -boot -acidities -confrontation -chapel's -berlin -ascender -behead -buddy's -commandment -actuated -brilliancy -chance -bedrock's -bridgeheads -arable -avid -arteries -caresser -ballyhoo -attested -african -comradely -consciences -commencing -antennas -annulments -bobolink's -advisee -acceptance -crack -ascendent -appendage's -accommodates -accumulated -clones -apocryphal -ages -cluster -capitols -camper -beading -amble -buffeting -circumspect -advances -analyzes -courier's -aperiodic -appealer -atonally -attentive -conspire -appropriating -armed -allergic -agglomeration -consternation -blinks -audibly -aspirins -bunions -adverbs -armload -bet's -caring -carryover -coordinator's -afterthoughts -allays -abided -brownish -baiting -capitalism -coined -conspirators -automatic -contradistinction -conductor's -backstitching -conjure -casings -accountant -clinched -constrain -alcohol -bee -anticompetitive -britain -bade -camera's -antimony -activated -burglarizes -compatible -cotyledon's -artificiality -bath -citadel -archivist -chandelier -addiction -ampersand -bitterer -constructively -afield -bing -attractor's -cringe -allergy's -bigots -assimilation -ate -capitalization -abridge -buzzword -befit -bandlimited -commandant -alabama -acculturated -brightening -bulldozing -cooky -bunks -centers -bespectacled -adherent's -abducts -another's -condensation -billeting -bye -chess -craziest -ballgown's -archaism -consorted -chinned -cowl -beat -bootlegger -bravado -classically -bulging -browbeat -accommodate -borne -bronzed -artifice -arcade -become -backlog -addressers -amphitheaters -befogging -crochet -aiding -celebrated 
-conversational -backbends -authentications -advertisement's -blockade -bulldozes -contraction's -bricklayer's -brain -conveying -anemia -chronology's -channeling -caution -commanding -crosses -artisan -conditions -admired -authenticator -airships -blunter -bridesmaid -counseled -cheeriness -chiefs -boils -clerical -atrocity's -balls -ambled -canvases -consoles -abscessed -abetting -blitzkrieg -bottlers -beveled -condemn -alumna -cords -admittance -annotates -citing -corrector -appreciative -branching -betrays -buttoned -ailment -boulevards -bottlenecks -chamberlains -bedbug -covenant's -crispness -considering -broadcasts -audubon -arousing -correction -barrack -closure -contrastingly -brittleness -assassin's -bursa -bungalows -balked -conceptual -carcasses -arabia -blueprint's -affectingly -consorting -buses -auger -appointed -brute's -bosoms -anyway -arrowed -anaphorically -clarify -approachability -assistance -buzzes -commonplace -bluebonnet's -adroitness -availers -aquifers -architecture's -action's -backgrounds -abduct -attired -briber -admissibility -cease -beck -auctioneers -birdbath's -atomic -crossing -considerate -biconvex -bulge -bedridden -arising -aggression's -cherish -bureaucratic -abater -amputating -atop -climber -clutched -afford -bisections -bonnets -commendations -bloke -abundant -clamp -aloes -aboard -atheistic -advantageously -buffs -chimney's -cheerily -benefactor -ample -bushwhacked -captain -buckskins -contextually -antiquarian's -browns -bubble -ban's -brine -acculturates -anhydrously -beaver's -advantaged -bibliographic -clasping -clattering -coerce -colorado -airmen -bandlimiting -balks -boners -attached -chosen -convened -bordello -composer -botanist -backtracks -civilization -commutativity -bloodshed -cohere -bunkhouse -archdiocese -boycotted -crosswords -bedspread's -anteaters -cove -apothecary -chute's -addressee -climatically -blower -bane -cask's -beetling -ambiguities -before -abstain -arachnids -bucket's -amateurs -blackouts 
-adverb -butchery -conjunction's -barricade -audiologists -aphorism -complete -butts -bishops -allotment's -confusingly -channeller's -blanches -bragging -bathe -comedians -celestial -citizens -couple -backpack -aphasic -brothels -axles -cancellations -bonus's -consolidates -authoritative -axle's -acclimatization -carolinas -chime's -antibiotic -bisons -biographically -achieve -bleachers -bicentennial -behavioral -accomplish -concealment -biddies -antitoxins -arriving -apprehend -affluent -cliffs -bleached -astronomers -connection -bride -backs -bog's -casket's -continual -ampere -cat -alternator -cotton -athletes -communicant's -best -befuddling -benefactors -appease -annoyingly -context -astonished -cracked -amnesty -autumn's -binder -babying -contributory -assumption -cowls -cocks -airless -consummated -atypically -beneficially -chairing -accusative -commanded -bufferrer's -alerter -arbiter -civilly -charms -backscattering -cheater -bushes -caverns -chieftain -calf -comparing -aurora -butyl -cower -bemoans -baptistry -carpenter's -capes -bordered -arrows -blocker -crest -appeal -arabic -conventions -axis -brains -bookkeeper's -circle -cooks -circumlocutions -adventists -barringer -affording -anatomically -basements -barbarities -configuration's -contributes -collaborating -beach -comet's -bakes -assigns -ballerina -cheapens -clinging -conquered -bisecting -closenesses -bugle -boatmen -beatings -complicator -bight's -banister's -archaic -anthropologists -clams -beginners -committee's -communicants -alone -bounteously -bastes -ascertain -alphabetical -bringing -batters -amazon's -constituent -benders -being -constitutionally -audiometric -blast -copings -bailiffs -colts -coolies -airlift's -boomerang -bifocal -clothes -cashiers -congenially -billows -boilerplate -biochemistry -betting -brimmed -complementers -breading -bragger -adducting -bisectors -abrogates -criticized -comrade -bucolic -birthright -blurs -challenger -complicated -bluebonnet -biscuit's 
-classmates -campus's -boundary -bedbug's -adjustor's -acre -bicycling -awe -additions -baiter -authorizes -beautify -copier -buffet -belfries -acquisitions -brooch -crickets -caterpillars -beefsteak -complicating -bedpost -criminal -celebrity's -bookseller -christened -coerces -clamors -all -boatyard's -canoe's -begin -anaerobic -bushing -agreers -concedes -countermeasures -beg -agglutinin -bunted -ammonium -aspiration's -bathrobes -changeable -beached -bestowal -beaner -catsup -admires -clockwise -agile -alarms -ached -chinks -buffer's -cartesian -annunciate -chanticleer -avenue -anchor -alliterations -blanking -bargained -breathtaking -crime's -assiduity -argentina -contiguously -aqua -bested -borderlands -appetite -captive's -bipolar -conceal -counters -costumed -arrestingly -bunting -blight -champagne -brusquely -address -bloodhounds -associative -creed -arithmetical -balustrade's -belabors -complementing -checkout -archivers -badlands -behaviors -ampoules -bridgehead's -antiquarian -clumsiness -considerable -apportions -anglicans -appealingly -barfly's -absorptions -awards -congregates -cloister's -armour -avoid -correctively -chucks -burps -bums -berry -batches -administration -atones -bishop's -blonde's -casualty's -cores -bodied -alter -assonance -apprise -antitoxin -avariciously -checkpoint's -affirmative -conjures -angstrom -aesthetically -canyon -binge -crazed -breastwork's -aids -boston -conceits -announcement's -beechen -accessory -authorities -constrained -automation -anaplasmosis -commander -commendation's -belabor -cornfields -artemis -asphalt -contracted -brochure -crafted -allegedly -alien's -auditory -blowfish -adducible -confederations -annuals -britches -acquaintance -appallingly -abounds -burglarproof -crossers -bayous -brisk -authority's -covetousness -averse -accomplished -aromatic -admiral -bijective -avenging -bran -boatyards -beseeching -challenging -bares -acts -abductions -compendium -compulsion's -calendar's -clad -blockage 
-conventional -craze -cajoling -acceptability -bungalow -buff -cramps -attackable -calculator's -asp -braved -colors -balling -contaminate -crackling -comes -complimenters -across -astronomy -aborigine -bobwhite's -autopilot's -chattered -appall -autonavigator -bashed -acoustics -beachhead's -apartments -convenience -blackout's -bands -autonomously -amounters -centripetal -achievable -astringency -attuned -concatenating -copyright -coding -assumption's -anastomoses -confiscate -asking -beneficial -adhesions -busboy -bronzes -audacity -bruises -crash -beau's -circuit's -aborts -baubles -beliefs -assuaged -costed -blinking -characterized -bowled -block -conquests -confesses -amusers -ceiling -berets -berliner -abstentions -child -authoritatively -closeness -bushel -considered -communicates -cheerlessly -autofluorescence -aquarium -affects -appurtenances -airbag -approaches -admonishments -bets -bounden -courtly -bodybuilder's -campus -brainstorm -americans -chairperson's -botanical -askew -amazon -bleed -clime's -cooperations -commonness -boatloads -blinked -courtyard -adapted -aforethought -backwater -burr -cathode -awaking -buzzed -bridgeable -arrives -adventuring -beseech -attrition -copied -colon -client's -bandstand's -advice -baptistries -antithetical -alcohol's -contradicting -ambidextrous -belches -category -bluntness -coupon's -assimilations -comfortable -caller -affliction's -attends -compactest -baler -beacon -blind -bleakness -beseeches -courts -couch -consequential -adulterers -craving -biggest -astray -bigoted -barfly -charges -ambiguity's -commentary -crankily -cowerer -carnival -bachelor's -bituminous -continuance's -calamities -claws -apiece -century -ascendancy -charts -animations -aggression -chickadee's -carve -confidence -actor -bubbled -becalming -convulsion -chivalrous -brightest -centralized -beautifies -amateurishness -birthrights -alligator -circumstantial -constructors -conceptions -arranging -cart -cent -ager -congruence -carrot -chariots 
-cloudier -captivity -conquerers -compartmentalizes -condensing -celebrities -chalks -accordance -chilled -conversations -apples -conceiving -average -blessed -creator -ant -cling -annoyer -aviation -cohesively -correspondences -boor's -apprehended -bessel -both -characterizes -bards -cots -acculturating -cemeteries -carting -alcohols -bitterest -ascetic's -conducts -caking -airspace -autocrats -ashes -chimes -broadcaster -commuter -basket -borderland's -broadened -boyish -allegretto's -ban -bidder -christen -blessings -bury -arranged -choir's -apathetic -boring -aryan -appearing -binds -cooperates -bounces -airspeed -complicators -adapting -babbled -agglomerates -bedraggled -addictions -bolt -calmly -blur's -boatload's -anesthetic -bugs -colt -completing -boxer -billers -affronting -absurdity's -chides -comparatively -braided -clipper's -cot's -calves -articulations -branchings -attraction -concatenates -alligators -cake -boom -crashing -afar -abler -beamed -adverse -adrenaline -agriculture -beehives -crankier -courthouses -advises -consigns -bisect -azimuth's -carpets -arthropod -brewery's -commonalities -altruist -astride -appreciate -carved -briefs -admitter -celery -congregate -clocking -assassinated -adding -canvasser -civics -contemptuously -calculates -advisees -bumbling -algorithmically -cloudy -algebras -addiction's -cop's -assurers -confidently -affector -analyzers -chimneys -burdening -antitrust -admix -avoidance -choking -coexists -accustoms -cellar -anchovy -constructor's -confinements -consequently -accelerations -accoutrement -churchman -biller -affected -brigades -cremating -corridor's -bagging -ah -berating -collective -acuteness -arrestors -cab's -border -agitation -animism -arches -alveolus -cessation's -averrer -abash -counterrevolution -attesting -animateness -bawdy -americana -bloodstained -applicator -annotating -annunciator -clamored -acting -aerosols -axiomatization's -brags -coalesces -avocation -combining -crazily -bravery -burying 
-adored -airfield's -accounting -broadeners -anise -chimney -added -avenges -bellicosity -cranberries -arsenic -communities -comparable -bunkered -architect -alphabetically -beautified -apogees -communist -anatomical -complexity -accost -autographing -browsing -ameliorate -bookers -bandaging -clinical -appellants -counteract -clairvoyantly -bootstrap's -canner -boastful -attainer -ash -beaded -brake -barest -befriend -burglarproofing -allegorically -bunts -believes -accession's -buck -boathouse's -byword's -anthracite -accuse -conjunction -burping -commandant's -creativity -affirming -bark -amuses -balcony's -auditors -counsel -clamber -borates -cowboy's -bickered -boors -combing -biting -breeze -crowder -corn -bloke's -bombast -bookstore -blared -bedlam -carbohydrate -coops -bundles -blistering -antarctic -anterior -bilinear -chocolate's -context's -alternating -annoyance -constancy -ambivalently -buddy -brutalize -bobbin -alleles -commotion -attributes -airborne -creed's -bolstering -coaxed -airframe -breaker -accept -abashes -attentional -contributor -comparability -auscultating -cocked -computationally -buffered -career's -analyzable -absently -courtyard's -buildups -apportioned -balkanized -annulling -cremation -buffetings -conditional -confided -airliner -bulldozer -approaching -anagram -apollonian -canaries -bloat -bluebird -collision -cool -connectedness -abasement -artisan's -avoidably -clerks -afflict -briton -corroborates -cameras -counted -boldest -burglars -brutes -brows -abhorrent -configuring -averaged -ace's -buying -abandon -bayou -cottons -auditioning -amplifies -clippers -brainstorm's -alto -brutalities -bunch -agricultural -bursts -blunting -archer -activity -carefulness -bedroom's -concomitant -balm's -artificer -barking -breathy -babies -acacia -bodies -cap's -criticised -conversed -crewed -ascendant -budgeting -coroutine's -charmed -bellboy's -conservatism -butler -acculturation -conclusion's -adapt -cellist -contempt -adumbrates -borrowed 
-confounds -allegiance's -blabbermouths -accrues -captor -coop -baseballs -cottages -apartment's -assertiveness -assent -artfully -bagger's -abolishment -acetylene -accessory's -blackbird -baptist's -consist -cavern -buttock -corporal's -autoregressive -bailiff's -birds -corder -bracketing -antlered -barbiturates -county's -addicted -agglutinated -abashed -competitively -captains -bloating -accepts -choose -ashamed -backyard's -apiary -contradiction -balalaika's -arctic -broom -anvils -coffee's -alliance's -agitator's -change -adjusters -cremates -complexes -bodyguard's -burl -antithyroid -ambient -airfoil -apricots -athleticism -abjectly -bankrupts -answerers -alternatively -confronter -breaking -baronial -cannibalized -appetites -breaded -blackboard's -battlegrounds -cosine -barrenness -abbreviation -budging -boolean -acrobatics -again -ashtrays -clashed -contingent's -compulsion -bedazzled -collapsing -comparison's -businesses -compassionately -achievement -buffering -candlesticks -austerely -awls -associate -absolved -annexed -airway -clipping -counselors -conscience -attempters -constructing -biases -cautioners -comma's -cosines -char -auscultates -afire -comely -amity -beverage's -anew -ballplayer's -adulterated -authorship -alterers -burdened -attributive -afflictions -blinded -barrier's -attachment -brotherhood -bridegroom -atoms -cobweb's -copes -controversies -complexion -crawling -atomized -adjust -accuracies -concern -cinders -authorization -appraisingly -bladder's -cooked -cowers -batter -commissioner -close -burglar's -allocated -anvil -aftershock -abrogating -chemistries -advisable -conduct -committee -blaring -appalling -braveness -alertly -artificialities -brevet -collision's -arizona -bower -creamers -awnings -arsenals -crane -city -contemplative -catheters -administrators -attorney -churned -attractions -columnation -bobbed -centipedes -bostonian's -apprises -buries -allege -botulism -adobe -ambassador's -covenants -boon -asynchronously -bigness 
-axial -chaffing -battleships -ant's -anthropological -accent -brushing -brassy -consumptions -battleship -absorb -beckons -brook -connectors -clinches -accesses -beaters -archaicness -bursitis -chided -bomb -assimilated -addicts -convening -arianists -counting -altar's -confusions -attachment's -clipping's -amazing -corset -bossed -attach -commandingly -animatedly -allegations -assuages -annulment -compress -aptitude -absurdities -autobiographic -aspect's -concentrator -burgesses -anagrams -bedeviled -assemblers -convinced -commentary's -agglomerated -biological -callousness -axolotl's -atmospheres -authoritarian -cancer's -above -charting -aldermen -battler -cistern's -bouncer -amassed -conquest -altering -arrogantly -brokenly -comparator -counsellor's -attenders -cackle -criticize -authored -ably -believed -compelling -accepter -cleansed -afflicted -backslash -computed -almighty -attache -braes -carriage's -benediction -brigadier's -contemporariness -boomtown -amplitudes -breakwaters -clod -catch -bar's -activist -caves -assenting -camp -attainments -brotherliness -continuances -appearance -applicator's -browbeats -banjos -addendum -became -adduces -armadillo -brothel -almanac -courageous -assault -chunk -coaching -atheist's -blunted -aperiodicity -congresses -boastfully -burglarproofed -broadest -bashfulness -affect -acne -bottleneck's -criticisms -corrupts -colonized -closeted -canonicalizing -auditorium -antenna's -awfully -anti -consumes -agonize -algebra's -championing -blush -bugger -antagonize -beethoven -blase -boycotts -compensatory -bugged -boroughs -anatomic -batons -arguably -affricates -appreciations -cavalry -alumna's -arcing -backpacks -braces -contextual -coupon -chillingly -allocates -abuts -contribution -commodity -admonishing -coolly -cabinet's -collapsed -confessions -adjured -capriciousness -chastising -babe -aerodynamics -accepting -concept -contour's -consequentialities -birthday -bankrupted -birthed -benefit -concentrations -azalea 
-channels -chestnuts -contenting -antedate -censors -contagious -abbot's -channellers -apt -commend -avocation's -admonition's -abolition -confederation -carried -clumsy -coincidences -bumper -burr's -bugles -bribers -attainably -consume -comma -creativeness -accuser -bombs -abbey -baffled -aside -clip's -appeases -compass -bundling -abstractionism -confide -creases -apropos -confronted -corrective -concurrencies -autocratic -alien -attending -antagonistic -broadcast -asymptote's -belied -breasts -contrapositives -coiner -accordingly -cohering -computers -cow -bibs -ancestral -controller -attacker -alerts -coconut -agency -alerted -alcoholism -ammoniac -actinometers -acquitter -bud -cessation -alleging -centralizes -articulators -council's -carvings -arduously -blown -anode's -arrogate -bisects -centimeters -burgeoning -course -appointee's -ascribable -communicate -contrivance's -adoptions -attune -acres -abyss's -corporal -certifiers -analyze -augusta -bestseller's -checkpoint -coexist -attainers -argon -bearded -crudeness -averaging -brick -adducing -annulment's -chicks -blocked -cisterns -afoul -affiliates -briskly -adhesion -ascertainable -appeasement -blueprints -agreements -blindfolds -communicator -characterization -annoyances -breeches -brushed -clinic -competes -chuckled -cradled -balmy -antisubmarine -alternate -armpits -barn's -conjuncts -adhere -allows -counteracted -appetizer -capturers -cleanse -avant -abbe -corpse's -arduousness -badge -begets -contemplated -caveat -copiously -athena -aggrieving -alibi -accumulation -basket's -aftershocks -bass -conjuncted -chaps -brunch -colonials -bibbed -clusters -antagonizing -constituencies -combings -bearish -continuously -adequacy -brow's -catalog -alderman -comedic -chemists -concernedly -conceded -alarm -arced -buckle -confidingly -coherent -closes -buffoon -brace -adjustably -crackers -contamination -burgess's -aerobic -constitutes -baptismal -broadness -blimps -concatenation -claiming -bard's -aerosolize 
-adjoins -copies -coats -boggle -corroborated -concreteness -bill -cautions -bantam -bearably -armchair's -birthright's -cravat's -cone's -courtiers -asunder -bulletin's -biopsies -alley -contrive -blasphemies -amuser -ballerinas -blushed -causticly -brandy -blinkers -complimenting -crimsoning -angola -apprehensiveness -bolster -columnate -byproducts -berths -accusal -chubby -arrived -camps -blemish's -anaconda -cook -airfoils -atlantic -boosted -converge -availer -appalachians -coffin's -boarding -alga -crouch -columnizing -consul's -chastises -angling -apple's -billiard -attentiveness -adroit -apprehensible -cereal -blouse's -browning -bodybuilder -coaxing -assertion's -connective's -commemorated -accountability -crooked -blips -chandeliers -aristocracy -bangs -coke -abutment -community -calculus -congregated -crepe -compromised -airlines -contributing -contingencies -coordinated -alginate -batted -contender -alma -antagonisms -accompanied -airport -administrator's -appraisal -breadbox -condemnation -backlog's -available -consequents -crooks -commonwealths -barring -channeller -crucially -archaeological -charming -adventist -credits -appetizing -breads -clients -climbing -aloneness -abstractness -appearer -astute -clockers -antagonizes -agonized -bastard's -conjectured -aqueducts -aureole -boatswains -conjured -chauffeur -complementer -behold -bustards -bivouac -cluck -anus -bless -catastrophic -bounty's -allowed -answer -concealers -brainchild's -coercion -buzzword's -bordellos -appertain -applier -couriers -aesthetic's -craft -capacitances -capped -coupler -category's -anvil's -conquest's -checksums -clucking -bronchus -acrimonious -changeably -accenting -argued -conditioning -brewing -backwardness -cascaded -atomize -contours -arianist -apart -conflict -carefully -banshee's -conveys -arbitrates -amphitheater's -amen -alimony -bound -buzz -courtroom -apparently -coalescing -circulating -amounter -bypasses -breadth -choral -completion -arisen -anticipating 
-bilges -contractions -bedspring -commune -blacklisted -beagle -alkaline -atolls -carelessly -blimp -corking -brevity -alterable -canada -bear -bluntly -cartridges -connoted -countries -corroborate -consecration -corrupted -appreciating -combatant's -alkalis -affecting -blues -casserole -ballad -bewitches -common -as -because -bathroom's -anchorages -beguile -connect -convenience's -counteracting -assorted -care -contains -centimeter -ancestors -briefings -busses -churchyards -breakable -amortizing -courthouse's -click -courses -ajar -county -covet -confidences -capitalizer -agog -backtracking -copious -bestsellers -chilliness -bringer -browse -centipede -bawled -bricklayer -breath -assailants -abysses -command's -characterizer -calculating -america's -aurally -contain -alias -commentators -confounded -appending -accidents -chatters -coordinates -bleeder -blueness -badger -bolsters -astounding -capitalist's -conservation's -commences -aimed -bun -comparators -competition -bauble -backbend's -bled -assassinate -chop -anemometer's -cobbler -coldness -audiometry -affinity's -amalgamates -cowardly -consolidating -beads -brackish -bookings -accuses -bog -compartmentalizing -clutching -calming -collars -clambers -banqueting -beaked -authoring -correspondence -apostrophes -affirmation's -bespeak -costing -brought -complainer -battalions -asymmetry -boathouse -canyon's -awarded -amplitude -anarchical -anticipatory -bolder -cooperatives -caterer -adviser -balkanizing -augur -cannibal's -balustrades -attaching -collector's -commercials -capaciously -coincidence's -bumps -ascot -bale -blackmail -baby -aftereffect -bloomers -buttresses -avenues -climaxes -aqueduct -cater -brainchild -avail -bypassed -bowl -california -cements -boxes -brained -bedevils -captors -acuity -ascends -breakthrough's -assigner -caner -bequests -ceilings -axers -bookshelf -autistic -celebrations -axons -chiding -asterisk -allophonic -blindingly -cherubim -boaster -confining -anxious -clowning 
-advisement -approach -anesthetic's -crescent -alertedly -birdbath -beardless -bras -auspices -choosers -approval's -afflicts -corrosion -arpeggio's -bodyweight -cranky -battlefront -affirmation -churchyard's -aeroacoustic -anders -adjustment -baneful -citation's -acetone -blend -binuclear -boner -annotation -announce -claimable -contemporary -clothing -acquitting -choosing -attacher -bananas -binaural -arrestor's -aches -conclude -collaborators -await -blaspheme -bequeaths -crows -balconies -begging -conducting -abstracts -assignee's -causations -approximation -articulated -considerably -apricot's -afferent -assertively -bonding -calms -cranberry's -cost -captaining -agenda -corridors -complaint -christens -aggravate -countess -arbitrators -ascribing -breech's -bellwether's -burglarized -confinement's -animating -adjectives -cannister's -bemoan -cleanest -acme -cheapest -activities -allophone -boy -belaboring -captions -compactor's -actuator's -befouling -arachnid's -computerizes -compile -absorption -bridled -absorber -convicts -birch -alkaloid's -cannot -bacilli -charitableness -abated -ceaseless -beavers -bookshelves -commensurate -appreciates -basil -cartoons -aides -buxom -cages -cantor's -acceptances -antiquated -amalgamate -babyhood -beers -conforms -bouquets -canner's -baste -cashed -argue -butcher -backbones -absolve -crib's -cafes -abstracted -book -committees -authentically -conference -antisera -bourgeoisie -attribute -biddy -autobiographies -chivalrousness -coverlet -ambiguously -calorie -anhydrous -alignments -around -archfool -advance -bedpost's -affective -contained -amain -bromides -clogs -bricker -arduous -consistent -amidst -confess -complain -anniversaries -coasting -cobwebs -aries -benchmark -aviaries -bombard -boxers -ashtray's -assyriology -blaze -ablative -chaos -burro -arguer -ashamedly -crier -allocator's -aggressively -carts -advisory -airship -alkali's -backup -chaining -continue -cartoon -circumference -breadwinners -autonomy -banking 
-armored -cabin -chunks -antigens -blistered -airers -breakaway -belief's -belays -coveting -auburn -careful -anybody -bumbled -cautious -adopter -ballplayers -anteater -citadel's -avails -agent's -caliphs -bridgehead -already -caterpillar's -coachman -centralizing -alphabet -concede -barbell -breadboard -ballast's -activators -attendance -blandly -calculator -codeword -addressee's -avenue's -alcoves -alternately -admonishes -concentrate -crossbars -adjoining -basset -carbons -beast -blonde -castle -clarification -bitch's -abrasion's -books -amputate -bicycler -aphonic -arraigns -acquiesce -buster -chaperon -advisements -buyer's -attack -birthdays -blazed -confuser -crag -ballet -airports -bison -counterexamples -arteriole -colony's -adamantly -blunders -chivalrously -adult's -authors -amplifiers -counterfeited -complicity -astrophysical -axolotl -bash -battleground -butterfly's -axioms -allegory -blitzes -blindfold -bufferrers -approximating -byways -computations -alight -avoiding -assurance's -barrages -canonicalized -callously -auditing -authenticating -bag's -asters -artistic -bonanzas -applaud -certainties -auto's -concession's -cascade -chubbiness -churchyard -afternoons -antigen's -baron's -amphibian -banister -capitalize -approval -appropriated -bureaucrat's -covets -cloisters -circulate -bivalve's -beta -collector -among -cane -birdlike -attenuating -conjunctions -appliance's -coral -crucify -abnormal -combined -classroom -buckskin -commissions -abolishments -arching -croak -americium -associates -car's -assuringly -agreer -anticoagulation -closure's -corkers -attend -alphabet's -awakening -composedly -attracted -construed -cricket's -applicability -autonavigator's -chloroplast's -ashen -beggars -corporation -another -conflicts -bootlegs -archeologist -alcove's -agitates -cargoes -creditor -cops -advisably -coronation -bourgeois -crochets -cropper's -cramp's -adulterer's -corroborations -changing -combinatorics -calm -comprehensible -blooms -coolness 
-copying -blacksmiths -commodore -compulsions -clump -afterward -crucified -brooder -buckets -accelerating -accented -boat -adventitious -baseline's -courier -calamity's -atoll's -brutalizes -bundled -chairperson -cheeses -continuation -celebrating -apologists -behest -bumpers -consonants -circulation -betraying -commuting -breezily -circumstance -coughing -benefiting -conquerors -chemically -commencement -adjustors -angel -congratulate -conspired -causally -bud's -conquers -augmented -bereaving -advisor -articulation -angler -admission -bide -competitors -amusement's -collecting -adder -arithmetized -cheek's -apostrophe -blockages -clockwork -bubbly -apricot -adjudicated -banter -amused -breacher -bracketed -aimer -comprehending -bunkers -canton -arcane -absent -capitol -consequence -cognitive -abjuring -clever -coronet -anathema -artichoke -controls -credulous -acid -crawled -coupled -boomtowns -aspen -acted -anyhow -burdensome -backdrop's -apocalyptic -cornerstone's -cautiously -blisters -conveniences -arbor's -accessories -alleges -clubs -accompaniment -blazes -annually -clique's -beamers -ballgown -autumnal -acreage -conjunct -balances -consoling -canvas's -competent -aggrieves -although -afraid -clearly -cognizance -acoustic -colleague -causing -absences -closers -airs -cinder -adversaries -altruistic -brews -ceremonially -appraisal's -commissioners -army's -assists -acceptor -comparison -cooling -conveniently -couching -changes -clinic's -confronting -adjunct's -blandness -alternates -bunter -consequent -clean -autos -accumulators -carver -aprons -awful -bobbins -blasphemy -assuming -abscess -assemble -cabinet -atomics -blacklists -audacious -assay -anthropology -barnstorm -awl -bumping -assembles -capture -compensates -coverable -amend -array -continually -absented -cigarette -antiresonance -backspace -branched -appellate -courtroom's -alienated -austerity -cement -asked -antelopes -cottager -bluebonnets -booze -amendment's -backslashes -begun -bijections 
-cafe's -boatload -collect -appeals -belittles -befit's -beauty -arrogated -academia -contagion -blemishes -coverlet's -comfortability -antecedent -controllably -congressman -complicate -coincide -arrears -clumped -credited -buffoon's -catholic -accompanist -beauty's -aster's -blatantly -bothering -bewilder -canceling -carbonizer -accentuation -backstairs -anticipations -bestowed -civilian -blooming -blunts -airlocks -argo -blueprint -aristocrat -cakes -complements -ale -camping -army -adrift -bengali -barely -blasphemes -briefcase -brooches -ailments -blazers -crevice's -bankrupt -archiver -articulator -alphabets -bonds -colliding -candidate -cashier's -bellwethers -airstrip -announcers -calendars -corrupter -aqueduct's -axiom -bathing -blusters -ascribed -admittedly -angrily -analytical -contraption -convertibility -abysmal -cathedral's -aversion's -algol -articulately -breveted -bickers -chatterer -adoptive -bijectively -cloudiest -coarseness -carted -cocktail's -capacious -anion -buffoons -bleeding -bedrock -adventurer -compositions -camouflages -brittle -chip's -aloe -chorus -cargo -critical -biographer's -abject -blasphemousness -charmer -betray -blacking -awoke -allele -bags -claimant -clover -biographies -confound -advertises -crafter -cripples -bygone -concentric -couldn't -contentions -acrid -costume -aft -aesthetic -bandits -adducts -constellations -coffer's -created -commercial -art's -cookie's -ammonia -adjunct -articulateness -congratulated -crags -brandishes -annual -byword -affection's -college's -aboriginal -bikini -buttering -allotter -console -advent -activates -beverage -april -acceptable -barrel's -boys -attractor -azimuth -critics -ballooner -aren't -adulterating -criticise -abeyance -automatically -collaborative -capabilities -crawls -anomaly's -climaxed -animately -aroma -belie -attires -argumentation -baseboard -bluebirds -cactus -byproduct -balancer -beholder -conservationist's -betrayer -agony -accusingly -convict -coaxes -breeds 
-agitated -championship -brevets -auscultate -counselling -cornerstones -america -canoes -aspirator -compensate -antiseptic -bereave -absinthe -compose -collide -alabamian -candid -civilized -clamps -authoritarianism -colonist -bugging -bins -abashing -battlers -canning -berate -assembler -amateurish -boasted -angriest -bluffs -colonize -balcony -bleat -bustard's -attenuate -contagiously -bicep -babel -beatniks -brush -analogy's -audiologist -assessment's -camera -arbitrary -alleyway's -concession -constructions -accompanies -accretion's -aroused -charcoaled -belated -bottom -bloodshot -bisques -advocate -arabs -cathodes -adamant -challenge -absurdly -abolitionist -cleavers -bludgeons -bassinet -clause -coiling -cask -boob -azalea's -afghanistan -carriages -blade's -bobby -asinine -acclaiming -absorbed -blacken -cheating -bootleg -anonymous -addict -astonishes -awry -adequate -categorization -casks -blaster -aspirants -abscesses -airing -assumptions -capitalists -board -asynchronism -body -aye -contraction -athens -arsine -cohabitations -below -bows -aviator's -ampoule -connective -adapter -authenticate -blackboard -brilliant -appoints -attics -conquer -boning -comestible -camped -blonds -aisle -coals -billboards -characterizers -crow -clout -admirer -actuarially -abstruse -accessing -bonfires -clenched -characteristic -catching -chars -canons -barrier -championed -butterflies -completely -calendar -artwork -abjections -burgher's -correlates -arrivals -accepters -circuses -breadboards -accomplishment -analyzed -appropriates -cancel -bordering -aperture -civilizing -assortments -blackest -blitz's -copy -commenced -admirers -cheers -croppers -cliff's -circumstance's -bibles -buttressed -consecutively -birefringence -automaton -cheerless -chopping -ballooned -convent -acknowledgers -appointing -belies -comeliness -bangle's -communication -bisector -avocations -clique -brainstem -campusses -allocators -bramble's -assaults -commemorate -appendix -agent -apportioning 
-bottled -artifact's -block's -archery -bagatelles -candies -catched -cognitively -creepers -concentrated -bout -balustrade -abodes -carrying -confirming -cannibal -chinners -carbonate -anguish -butt -colons -ablated -corporation's -cock -convincers -beret's -bluish -compressive -authenticates -commemorative -bureaucracies -coinage -coach -assigning -concentrators -capitalizing -appraisals -belaying -candy -blossomed -bricks -atonal -analogue -caters -barbaric -applique -clink -audio -actress -assyrian -apprehension -conversation -apsis -bedevil -comics -affricate -comings -buttress -angering -buckboards -bombed -adversely -adequacies -commended -causeways -adherers -codes -aquaria -ape -bulks -compactly -brainwashes -bleats -commandants -conditionally -adjourns -clobbering -allowances -buildings -complemented -blanker -algeria -brief -creak -adductor -categorizer -approacher -argument's -clocked -bedazzle -cause -coordinator -buildup -countenance -abhorrer -backtracked -bogus -closer -broilers -chirps -adjournment -belles -bitingly -befogged -contexts -amorous -breeding -abortions -blockage's -alternatives -bouncing -beryl -ballistics -banters -carpenters -auction -bowdlerizing -brazen -bonuses -circulated -adultery -archival -bears -baptized -burglaries -borrowing -barbarous -casher -adolescents -atrophic -busily -aerating -coatings -athenians -casing -consuming -alphanumeric -beaches -bisection's -conjecturing -aspirate -biography's -accompany -bureaucrat -broomstick's -colony -coalesce -clock -bequeath -collaborates -belonging -configured -burlesques -anode -consenter -bug -counterpoint -counts -bangladesh -analogical -accident -bulky -affinities -abysmally -boorish -assiduously -cannisters -autocollimator -bassinet's -barrelling -blurts -carbonize -candle -act -addressees -constraints -boast -complaining -coziness -avocado -coolest -blank -beadles -anytime -covetous -appellant's -angers -academies -ageless -chased -constitution -consonant's -boosting -ascetics 
-aerosol -apse -blushes -clang -confers -confidentiality -coolie -colon's -chickadees -badminton -argonaut -constituting -aloha -contracts -broomstick -brackets -attendant's -connection's -conciseness -abstractor's -composes -chaste -assures -conjuring -barbital -bunion -bases -clowns -barrelled -audience -auctioneer -complexly -aviator -conjectures -backscatters -cheerfulness -communicating -agreement -bricklayers -bilabial -abstruseness -cobol -cooperating -admit -blundering -accelerates -assaulted -concealing -anachronism -bowels -butane -anniversary's -converts -convoyed -climates -barriers -clubbing -additives -bask -confessing -caravan -colonizes -continuous -cheerlessness -boggled -armpit's -bridgework -allegro -cricket -cannon -adoption -clanging -auscultations -billowed -alphabetize -airlift -appointee -boyfriend -chaotic -corrections -bonus -contrasted -convulsion's -confessors -adumbrating -autocrat's -coronary -authentic -barley -brawling -aegis -appends -bolshevism -charted -applicant -aileron -considers -chin's -alkyl -amendment -boulevard's -avian -breather -canyons -cannon's -apportion -badgered -augers -advisers -censuses -beveling -aught -arthogram -anonymity -appliance -atmospheric -anesthetizing -ambulances -blustering -burnt -chestnut's -collects -aliment -anxieties -championship's -channeled -arrival -amassing -corpse -bedtime -blackbirds -cats -constants -chemistry -brewery -brother's -boasts -accentual -bellwether -bely -courted -baroness -configure -collection -aviary -achieves -belfry's -beech -baseman -bacterial -contestable -blond -contracting -comparably -consultation's -booster -conspiracies -belief -candidate's -boardinghouses -connectivity -check -crazy -collided -assistant's -critic -bilateral -cheapening -appalled -autopsy -balled -abnormally -acquires -aloofness -backwaters -combative -computerizing -craters -contributorily -behaved -comers -axiomatizations -analogously -banjo's -cleanser -capitalizes -chamberlain -aggregates 
-amenorrhea -begins -condone -cleaved -bustard -adsorb -airedale -bridles -audited -could -amour -checkbooks -admiring -arrested -commerce -asbestos -can's -clamping -bathers -acknowledgments -census -acrobat -bargains -apogee -creaking -busboy's -additional -chants -circumvents -afloat -anyplace -alumnae -anions -classroom's -ballerina's -convents -angered -climbers -citation -cools -clamor -capaciousness -beatific -abrades -advocating -coverings -claims -brethren -advertised -atrophies -coffer -beagle's -brazenly -bitterly -clergyman -braiding -compressible -convicting -agreeableness -antithesis -cogently -botanist's -bidirectional -bewilders -airlock -costumer -blamelessness -agglutinins -catalyst's -allocation -annunciates -borderings -accomplishes -confronters -clinically -breadbox's -canvassed -communicative -coercing -backpointer's -bramble -congregations -crave -courtesy's -cocoon's -admitting -chieftains -acclimate -consequences -cones -contradict -axolotls -contractual -artist -atrociously -consecutive -berated -bluing -attacks -choruses -blatant -balance -amplifier -assist -analyst's -ambler -conveyance -compromising -baffler -corridor -bed's -condoned -boulevard -anomie -averages -basics -apologia -cabbages -concretes -alcoholic -aliased -chocks -balsam -collies -censor -arouses -conundrum's -academically -bent -codings -coastal -allots -acclaim -citations -cantor -circularly -boarder -caribou -biologist's -cowling -connects -chasing -bootstrap -backscatter -abstractly -corrupt -alleviating -biasing -abrade -arraignment -beaten -blanketing -compactness -adage -coincided -borate -bra's -concepts -bootleger -christian -argos -basal -abate -campuses -abridging -confusers -cabin's -audition's -amphibians -attractively -adhesive's -ascendency -beforehand -ache -brokers -bowler -criminally -american's -chock's -artillerist -appropriation -characterization's -artifices -annoys -constituents -bottle -beaned -consisting -beholding -ceremony -carpeted -absolutely 
-anorexia -accredited -azaleas -amaze -commit -afflicting -contriving -adventure -blood -blabbing -absoluteness -appreciable -approachers -bumptious -behavioristic -anticipates -adults -barnyard's -banging -banana -bilge's -aware -coheres -bronchi -commissioned -arrogation -confines -core -attenuation -afterwards -clearing -applies -alphabetized -cemetery's -campaigning -abolishes -brig -cheer -combers -backtracker -clinker -clouds -clog -berries -advising -childish -clobbered -bride's -astrophysics -canker -concatenate -bite -chagrin -bodybuilders -calamity -admiralty -councillors -competitive -assessments -copper's -cabling -casket -conducted -backplane -boyfriends -bingo -broader -confiscates -communicated -baton -cocktails -albanians -boardinghouse's -brats -akimbo -categorizers -comparator's -blackbird's -accidentally -companion's -clippings -accosted -bell's -burly -aggregations -boathouses -airmails -abreactions -changers -carbon -cleaners -bookkeeping -correlations -backer -conclusions -brainstem's -anecdotes -chateau -cogitating -amphibious -compounded -completeness -comptroller's -boatswain's -bolstered -acquiescing -actors -calorie's -adaptability -abstractor -bimolecular -belly's -automobile -automotive -analyticities -awesome -colonizer -approximated -chemist -coronet's -classmate -anteater's -altars -adulthood -amid -assails -blizzards -corroborative -biographer -compartment -blooded -bipartisan -bluff -aloof -bronchiole -clincher -congratulations -ablation -caught -collier -chooses -antidotes -artery -clearance -civility -basketball -auscultated -behaviorally -crowning -autobiographical -cheaply -brutally -agonizing -clerk -comprising -baller -confuses -acquiesced -astonishingly -birthplace -covered -chopper -combinator -benignly -bedside -blasts -billboard -appraise -aboveground -comforter -credulousness -battlefield -barefoot -cleverness -apparatus -bartering -bromine -aerodynamic -crabs -chains -airflow -allegrettos -armchairs -blacklist 
-approvals -bait -collections -antecedent's -airbags -casted -content -conferrer's -crouching -coughs -canal -amphetamine -augustly -bedraggle -arithmetic -cataloger -alluding -credulity -coffees -crueler -beautifully -caresses -correlative -consul -criticizing -couched -baths -alchemy -bargain -accomplishments -conveyer -benevolence -broil -chilling -axed -attire -collisions -categorizes -cited -aeration -accommodating -coordinations -boxcar -cattle -bullion -afternoon's -captures -afghans -comets -component's -ark -bounds -adjusting -bravely -capability -chap -absolving -aspirating -arcs -conspires -collaborated -admonishment -astounds -brasses -compromise -changed -consumers -connoting -buttonholes -cordial -anionic -chastisers -archive -alleviate -burglarize -acquainted -copiers -cashers -antisocial -creations -bookie's -censure -beadle's -banded -circled -bulged -cheapness -attorney's -chewer -bookshelf's -councillor -assertion -broom's -contemplations -club's -balkans -cherubs -alas -chair -apologizes -compartments -beyond -aptly -censured -allegros -boosts -card -arithmetizes -attainment's -arrester -anding -asker -compatibilities -confidentially -commissioning -cleaner -aversion -cooperative -battalion's -cemented -charity's -conceited -capable -anymore -computing -aping -chiefly -affair -beaners -allying -caption's -antipathy -causal -abyss -botchers -burglarizing -confidant's -activator -continent's -census's -brat's -antagonism -bedspring's -antiserum -charge -connector's -alike -believable -belfry -cast's -bureaus -beneficiary -abolisher -artichoke's -broadly -concurrent -alteration -bookies -crafts -bays -ass -bouquet's -ave -chords -crazes -anemic -appoint -beets -billing -contest -assassination -allot -brindled -acute -absolves -adsorbed -auxiliaries -belatedly -businesslike -assassinates -bookkeepers -bevel -adders -automate -archangels -breakfasted -changeability -contested -cradles -combatants -besieging -certainty -attempts -bankrupting 
-compiler's -complications -banquets -ancestor's -ail -abbreviating -compacter -approvers -acknowledges -comically -almonds -counsellors -calmness -assailed -crane's -baser -big -corruption -circuitry -briefness -community's -banquetings -alms -bass's -bellowing -adoption's -blockading -compellingly -builders -befallen -bombproof -cartons -chore -crimson -anther -clucks -assemblies -beatitudes -aspiration -compels -angst -balancing -bowstrings -bayonet's -butte -biomedical -casualness -accolade -blackberry's -bunched -affright -clung -burlesque -bare -corrected -arbitrate -cropping -coherently -bloodhound -circularity -courtesies -articulating -concluded -analogy -brutalized -airmail -cooperator -cousins -centralization -bibbing -beside -bravo -abductors -cars -bovines -bump -absconding -chins -chasers -boundary's -antecedents -awed -counselled -aback -attenuator's -blazer -bettered -awaken -abreast -beagles -artisans -buckled -credence -control's -bewhiskered -calloused -breathe -collaring -blossoms -bring -actualities -bivalves -animals -cowboys -constituency -affordable -acrobatic -attiring -boatswain -concurrence -abrasions -babel's -cowerers -chiffon -bostonian -criterion -blinds -cased -affections -conditioners -clutter -accrued -attractors -botcher -compunction -bludgeoned -censored -allah's -chronic -burrs -commodity's -appraiser -asserters -cheaters -besting -anchorite -combine -afforded -cigarette's -bathrooms -apostles -chloroplast -bootlegging -bibliographical -beans -bylaw -benefited -brochure's -cordially -brashly -beastly -bologna -alderman's -burning -billow -convert -buffaloes -comparatives -assistances -camouflaged -announcement -bobwhite -brawl -adducted -cavern's -affectation's -bandying -brunette -architect's -aphorisms -cremate -bray -billed -conception -battlefield's -bandaged -broaches -bazaar's -beatification -bigotry -clergy -abstains -befits -bantering -conceivable -attachers -analogies -bimonthly -august -additionally -confirmation's 
-ballooning -cardboard -belle's -counterparts -candor -bishop -comprehension -affronted -bravura -courting -antidote -buggies -arisings -appendix's -bright -categorize -cooking -agnostic's -billets -amok -bewitching -audiograms -column's -bussed -checkbook -alteration's -atherosclerosis -broached -based -cacti -boardinghouse -bowdlerized -anchoritism -achievement's -bald -cover -codifications -capacitor -brashness -causes -acyclically -argument -boarders -audiometer -compute -contribute -crisply -bitters -circumvent -assailant -bosun -buyers -alibis -blurting -coasts -bivouacs -arrogating -albanian -attempted -acquisitiveness -applauding -alfalfa -cantors -canonicalizes -alkaloid -bruising -associativity -budgetary -carbolic -clashing -buffalo -acorn -analyzing -backyards -comedian -betwixt -aces -chartered -additivity -becalm -combat -characterizations -clinics -bulbs -bloc -amenable -civilian's -breech -attainment -bounding -compiler -cotyledons -billboard's -caper -aphasia -chester -combats -biddable -articulates -caps -assignees -bifocals -beady -chinese -assertions -allegation -championships -accrue -containment's -croaking -classifying -annum -brightened -bits -appointer -besieger -citizen's -cerebral -canto -bakers -capitol's -authorizer -blockaded -anodizes -alarmed -buttressing -attenuates -bumptiously -chronological -colleges -coward -contraption's -abstractions -controversial -boric -bids -agents -backpointer -bumped -bottoms -bowlines -captivated -article -cliche's -chases -choker -bremsstrahlung -consult -adjudged -auctioneer's -covers -accurateness -clues -bugler -bareness -cedar -alleviation -anesthetically -backpointers -arched -administered -arrowhead -continues -asks -confessor's -allure -backlogs -childishness -appointive -covering -conscience's -bellows -blanked -considerations -appalachian -aerate -budged -city's -accordion -cliche -collectors -comprehensive -boomed -chariot -baffling -bunkmate's -bumbles -contaminating -corroborating 
-applications -bursting -cabbage -befalling -acquittal -compromisers -components -arpeggio -brothel's -credibility -begrudge -confirmation -academy -appertains -calibrates -bureaucrats -bawl -costuming -biography -adoration -cloaks -aggregating -business -aphorism's -carters -admixture -coexistence -anomalously -adapts -amide -affiliation -capillary -biscuit -brainy -bellhops -chartings -cohered -austria -champions -basin's -cascading -consultants -bison's -admixed -arithmetically -clothed -betterments -conspirator's -addition -adolescence -bolsheviks -abominable -breathless -cozy -arouse -bumble -about -apace -astronaut -asteroid -cable -crab's -beachhead -assets -analyses -bisection -coconuts -alleys -armament's -bloodstains -arpeggios -apologist -blithely -anabaptist's -beadle -channelled -confuse -annoy -beautifiers -cheats -clenches -amuse -bewail -constitutional -birth -appendixes -amazed -berry's -bilingual -blustery -amplification -clogged -blackmailing -breakables -adduct -bondsmen -conferred -codewords -bequeathal -abundantly -banner's -atrocity -congested -closely -absolution -concatenations -anarchic -crag's -communicators -cavities -comptrollers -backstage -bewailing -charcoal -conveyances -collar -bores -briefest -comments -awning's -associator's -antarctica -correspondingly -bidden -ad -clings -bit's -apollo -bulldogs -chateau's -amounting -cogitates -bellhop -bookish -bout's -cannister -bicep's -asses -beef -battlefields -consort -auspicious -breezy -buried -beverages -approximates -conduction -bleakly -blanketers -ascertained -absentminded -bolivia -births -behave -bilk -breaths -charter -abstaining -appareled -boulder's -breadwinner's -correct -accessed -befitted -adulterer -axe -activation -betrothed -asymptote -bullet's -clusterings -baud -bustling -ballplayer -constraining -cleared -brown -affirmed -agencies -churches -backyard -burntness -bronchioles -charmers -backscattered -abridgment -claw -blow -adjourning -constantly -brightens 
-autobiography -cards -bypassing -alcibiades -concurrency -chuckles -bests -belligerents -adjustments -bolshevik -cabins -astronomically -cartridge -boxcars -boned -bottomed -burgeoned -adjourned -apprenticeship -chastiser -breached -boycott -butchered -coordinating -cottage -brainwashing -confinement -bandies -absentee -collapses -cruel -along -alloy -convoying -assignment's -crisp -ambidextrously -blindfolded -chilly -condenses -avers -broiler -anesthetics -beaker -cholera -brag -coffins -cranked -allocator -brutality -acquire -blushing -briar -abolish -crossovers -broiling -consolers -beatify -almanac's -cooled -commencements -clasp -committing -condemnations -altar -by -bombastic -confederates -bong -concerted -compilers -counterproductive -brig's -accurate -avidity -cleavage -blame -conceive -assessor -consolingly -concise -computes -alliance -clucked -axon's -annunciating -baseball's -allusion -brays -auras -blond's -bronchitis -ciphers -blowing -broth -canonically -baseness -byline's -appetite's -colonists -condensed -cawing -beaning -broadening -colonist's -apocrypha -chauffeured -cored -branding -carrier -assessed -collegiate -chirped -accounted -clubbed -antibodies -behalf -alphabetizing -conqueror -alpine -budgeters -casements -appropriate -compliments -cast -accountancy -cathedral -conserve -accorders -arbitrarily -cowing -bars -bagel's -climax -attention's -cautioning -centipede's -almost -abstractionist -carpenter -containing -arab's -courtesy -carton -accelerated -bowman -boastings -banal -bucking -accomplishment's -classification -baldly -abruptness -calibrations -blocs -biking -assenter -adversities -compartmentalized -chemical -attic -audiogram's -applauds -crests -bad -bounce -accelerators -contemptuous -attentions -cancellation -battles -aging -advantages -anthologies -answers -bruised -castes -any -coped -arcade's -adaptively -arsenal's -confessed -controllability -acceptor's -abrogated -abutted -amusingly -apology -broils -court -boundaries 
-bode -collie -adiabatic -ambitions -charged -awfulness -consorts -botanists -blurring -absents -batten -backwoods -breaks -certified -chattering -admitted -bathrobe's -analogous -corporacy -bijection's -combatant -checked -condition -amoral -bayed -bedroom -chanting -antics -charity -blip's -biped -brilliance -catchers -booted -anabaptist -clothe -comforted -complaints -coacher -admissible -bang -concisely -cookery -capita -assurance -codifying -benchmarks -aunts -commentaries -anon -applicators -constructor -associated -abuses -choicest -confiding -antislavery -apron -ashore -cheerfully -betterment -administration's -campaign -cremated -ambulatory -bleacher -afterthought -barkers -choir -crossly -conducive -cache's -battery -actinium -countryman -cajoled -appeasing -beamer -cleaves -anthem's -clearing's -cooperated -barker -crowing -apprising -accusation's -beginning -associator -booking -caved -amicable -codify -clairvoyant -bevels -becalms -brawn -bunkhouse's -arms -antiredeposition -belt -antiphonal -cried -brae's -bridal -acronym -clay's -checkers -auxiliary -bind -compares -agilely -askers -blankly -antagonist's -bimodal -captivation -creditable -concentration -calling -bartender's -autopsied -correspondent's -carnivals -abjure -bystander's -bungle -chanticleers -conceding -burghers -boards -accessions -compensations -arabian -churn -crowed -centering -abnormalities -courtier's -congregation -aberrant -annexing -blockhouse -anthropomorphic -bedder's -abutting -conundrums -affiliated -cancellation's -bolts -ballgowns -augmenting -bureaucracy's -bootlegged -audiometers -blueberry -affliction -appreciation -codifier -amasses -countering -crackle -canoe -consuls -breathes -broiled -amalgam's -bodes -ballooners -coating -corollaries -amphibology -agenda's -chafing -alcoholics -accredit -anisotropy -anchovies -carriers -acceptors -betrayed -buttocks -busy -bunny -cropper -accreditations -bumblebee's -adhesives -civilize -accedes -abroad -arch -crept -cotyledon 
-alphabetic -braille -amateur -adjure -ascertaining -budge -adulterate -additive's -cardiac -born -brewed -borneo -bun's -blue -cackled -acclimates -airline -blinder -brokerage -communicant -central -aggrieved -asynchronous -bough's -acidly -archaeology -complementary -animator's -bodyguards -climbs -apathy -constellation's -acculturate -archaeologists -contingents -control -anglophilia -billings -corporate -athlete -accusing -appear -announcing -accordions -computerize -combinations -bile -abut -charger -columnize -computer -blacks -converges -blamer -bulked -convincingly -checker -correspondence's -accelerate -accessible -conceivably -abscissa's -adsorbs -anglophobia -anomic -casters -churning -crease -brood -appendage -bulwark -bombers -arcaded -breadboard's -aphrodite -color -commodore's -answerer -bobolink -cloth -conversion -clime -artery's -birthplaces -compiled -arrack -beetles -bobs -compatibility -cocoon -counterpart -audible -colonies -airport's -beige -cogent -bromide -begrudging -acids -crucifies -beggary -archipelagoes -availably -counterfeiter -blanketed -amending -accelerometer's -advisors -byway -alignment -amber -austin -copyrights -beaus -brigantine -comforts -appointment's -crawler -bangles -contemplation -concur -characterizing -censoring -charters -catalogues -appropriately -builds -aeronautic -confused -comber -axially -cackler -coercive -ambassador -arcades -brash -amorality -belittling -battling -bloodied -acrylic -bantered -clasped -carcass -archangel -annunciators -aristotle -boulder -burglarproofs -chooser -abilities -calmest -bach -always -blaspheming -crossover -bakeries -clocks -ankle's -accidental -arbitration -chirp -aeronautical -boy's -acidic -bowline -anonymously -cod -couplers -beautifications -bluffing -backarrows -brow -covenant -acronym's -banning -albeit -ascetic -burn -animator -beatnik's -coveted -cipher's -broke -cap -bellman -bulldozed -clarifies -bathes -blip -availabilities -booth -clangs -audiences -cathedrals 
-confounding -bigot's -beecher -arts -company -attributed -avenged -bawling -caustics -alee -bordello's -banks -affords -complied -commas -collaborate -aquatic -ambitiously -burro's -beard -bittersweet -candlestick -bylaws -broadcastings -believe -barrels -braying -certifications -contrasts -crashes -audition -confine -bucks -abates -bureaucracy -ambles -besiege -broccoli -antibiotics -attenuators -accelerometer -caste -bib's -browbeaten -appurtenance -bauxite -asceticism -case -chewing -aerator -achievements -barricade's -agglutinates -bewildering -cartridge's -children -bufferrer -actuator -converging -bolted -chat -combs -chemist's -adduced -algebraic -circular -bloated -conclusion -burgess -certifies -absconds -comprise -benzedrine -bumbler -banjo -allow -appealing -cooperation -abraded -chaperoned -biracial -braced -censurer -acoustician -appraised -benefitting -constructs -convertible -administrative -asocial -area -creature -besetting -crater -begrudgingly -blanket -ablest -alba -airplanes -allowing -briefly -beneficences -concurring -adjective's -cork -aerospace -anomalies -asher -auger's -boilers -abhorring -broadenings -bladder -belay -approver -abdominal -commends -cringing -billiards -beater -auspice -contrasters -bights -absentees -atoll -cooler -activator's -basement -burgeon -allusiveness -codeword's -bandage -contemplate -adopted -coping -carving -baptism -colds -altos -background -closet -commuted -acre's -aliens -council -cans -cheese -ally -aseptic -belgian's -crossbar -addressed -commons -call -careers -breakfasting -brazilian -catholics -bachelors -consultant -brighter -crossword's -burglar -avoidable -batting -cigar -amps -axiological -combed -comforters -albumin -cookies -booming -archaize -canton's -bunkmate -combination -bondsman -anxiously -affixed -associatively -cigar's -backstitch -calls -captivates -commodities -atmosphere's -asserting -beaver -beatnik -container -activists -consoler -commoner -buttonhole's -abhorred -aggregate -cliff 
-antidisestablishmentarianism -broach -ambling -comer -bited -advocated -behaves -bosom -continents -conserves -bashful -ago -backarrow -circumventable -avocados -briar's -annuls -barnstorming -aired -carry -crossbar's -aspire -beards -abides -cliques -completes -brassiere -absorbs -annul -chairman -baron -battens -africans -abatement -colonization -carries -borough -allurement -breakfasters -alkali -acoustically -corners -capturer -casualties -asphyxia -animized -administrator -belying -basketballs -bylines -bandit -autopsies -braining -contradiction's -antic -butted -bacillus -blurt -conditioned -backers -agreeable -almanacs -cider -chicken -chambers -clutch -assailant's -conveyers -amazers -beribboned -breeder -caveat's -buffers -combination's -ampersand's -crafting -clanged -caving -aspirant -butlers -adjective -auckland -announced -creators -caches -baseline -codifies -baptism's -coarsened -cohesion -airman -avenge -backaches -budgeted -armpit -bicycled -converged -besmirched -autonomic -coming -assemblage's -chained -admissions -alcoholic's -branches -bunk -anciently -bloods -adventurers -amazes -coloring -abstractors -adaptation's -boar -amulet -agglutination -conquerable -booker -confronts -barometer's -bedbugs -barricades -cheap -bewitch -circus -backward -archeology -automobiles -bending -amino -beckoning -admits -berliners -borer -clambering -atomizing -banner -blissfully -catchable -breakdown -abjured -computerized -chaplain's -amphitheater -ballot's -craziness -croaks -counties -adopting -breast -airstrip's -basin -contemplating -commitments -critique -appears -bellies -baccalaureate -abducted -blackened -animosity -appraising -antiquity -assistants -asthma -bootstrapping -bounties -agleam -advertisements -benches -artful -broadens -chuck's -betrayal -blasphemed -brooms -castled -coroutine -conscious -beetle -banshee -advertising -baring -awakens -balm -billions -compromisingly -ballroom's -burrower -bayou's -ambiance -beheading -bought -adagios 
-adornment's -anointed -abolishment's -anesthetizes -badly -boyishness -consultant's -cheek -cannibals -breakdowns -assured -agates -bicker -appliances -cafe -bagpipes -adrenal -combinatorially -belligerence -bricked -adjacency -aimless -crook -cherry's -assessing -brushfire -cormorant -captained -blundered -conceptually -congress's -contraster -ambushes -bronze -autotransformer -corded -brisker -contently -announcements -bullet -apportionments -columnized -canon -conservation -algaecide -blackening -compassion -beaks -constructibility -chapter -abscond -costly -bacon -coldest -aptness -billionth -altercation -approbation -alternator's -criticizes -befell -canopy -buoyant -brazil -anticipate -absenteeism -champion -aesthetics -cadence -betroth -confidants -bean -braid -aphids -cluttering -cantankerously -bloom -barbarity -clawing -bogged -agreed -asia -abrasion -corporals -baselines -box -chartering -apotheosis -ampersands -conceit -creamer -adhered -circuit -carpet -accompaniments -boomerangs -blindness -chipmunks -bewitched -allocate -bicycle -compacted -cab -calcium -cellists -apex -borrows -completed -brightly -constables -ascertains -conspiracy's -badgers -bunion's -anabaptists -broadband -clefts -accepted -benched -catalogued -cadenced -alliteration -acquiesces -boxcar's -athlete's -bracing -cremations -analysis -crossings -assorts -apologize -brazier -configurable -basking -craves -belle -conversation's -belligerent -anesthetize -brewers -cackles -adventures -airlock's -booklet's -apply -anecdotal -bewails -computer's -autographs -acclimated -coefficients -avidly -beckoned -broadener -bulk -blacklisting -belly -acquit -convoy -achiever -aversions -advisor's -captor's -camel's -asset's -advantageous -basement's -confident -crescents -compiling -butler's -cartoon's -adaptive -chlorine -abets -cruelly -amiable -baleful -ceiling's -adumbrated -cherry -aspirant's -cashing -candidly -chaff -bitter -brim -alcove -bulb's -carbonizers -citizen -attic's -breed 
-consumer -conferrers -accommodations -contrapositive -beget -brilliantly -attentionality -continuation's -bosses -brave -configurations -benediction's -conferring -accessor's -bobolinks -bulled -cleanness -algorithm -advancements -altogether -accumulations -albacore -bowing -belching -apical -consequentiality -bagpipe's -ambrosial -bullying -cleans -attendance's -complimenter -blink -cager -assembling -coat -allowable -astringent -antiresonator -cardinal -clicks -commentator's -blossom -categorizing -amphibian's -commonality -consonant -classics -affable -accorded -aimlessly -archetype -administerings -boldness -anatomy -apprehensively -absence's -actuality -attempting -categorical -checkpoints -allemande -corer -behoove -bleaches -bough -blended -blotting -baptists -courtship -benevolent -bumptiousness -chum -anguished -auto -career -bookstore's -carbonized -autocratically -cherishes -attendees -contends -anastomotic -attributing -abbot -came -blunt -battlement's -affection -coordination -annotate -besets -bucked -boasting -benedictions -adherent -blimp's -acknowledging -cleverly -applejack -annexation -bat's -cantons -beetled -closed -country -creatively -bakery -blasphemously -chalking -bold -attended -crasher -backtrackers -artist's -bracelet's -allowably -affiliating -arrant -brayed -barbells -consigned -abolishers -climatic -atrophying -amigo -arsenal -ascribes -converses -aura's -allotted -bliss -classical -bigger -ahead -chopped -blade -casualty -acceded -bottling -axon -casement's -battlefront's -convinces -alerting -advertisers -blemish -agglutinating -commonplaces -autocorrelation -armistice -crediting -besmirch -amplify -auscultation -befalls -called -alnico -arbiter's -abort -argonauts -cessations -cribs -blare -aforementioned -condemners -contaminated -complained -bootstrapped -criticism -cooperatively -binding -bullies -basins -contrived -assort -adulterously -booms -abandons -also -appealed -count -contributed -beet -crashers -carryovers -clays 
-blackness -cosmetics -awkward -blurted -bothers -analyzer -backups -alarming -bicyclers -credit -abrogate -audience's -architecturally -alibi's -complicator's -chuckle -corporately -banishment -communist's -birdie -asymptotic -break -braze -benzene -bridgework's -beak -agitators -collateral -arranges -bayonet -breathlessly -counsellor -creates -convulsions -backdrops -applicants -altercation's -commission -breathtakingly -corresponds -backdrop -armaments -build -biannual -buttoning -computational -chaired -bather -critically -amanuensis -bantus -confidential -annoyance's -carder -authorizing -acquits -bipeds -cocktail -cinnamon -burros -brocade -abdomen's -creative -acquisition's -abdomen -baited -aristocratically -alive -committed -arrestor -cleaving -comedy's -baggage -bra -adaptors -afoot -bulls -contoured -amalgam -comprehensibility -amortizes -biographical -confront -covert -cravat -animates -booksellers -bypass -bootleggers -bedfast -affair's -buzzer -bellowed -aligning -bystander -acclimatized -accomplishing -against -blankness -adopt -addressing -croaked -boaters -behooves -audits -boatyard -cruise -agnostics -ailing -anchorage's -adaptations -conceptualize -advised -cries -bank -actuators -brazing -catalyst -beachheads -aplomb -compressed -amputated -contractor's -bedspreads -bowed -coon -chaplain -cannons -coffers -assembly -bouffant -converters -ampoule's -borderland -archaeologist -blankets -conserving -avalanche -assortment's -aspic -axle -bereaves -allowance -carbonization -bartender -clawed -coincidental -appeared -chipmunk's -countable -authenticators -bestow -alps -caw -aniseikonic -avows -blackmails -controlling -correlating -audiologist's -bit -approving -collapse -coon's -cleave -atheists -brigade -autopilots -bounteous -commercialness -accede -cavalierness -accustoming -burnishing -clobber -aspirates -brochures -cellar's -communes -berkelium -chickadee -cobweb -circumstances -chose -comprehend -baritone's -aggravation -adopts -cruelty -and 
-axer -cautioned -carbonic -babbles -bet -charitable -computable -cardinality -amenities -confiscating -catcher -audaciousness -complaint's -cooperator's -buddies -baking -constant -classmate's -accentuate -choices -crop's -authorization's -comedy -brushy -brotherly -canals -ads -causeway -abrading -cemetery -autocrat -briefing -abdomens -apparition's -consummately -alkaloids -bulkheads -cravats -bales -campaigners -bagpipe -accentuates -arm -barometric -bas -agitator -behavior -abutters -blockades -alertness -civilizes -chinner -anthropologist -artificialness -balkanize -automates -cackling -anarchists -amounted -cereal's -anodized -cobblers -acknowledgment's -blear -copper -alphabetics -blackboards -apish -answering -afternoon -arbors -accused -chickens -agency's -contractors -contraptions -cosmology -anomaly -bandstand -attempter -account -challengers -admiration -calculations -autocracy -analyticity -accord -buildup's -commonly -babbling -adjudication's -attain -ameliorating -candlestick's -chronicles -align -consensus -agate -adulation -aspirated -conclusive -biologists -cracks -conform -chambered -beryllium -connote -amusing -aquifer -ankle -batteries -conservationists -accountants -apiaries -actinometer -beckon -clearances -clouded -antitoxin's -consolation's -collectives -boxtops -bombarded -bombarding -bluest -allusion's -construction -ballpark's -codified -coincidence -celebration -chip -beginner's -algerian -boo -athletics -condenser -bytes -beauties -concerts -conductors -awl's -agitations -buttered -codifier's -armory -ascii -aspirin -arthritis -bylaw's -conformity -blasting -coinciding -aphid's -ceremonial -banisters -bristle -bid's -buckboard's -bandied -biopsy -ballrooms -chloroplasts -bidding -boil -algebra -constellation -chuck -cringes -cleanliness -apron's -cosmopolitan -bashes -abusive -believer -conductor -butters -breweries -allotment -artfulness -bunkmates -blares -connections -anticipated -classifies -commandments -beginnings -bend 
-brambles -blacked -basketball's -affectionate -cocoa -anacondas -busing -bone -birchen -creamed -aged -commemorates -brother -aberration -crawl -actuarial -apology's -alumnus -adversary's -anaphoric -aspiring -consciousness -cokes -assignee -boxing -blanched -camels -contemporaries -carnivorous -assigned -apologetically -corpus -accusations -beefing -champaign -claps -adherence -aloft -complication -citizenship -becomes -compound -arabesque -bronchiole's -appraises -breach -collection's -botched -bitches -biblically -bronchial -amalgamating -commoner's -barbarian's -arrange -cradle -conformed -complimentary -anodes -cowering -anoint -brocaded -bedazzling -avionics -burnishes -bulkhead -chink -consciously -contract -clinch -applicant's -awning -aloud -chandelier's -cathode's -babble -arachnid -biplane -clamorous -assuredly -consented -axing -avenger -commence -braving -brandishing -careless -burningly -boatsman -channelling -clarifying -beggar -berates -cite -cowered -buffer -condescending -admixes -bettering -bedazzlement -cord -burglary's -characteristics -aptitudes -adieu -agree -bends -ceremonies -accustom -accessibly -commanders -ask -cavalier -brayer -affront -courser -becoming -carves -configures -beasts -biters -conditionals -bodybuilding -accretions -chapter's -cleverer -corning -brat -classes -almsman -consumptive -antique -comprised -beholders -anthropologically -buns -bridge -accretion -acceptance's -confederacy -armorer -argumentative -crossword -cowslip's -analog -counselor -chastised -barters -clerked -americas -cloud -aide -alternators -admitters -bagatelle -bridges -civilizations -anion's -briton's -apartment -acquaints -consummation -chord -coated -barer -carnivorously -cheering -allergy -capacity -classrooms -assistantships -complimented -amphibiously -commandment's -audiogram -corked -badness -bewildered -assemblage -backplane's -asterisk's -blob -coexisting -approximations -counteractive -barns -adherer -aborigine's -brooding -conceived 
-adjustor -cabled -belongings -breadwinner -blot's -brightness -consigning -barflies -bisector's -basing -complement -conditioner -brazes -crank -antinomian -crowd -accelerometers -befitting -backlash -bastions -acceleration -briefcases -correlated -baffle -chew -accosts -agreeably -bassinets -cogitate -concerning -contouring -broadside -compact -brainstems -atom's -bondage -biter -archdioceses -basis -bellboy -blobs -barons -clods -campaigned -assessors -bubbles -annal -casual -altercations -clog's -biased -arianism -ancillary -collaborator -butter -bureau -blending -antiquities -brands -activism -crews -beats -broad -buds -baggers -cobbler's -condemns -cabinets -bomber -blinders -center -contacted -bewilderingly -circulates -burnings -achieved -belch -barbecue -angles -comparative -befuddle -cherished -chapters -chanter -allegation's -armstrong -converter -combinatoric -angrier -brooks -clinked -blubber -appointments -compactor -cleaned -car -contention's -artificial -cramp -consistency -aborting -collaboration -awarders -crippled -anaphora -creamy -buoyed -baptistery -altered -anchoring -alterer -adjuring -beacon's -commencement's -ascension -candidness -clouding -cigars -boiled -christmas -contingency's -alum -apparel -contributors -anisotropic -annotations -bushwhacks -brides -continuities -carton's -blurred -antibody -aorta -blankest -combinator's -banish -breaches -accumulates -bowling -braver -antibacterial -cooperators -banked -compensated -chartable -conjunctively -antelope's -bluefish -annoying -composed -barges -biconcave -australia -ballparks -bearers -acknowledged -advocates -crossed -competitor -blaming -andorra -baritone -collaborator's -accessibility -complains -commentator -bibliography -conference's -atmosphere -agrees -bedstead's -ardor -character's -conventionally -arena's -chokes -channel -bludgeon -convoys -condense -beautifier -ailerons -compacts -black -bell -completions -ballroom -besotting -conservatives -adventured -bulldog's -conversely 
-arroyos -compositional -alternative -association -broods -beefy -consolidated -balms -acquaint -animal -certificate -combustion -aims -cracker -abetted -cautionings -bread -attains -agriculturally -courtyards -bawls -country's -creator's -checkbook's -cliches -colonizing -biennial -aqueous -craftsman -contrivances -algorithmic -crate -barefooted -bodily -anthropologist's -but -climate's -campers -crackled -awakes -conveyed -borrowers -approached -avoids -crib -albania -bathrobe -admonitions -architectures -consenting -anastomosis -blob's -actual -arrowhead's -accountable -allegiances -commendation -appearers -comply -concurs -controversy -abstracting -artifact diff --git a/storage/bdb/test/wrap.tcl b/storage/bdb/test/wrap.tcl deleted file mode 100644 index 34ec451072c..00000000000 --- a/storage/bdb/test/wrap.tcl +++ /dev/null @@ -1,71 +0,0 @@ -# See the file LICENSE for redistribution information. -# -# Copyright (c) 2000-2004 -# Sleepycat Software. All rights reserved. -# -# $Id: wrap.tcl,v 11.8 2004/01/28 03:36:33 bostic Exp $ -# -# Sentinel file wrapper for multi-process tests. This is designed to avoid a -# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs -# to sit around waiting for some random process that's not DB's and is not -# exiting. - -source ./include.tcl -source $test_path/testutils.tcl - -# Arguments: -if { $argc < 3 } { - puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs" - exit -} - -set script [lindex $argv 0] -set logfile [lindex $argv 1] -set args [lrange $argv 2 end] - -# Create a sentinel file to mark our creation and signal that watch_procs -# should look for us. -set parentpid [pid] -set parentsentinel $testdir/begin.$parentpid -set f [open $parentsentinel w] -close $f - -# Create a Tcl subprocess that will actually run the test. -set t [open "|$tclsh_path >& $logfile" w] - -# Create a sentinel for the subprocess. 
-set childpid [pid $t] -puts "Script watcher process $parentpid launching $script process $childpid." -set childsentinel $testdir/begin.$childpid -set f [open $childsentinel w] -close $f - -puts $t "source $test_path/test.tcl" -puts $t "set script $script" - -# Set up argv for the subprocess, since the args aren't passed in as true -# arguments thanks to the pipe structure. -puts $t "set argc [llength $args]" -puts $t "set argv [list $args]" - -puts $t {set ret [catch { source $test_path/$script } result]} -puts $t {if { [string length $result] > 0 } { puts $result }} -puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0} - -# Close the pipe. This will flush the above commands and actually run the -# test, and will also return an error a la exec if anything bad happens -# to the subprocess. The magic here is that closing a pipe blocks -# and waits for the exit of processes in the pipeline, at least according -# to Ousterhout (p. 115). - -set ret [catch {close $t} res] - -# Write ending sentinel files--we're done. -set f [open $testdir/end.$childpid w] -close $f -set f [open $testdir/end.$parentpid w] -close $f - -error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\ - $ret 0 -exit $ret diff --git a/storage/bdb/txn/txn.c b/storage/bdb/txn/txn.c index f0e4f7d4c35..68ed2e6c28c 100644 --- a/storage/bdb/txn/txn.c +++ b/storage/bdb/txn/txn.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -35,7 +35,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: txn.c,v 11.249 2004/10/15 16:59:44 bostic Exp $ + * $Id: txn.c,v 12.34 2005/11/01 00:44:35 bostic Exp $ */ #include "db_config.h" @@ -66,15 +66,16 @@ #include "dbinc/hash.h" #include "dbinc/lock.h" #include "dbinc/log.h" -#include "dbinc/mp.h" #include "dbinc/txn.h" -#define SET_LOG_FLAGS(dbenv, txnp, lflags) \ +#define SET_LOG_FLAGS(dbenv, txn, lflags) \ do { \ lflags = DB_LOG_COMMIT | DB_LOG_PERM; \ - if (F_ISSET(txnp, TXN_SYNC)) \ + if (F_ISSET(txn, TXN_SYNC)) \ lflags |= DB_FLUSH; \ - else if (!F_ISSET(txnp, TXN_NOSYNC) && \ + else if (F_ISSET(txn, TXN_WRITE_NOSYNC)) \ + lflags |= DB_LOG_WRNOSYNC; \ + else if (!F_ISSET(txn, TXN_NOSYNC) && \ !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) { \ if (F_ISSET(dbenv, DB_ENV_TXN_WRITE_NOSYNC)) \ lflags |= DB_LOG_WRNOSYNC; \ @@ -98,13 +99,13 @@ typedef enum { static int __txn_abort_pp __P((DB_TXN *)); static int __txn_begin_int __P((DB_TXN *, int)); static int __txn_commit_pp __P((DB_TXN *, u_int32_t)); -static int __txn_discard_pp __P((DB_TXN *, u_int32_t)); +static int __txn_discard __P((DB_TXN *, u_int32_t)); +static int __txn_dispatch_undo + __P((DB_ENV *, DB_TXN *, DBT *, DB_LSN *, void *)); static int __txn_end __P((DB_TXN *, int)); -static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, txnop_t)); +static int __txn_isvalid __P((const DB_TXN *, txnop_t)); static int __txn_undo __P((DB_TXN *)); -static int __txn_dispatch_undo __P((DB_ENV *, - DB_TXN *, DBT *, DB_LSN *, void *)); -static void __txn_set_begin_lsnp __P((DB_TXN *txn, DB_LSN **)); +static void __txn_set_txn_lsnp __P((DB_TXN *, DB_LSN **, DB_LSN **)); /* * __txn_begin_pp -- @@ -118,6 +119,7 @@ __txn_begin_pp(dbenv, parent, txnpp, flags) DB_TXN *parent, **txnpp; u_int32_t flags; { + DB_THREAD_INFO *ip; int rep_check, ret; PANIC_CHECK(dbenv); @@ -125,17 +127,22 @@ __txn_begin_pp(dbenv, parent, txnpp, flags) if ((ret = __db_fchk(dbenv, "txn_begin", flags, - DB_DEGREE_2 | DB_DIRTY_READ | DB_TXN_NOWAIT | - DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0) + 
DB_READ_COMMITTED | DB_READ_UNCOMMITTED | DB_TXN_NOWAIT | + DB_TXN_NOSYNC | DB_TXN_SYNC | DB_TXN_WRITE_NOSYNC)) != 0) return (ret); - if ((ret = __db_fcchk(dbenv, - "txn_begin", flags, DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0) + if ((ret = __db_fcchk(dbenv, "txn_begin", flags, + DB_TXN_WRITE_NOSYNC | DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0) return (ret); + if ((ret = __db_fcchk(dbenv, "txn_begin", + flags, DB_TXN_WRITE_NOSYNC, DB_TXN_NOSYNC)) != 0) + return (ret); + + ENV_ENTER(dbenv, ip); if (parent == NULL) { rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __op_rep_enter(dbenv); + if (rep_check && (ret = __op_rep_enter(dbenv)) != 0) + goto err; } else rep_check = 0; ret = __txn_begin(dbenv, parent, txnpp, flags); @@ -145,8 +152,9 @@ __txn_begin_pp(dbenv, parent, txnpp, flags) * txn is resolved by txn_commit, txn_abort, etc. */ if (ret != 0 && rep_check) - __op_rep_exit(dbenv); + (void)__op_rep_exit(dbenv); +err: ENV_LEAVE(dbenv, ip); return (ret); } @@ -172,6 +180,7 @@ __txn_begin(dbenv, parent, txnpp, flags) { DB_LOCKREGION *region; DB_TXN *txn; + TXN_DETAIL *ptd, *td; int ret; *txnpp = NULL; @@ -184,22 +193,28 @@ __txn_begin(dbenv, parent, txnpp, flags) TAILQ_INIT(&txn->events); STAILQ_INIT(&txn->logs); txn->flags = TXN_MALLOC; - if (LF_ISSET(DB_DEGREE_2)) - F_SET(txn, TXN_DEGREE_2); - if (LF_ISSET(DB_DIRTY_READ)) - F_SET(txn, TXN_DIRTY_READ); + if (LF_ISSET(DB_READ_COMMITTED)) + F_SET(txn, TXN_READ_COMMITTED); + if (LF_ISSET(DB_READ_UNCOMMITTED)) + F_SET(txn, TXN_READ_UNCOMMITTED); if (LF_ISSET(DB_TXN_NOSYNC)) F_SET(txn, TXN_NOSYNC); if (LF_ISSET(DB_TXN_SYNC)) F_SET(txn, TXN_SYNC); if (LF_ISSET(DB_TXN_NOWAIT)) F_SET(txn, TXN_NOWAIT); + if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) + F_SET(txn, TXN_WRITE_NOSYNC); if ((ret = __txn_begin_int(txn, 0)) != 0) goto err; + td = txn->td; - if (parent != NULL) + if (parent != NULL) { + ptd = parent->td; TAILQ_INSERT_HEAD(&parent->kids, txn, klinks); + SH_TAILQ_INSERT_HEAD(&ptd->kids, td, klinks, __txn_detail); + } if 
(LOCKING_ON(dbenv)) { region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary; @@ -255,9 +270,7 @@ __txn_xa_begin(dbenv, txn) TAILQ_INIT(&txn->events); STAILQ_INIT(&txn->logs); txn->parent = NULL; - ZERO_LSN(txn->last_lsn); txn->txnid = TXN_INVALID; - txn->tid = 0; txn->cursors = 0; memset(&txn->lock_timeout, 0, sizeof(db_timeout_t)); memset(&txn->expire, 0, sizeof(db_timeout_t)); @@ -265,13 +278,58 @@ __txn_xa_begin(dbenv, txn) return (__txn_begin_int(txn, 0)); } +/* + * __txn_recycle_id -- + * Find a range of useable transaction ids. + * + * PUBLIC: int __txn_recycle_id __P((DB_ENV *)); + */ +int +__txn_recycle_id(dbenv) + DB_ENV *dbenv; +{ + DB_LSN null_lsn; + DB_TXNMGR *mgr; + DB_TXNREGION *region; + TXN_DETAIL *td; + u_int32_t *ids; + int nids, ret; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + if ((ret = __os_malloc(dbenv, + sizeof(u_int32_t) * region->maxtxns, &ids)) != 0) + return (ret); + nids = 0; + for (td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); + td != NULL; + td = SH_TAILQ_NEXT(td, links, __txn_detail)) + ids[nids++] = td->txnid; + region->last_txnid = TXN_MINIMUM - 1; + region->cur_maxid = TXN_MAXIMUM; + if (nids != 0) + __db_idspace(ids, nids, + ®ion->last_txnid, ®ion->cur_maxid); + __os_free(dbenv, ids); + /* + * Check LOGGING_ON rather than DBENV_LOGGING as + * we want to emit this record at the end of recovery. + */ + if (LOGGING_ON(dbenv)) + ret = __txn_recycle_log(dbenv, NULL, &null_lsn, + 0, region->last_txnid + 1, region->cur_maxid); + + return (ret); +} + /* * __txn_compensate_begin * Begin an compensation transaction. This is a special interface * that is used only for transactions that must be started to compensate * for actions during an abort. Currently only used for allocations. 
* - * PUBLIC: int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp)); + * PUBLIC: int __txn_compensate_begin __P((DB_ENV *, DB_TXN **)); */ int __txn_compensate_begin(dbenv, txnpp) @@ -306,19 +364,17 @@ __txn_begin_int(txn, internal) int internal; { DB_ENV *dbenv; - DB_LSN null_lsn; DB_TXNMGR *mgr; DB_TXNREGION *region; TXN_DETAIL *td; - size_t off; - u_int32_t id, *ids; - int nids, ret; + u_int32_t id; + int ret; mgr = txn->mgrp; dbenv = mgr->dbenv; region = mgr->reginfo.primary; - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) { __db_err(dbenv, "operation not permitted during recovery"); ret = EINVAL; @@ -341,26 +397,9 @@ __txn_begin_int(txn, internal) region->cur_maxid != TXN_MAXIMUM) region->last_txnid = TXN_MINIMUM - 1; - if (region->last_txnid == region->cur_maxid) { - if ((ret = __os_malloc(dbenv, - sizeof(u_int32_t) * region->maxtxns, &ids)) != 0) - goto err; - nids = 0; - for (td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); - td != NULL; - td = SH_TAILQ_NEXT(td, links, __txn_detail)) - ids[nids++] = td->txnid; - region->last_txnid = TXN_MINIMUM - 1; - region->cur_maxid = TXN_MAXIMUM; - if (nids != 0) - __db_idspace(ids, nids, - ®ion->last_txnid, ®ion->cur_maxid); - __os_free(dbenv, ids); - if (DBENV_LOGGING(dbenv) && - (ret = __txn_recycle_log(dbenv, NULL, &null_lsn, - 0, region->last_txnid + 1, region->cur_maxid)) != 0) - goto err; - } + if (region->last_txnid == region->cur_maxid && + (ret = __txn_recycle_id(dbenv)) != 0) + goto err; /* Allocate a new transaction detail structure. 
*/ if ((ret = @@ -379,30 +418,33 @@ __txn_begin_int(txn, internal) region->stat.st_maxnactive = region->stat.st_nactive; td->txnid = id; + dbenv->thread_id(dbenv, &td->pid, &td->tid); ZERO_LSN(td->last_lsn); ZERO_LSN(td->begin_lsn); + SH_TAILQ_INIT(&td->kids); if (txn->parent != NULL) - td->parent = txn->parent->off; + td->parent = R_OFFSET(&mgr->reginfo, txn->parent->td); else td->parent = INVALID_ROFF; + td->name = INVALID_ROFF; td->status = TXN_RUNNING; td->flags = 0; td->xa_status = 0; - off = R_OFFSET(&mgr->reginfo, td); - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); - ZERO_LSN(txn->last_lsn); txn->txnid = id; - txn->off = (u_int32_t)off; + txn->td = td; txn->abort = __txn_abort_pp; txn->commit = __txn_commit_pp; - txn->discard = __txn_discard_pp; + txn->discard = __txn_discard; + txn->get_name = __txn_get_name; txn->id = __txn_id; txn->prepare = __txn_prepare; + txn->set_txn_lsnp = __txn_set_txn_lsnp; + txn->set_name = __txn_set_name; txn->set_timeout = __txn_set_timeout; - txn->set_begin_lsnp = __txn_set_begin_lsnp; /* * If this is a transaction family, we must link the child to the @@ -414,34 +456,71 @@ __txn_begin_int(txn, internal) return (ret); if (F_ISSET(txn, TXN_MALLOC)) { - MUTEX_THREAD_LOCK(dbenv, mgr->mutexp); + MUTEX_LOCK(dbenv, mgr->mutex); TAILQ_INSERT_TAIL(&mgr->txn_chain, txn, links); - MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp); + MUTEX_UNLOCK(dbenv, mgr->mutex); } return (0); -err: R_UNLOCK(dbenv, &mgr->reginfo); +err: TXN_SYSTEM_UNLOCK(dbenv); return (ret); } +/* + * __txn_continue + * Fill in the fields of the local transaction structure given + * the detail transaction structure. 
+ * + * PUBLIC: void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *)); + */ +void +__txn_continue(env, txn, td) + DB_ENV *env; + DB_TXN *txn; + TXN_DETAIL *td; +{ + txn->mgrp = env->tx_handle; + txn->parent = NULL; + txn->txnid = td->txnid; + txn->td = td; + + txn->abort = __txn_abort_pp; + txn->commit = __txn_commit_pp; + txn->discard = __txn_discard; + txn->get_name = __txn_get_name; + txn->id = __txn_id; + txn->prepare = __txn_prepare; + txn->set_name = __txn_set_name; + + txn->flags = 0; + if (F_ISSET(td, TXN_DTL_RESTORED)) + F_SET(txn, TXN_RESTORED); +} + /* * __txn_commit_pp -- * Interface routine to TXN->commit. */ static int -__txn_commit_pp(txnp, flags) - DB_TXN *txnp; +__txn_commit_pp(txn, flags) + DB_TXN *txn; u_int32_t flags; { DB_ENV *dbenv; - int not_child, ret; + DB_THREAD_INFO *ip; + int not_child, ret, t_ret; - dbenv = txnp->mgrp->dbenv; - not_child = txnp->parent == NULL; - ret = __txn_commit(txnp, flags); - if (not_child && IS_ENV_REPLICATED(dbenv)) - __op_rep_exit(dbenv); + dbenv = txn->mgrp->dbenv; + not_child = txn->parent == NULL; + + ENV_ENTER(dbenv, ip); + + ret = __txn_commit(txn, flags); + if (not_child && IS_ENV_REPLICATED(dbenv) && + (t_ret = __op_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + ENV_LEAVE(dbenv, ip); return (ret); } @@ -452,25 +531,35 @@ __txn_commit_pp(txnp, flags) * PUBLIC: int __txn_commit __P((DB_TXN *, u_int32_t)); */ int -__txn_commit(txnp, flags) - DB_TXN *txnp; +__txn_commit(txn, flags) + DB_TXN *txn; u_int32_t flags; { DBT list_dbt; DB_ENV *dbenv; DB_LOCKREQ request; DB_TXN *kid; + REGENV *renv; + REGINFO *infop; TXN_DETAIL *td; - u_int32_t lflags; + u_int32_t id, lflags; int ret, t_ret; - dbenv = txnp->mgrp->dbenv; + dbenv = txn->mgrp->dbenv; + td = txn->td; PANIC_CHECK(dbenv); - if ((ret = __txn_isvalid(txnp, &td, TXN_OP_COMMIT)) != 0) + if ((ret = __txn_isvalid(txn, TXN_OP_COMMIT)) != 0) return (ret); + infop = dbenv->reginfo; + renv = infop->primary; + /* + * No mutex is needed as envid is 
read-only once it is set. + */ + id = renv->envid; + /* * We clear flags that are incorrect, ignoring any flag errors, and * default to synchronous operations. By definition, transaction @@ -478,19 +567,24 @@ __txn_commit(txnp, flags) * happen, but we don't want to fail in the field 'cause the app is * specifying the wrong flag for some reason. */ - if (__db_fchk(dbenv, - "DB_TXN->commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0) + if (__db_fchk(dbenv, "DB_TXN->commit", flags, + DB_TXN_NOSYNC | DB_TXN_SYNC | DB_TXN_WRITE_NOSYNC) != 0) flags = DB_TXN_SYNC; - if (__db_fcchk(dbenv, - "DB_TXN->commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0) + if (__db_fcchk(dbenv, "DB_TXN->commit", flags, + DB_TXN_SYNC, DB_TXN_NOSYNC | DB_TXN_WRITE_NOSYNC) != 0) flags = DB_TXN_SYNC; + + if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) { + F_CLR(txn, TXN_SYNC_FLAGS); + F_SET(txn, TXN_WRITE_NOSYNC); + } if (LF_ISSET(DB_TXN_NOSYNC)) { - F_CLR(txnp, TXN_SYNC); - F_SET(txnp, TXN_NOSYNC); + F_CLR(txn, TXN_SYNC_FLAGS); + F_SET(txn, TXN_NOSYNC); } if (LF_ISSET(DB_TXN_SYNC)) { - F_CLR(txnp, TXN_NOSYNC); - F_SET(txnp, TXN_SYNC); + F_CLR(txn, TXN_SYNC_FLAGS); + F_SET(txn, TXN_SYNC); } /* @@ -498,9 +592,9 @@ __txn_commit(txnp, flags) * then try to abort the rest of the kids and then abort the parent. * Abort should never fail; if it does, we bail out immediately. */ - while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL) + while ((kid = TAILQ_FIRST(&txn->kids)) != NULL) if ((ret = __txn_commit(kid, flags)) != 0) - while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL) + while ((kid = TAILQ_FIRST(&txn->kids)) != NULL) if ((t_ret = __txn_abort(kid)) != 0) return (__db_panic(dbenv, t_ret)); @@ -511,9 +605,9 @@ __txn_commit(txnp, flags) * abort (if its parent aborts), and otherwise its parent or ultimate * ancestor will write synchronously. 
*/ - if (DBENV_LOGGING(dbenv) && (!IS_ZERO_LSN(txnp->last_lsn) || - STAILQ_FIRST(&txnp->logs) != NULL)) { - if (txnp->parent == NULL) { + if (DBENV_LOGGING(dbenv) && (!IS_ZERO_LSN(td->last_lsn) || + STAILQ_FIRST(&txn->logs) != NULL)) { + if (txn->parent == NULL) { /* * We are about to free all the read locks for this * transaction below. Some of those locks might be @@ -523,26 +617,26 @@ __txn_commit(txnp, flags) * release the locks below. */ if ((ret = - __txn_doevents(dbenv, txnp, TXN_PREPARE, 1)) != 0) + __txn_doevents(dbenv, txn, TXN_PREPARE, 1)) != 0) goto err; memset(&request, 0, sizeof(request)); if (LOCKING_ON(dbenv)) { request.op = DB_LOCK_PUT_READ; if (IS_REP_MASTER(dbenv) && - !IS_ZERO_LSN(txnp->last_lsn)) { + !IS_ZERO_LSN(td->last_lsn)) { memset(&list_dbt, 0, sizeof(list_dbt)); request.obj = &list_dbt; } ret = __lock_vec(dbenv, - txnp->txnid, 0, &request, 1, NULL); + txn->txnid, 0, &request, 1, NULL); } - if (ret == 0 && !IS_ZERO_LSN(txnp->last_lsn)) { - SET_LOG_FLAGS(dbenv, txnp, lflags); - ret = __txn_regop_log(dbenv, txnp, - &txnp->last_lsn, lflags, TXN_COMMIT, - (int32_t)time(NULL), request.obj); + if (ret == 0 && !IS_ZERO_LSN(td->last_lsn)) { + SET_LOG_FLAGS(dbenv, txn, lflags); + ret = __txn_regop_log(dbenv, txn, + &td->last_lsn, lflags, TXN_COMMIT, + (int32_t)time(NULL), id, request.obj); } if (request.obj != NULL && request.obj->data != NULL) @@ -551,23 +645,23 @@ __txn_commit(txnp, flags) goto err; } else { /* Log the commit in the parent! */ - if (!IS_ZERO_LSN(txnp->last_lsn) && - (ret = __txn_child_log(dbenv, - txnp->parent, &txnp->parent->last_lsn, - 0, txnp->txnid, &txnp->last_lsn)) != 0) { + if (!IS_ZERO_LSN(td->last_lsn) && + (ret = __txn_child_log(dbenv, txn->parent, + &((TXN_DETAIL *)txn->parent->td)->last_lsn, + 0, txn->txnid, &td->last_lsn)) != 0) { goto err; } - if (STAILQ_FIRST(&txnp->logs) != NULL) { + if (STAILQ_FIRST(&txn->logs) != NULL) { /* * Put the child first so we back it out first. 
* All records are undone in reverse order. */ - STAILQ_CONCAT(&txnp->logs, &txnp->parent->logs); - txnp->parent->logs = txnp->logs; - STAILQ_INIT(&txnp->logs); + STAILQ_CONCAT(&txn->logs, &txn->parent->logs); + txn->parent->logs = txn->logs; + STAILQ_INIT(&txn->logs); } - F_SET(txnp->parent, TXN_CHILDCOMMIT); + F_SET(txn->parent, TXN_CHILDCOMMIT); } } @@ -577,22 +671,22 @@ __txn_commit(txnp, flags) * undo other allocations, if necessary, without worrying about these * pages which were not on the free list before. */ - if (txnp->txn_list != NULL) { + if (txn->txn_list != NULL) { #ifndef HAVE_FTRUNCATE t_ret = __db_do_the_limbo(dbenv, - NULL, txnp, txnp->txn_list, LIMBO_NORMAL); + NULL, txn, txn->txn_list, LIMBO_NORMAL); if (t_ret != 0 && ret == 0) ret = t_ret; #endif - __db_txnlist_end(dbenv, txnp->txn_list); - txnp->txn_list = NULL; + __db_txnlist_end(dbenv, txn->txn_list); + txn->txn_list = NULL; } if (ret != 0) goto err; /* This is OK because __txn_end can only fail with a panic. */ - return (__txn_end(txnp, 1)); + return (__txn_end(txn, 1)); err: /* * If we are prepared, then we "must" be able to commit. We panic here @@ -605,7 +699,7 @@ err: /* if (td->status == TXN_PREPARED) return (__db_panic(dbenv, ret)); - if ((t_ret = __txn_abort(txnp)) != 0) + if ((t_ret = __txn_abort(txn)) != 0) ret = t_ret; return (ret); } @@ -615,17 +709,23 @@ err: /* * Interface routine to TXN->abort. 
*/ static int -__txn_abort_pp(txnp) - DB_TXN *txnp; +__txn_abort_pp(txn) + DB_TXN *txn; { DB_ENV *dbenv; - int not_child, ret; + DB_THREAD_INFO *ip; + int not_child, ret, t_ret; - dbenv = txnp->mgrp->dbenv; - not_child = txnp->parent == NULL; - ret = __txn_abort(txnp); - if (not_child && IS_ENV_REPLICATED(dbenv)) - __op_rep_exit(dbenv); + dbenv = txn->mgrp->dbenv; + not_child = txn->parent == NULL; + + ENV_ENTER(dbenv, ip); + + ret = __txn_abort(txn); + if (not_child && IS_ENV_REPLICATED(dbenv) && + (t_ret = __op_rep_exit(dbenv)) != 0 && ret == 0) + ret = t_ret; + ENV_LEAVE(dbenv, ip); return (ret); } @@ -636,22 +736,25 @@ __txn_abort_pp(txnp) * PUBLIC: int __txn_abort __P((DB_TXN *)); */ int -__txn_abort(txnp) - DB_TXN *txnp; +__txn_abort(txn) + DB_TXN *txn; { DB_ENV *dbenv; DB_LOCKREQ request; DB_TXN *kid; + REGENV *renv; + REGINFO *infop; TXN_DETAIL *td; - u_int32_t lflags; + u_int32_t id, lflags; int ret; - dbenv = txnp->mgrp->dbenv; + dbenv = txn->mgrp->dbenv; + td = txn->td; PANIC_CHECK(dbenv); /* Ensure that abort always fails fatally. */ - if ((ret = __txn_isvalid(txnp, &td, TXN_OP_ABORT)) != 0) + if ((ret = __txn_isvalid(txn, TXN_OP_ABORT)) != 0) return (__db_panic(dbenv, ret)); /* @@ -661,10 +764,30 @@ __txn_abort(txnp) * see any failure, we just get out of here and return the panic * up. */ - while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL) + while ((kid = TAILQ_FIRST(&txn->kids)) != NULL) if ((ret = __txn_abort(kid)) != 0) return (ret); + infop = dbenv->reginfo; + renv = infop->primary; + /* + * No mutex is needed as envid is read-only once it is set. + */ + id = renv->envid; + + /* + * Fast path -- no need to do anything fancy if there were no + * modifications (e.g., log records) for this transaction. + * We still call txn_undo to cleanup the txn_list from our + * children. 
+ */ + if (IS_ZERO_LSN(td->last_lsn) && STAILQ_FIRST(&txn->logs) == NULL) { + if (txn->txn_list == NULL) + goto done; + else + goto undo; + } + if (LOCKING_ON(dbenv)) { /* * We are about to free all the read locks for this transaction @@ -673,25 +796,25 @@ __txn_abort(txnp) * handle is closed. Check the events and preprocess any * trades now so that we don't release the locks below. */ - if ((ret = __txn_doevents(dbenv, txnp, TXN_ABORT, 1)) != 0) + if ((ret = __txn_doevents(dbenv, txn, TXN_ABORT, 1)) != 0) return (__db_panic(dbenv, ret)); /* Turn off timeouts. */ if ((ret = __lock_set_timeout(dbenv, - txnp->txnid, 0, DB_SET_TXN_TIMEOUT)) != 0) + txn->txnid, 0, DB_SET_TXN_TIMEOUT)) != 0) return (__db_panic(dbenv, ret)); if ((ret = __lock_set_timeout(dbenv, - txnp->txnid, 0, DB_SET_LOCK_TIMEOUT)) != 0) + txn->txnid, 0, DB_SET_LOCK_TIMEOUT)) != 0) return (__db_panic(dbenv, ret)); request.op = DB_LOCK_UPGRADE_WRITE; request.obj = NULL; if ((ret = __lock_vec( - dbenv, txnp->txnid, DB_LOCK_ABORT, &request, 1, NULL)) != 0) + dbenv, txn->txnid, DB_LOCK_ABORT, &request, 1, NULL)) != 0) return (__db_panic(dbenv, ret)); } - if ((ret = __txn_undo(txnp)) != 0) +undo: if ((ret = __txn_undo(txn)) != 0) return (__db_panic(dbenv, ret)); /* @@ -700,33 +823,34 @@ __txn_abort(txnp) * then we log the abort so we know that this transaction * was actually completed. */ - SET_LOG_FLAGS(dbenv, txnp, lflags); +done: SET_LOG_FLAGS(dbenv, txn, lflags); if (DBENV_LOGGING(dbenv) && td->status == TXN_PREPARED && - (ret = __txn_regop_log(dbenv, txnp, &txnp->last_lsn, - lflags, TXN_ABORT, (int32_t)time(NULL), NULL)) != 0) + (ret = __txn_regop_log(dbenv, txn, &td->last_lsn, + lflags, TXN_ABORT, (int32_t)time(NULL), id, NULL)) != 0) return (__db_panic(dbenv, ret)); /* __txn_end always panics if it errors, so pass the return along. */ - return (__txn_end(txnp, 0)); + return (__txn_end(txn, 0)); } /* - * __txn_discard_pp -- + * __txn_discard -- * Interface routine to TXN->discard. 
*/ static int -__txn_discard_pp(txnp, flags) - DB_TXN *txnp; +__txn_discard(txn, flags) + DB_TXN *txn; u_int32_t flags; { DB_ENV *dbenv; - int not_child, ret; + DB_THREAD_INFO *ip; + int ret; - dbenv = txnp->mgrp->dbenv; - not_child = txnp->parent == NULL; - ret = __txn_discard(txnp, flags); - if (not_child && IS_ENV_REPLICATED(dbenv)) - __op_rep_exit(dbenv); + dbenv = txn->mgrp->dbenv; + + ENV_ENTER(dbenv, ip); + ret = __txn_discard_int(txn, flags); + ENV_LEAVE(dbenv, ip); return (ret); } @@ -734,39 +858,40 @@ __txn_discard_pp(txnp, flags) * __txn_discard -- * Free the per-process resources associated with this txn handle. * - * PUBLIC: int __txn_discard __P((DB_TXN *, u_int32_t flags)); + * PUBLIC: int __txn_discard_int __P((DB_TXN *, u_int32_t flags)); */ int -__txn_discard(txnp, flags) - DB_TXN *txnp; +__txn_discard_int(txn, flags) + DB_TXN *txn; u_int32_t flags; { DB_ENV *dbenv; DB_TXN *freep; - TXN_DETAIL *td; + DB_TXNMGR *mgr; int ret; COMPQUIET(flags, 0); - dbenv = txnp->mgrp->dbenv; + mgr = txn->mgrp; + dbenv = mgr->dbenv; freep = NULL; PANIC_CHECK(dbenv); - if ((ret = __txn_isvalid(txnp, &td, TXN_OP_DISCARD)) != 0) + if ((ret = __txn_isvalid(txn, TXN_OP_DISCARD)) != 0) return (ret); /* Should be no children. */ - DB_ASSERT(TAILQ_FIRST(&txnp->kids) == NULL); + DB_ASSERT(TAILQ_FIRST(&txn->kids) == NULL); /* Free the space. 
*/ - MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp); - txnp->mgrp->n_discards++; - if (F_ISSET(txnp, TXN_MALLOC)) { - TAILQ_REMOVE(&txnp->mgrp->txn_chain, txnp, links); - freep = txnp; + MUTEX_LOCK(dbenv, mgr->mutex); + mgr->n_discards++; + if (F_ISSET(txn, TXN_MALLOC)) { + TAILQ_REMOVE(&mgr->txn_chain, txn, links); + freep = txn; } - MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp); + MUTEX_UNLOCK(dbenv, mgr->mutex); if (freep != NULL) __os_free(dbenv, freep); @@ -780,35 +905,39 @@ __txn_discard(txnp, flags) * PUBLIC: int __txn_prepare __P((DB_TXN *, u_int8_t *)); */ int -__txn_prepare(txnp, gid) - DB_TXN *txnp; +__txn_prepare(txn, gid) + DB_TXN *txn; u_int8_t *gid; { DBT list_dbt, xid; DB_ENV *dbenv; DB_LOCKREQ request; + DB_THREAD_INFO *ip; DB_TXN *kid; TXN_DETAIL *td; u_int32_t lflags; int ret; - dbenv = txnp->mgrp->dbenv; + dbenv = txn->mgrp->dbenv; + td = txn->td; PANIC_CHECK(dbenv); - if ((ret = __txn_isvalid(txnp, &td, TXN_OP_PREPARE)) != 0) + if ((ret = __txn_isvalid(txn, TXN_OP_PREPARE)) != 0) return (ret); + ENV_ENTER(dbenv, ip); + /* Commit any unresolved children. */ - while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL) + while ((kid = TAILQ_FIRST(&txn->kids)) != NULL) if ((ret = __txn_commit(kid, DB_TXN_NOSYNC)) != 0) - return (ret); + goto err; #ifndef HAVE_FTRUNCATE - if (txnp->txn_list != NULL && + if (txn->txn_list != NULL && (ret = __db_do_the_limbo(dbenv, - NULL, txnp, txnp->txn_list, LIMBO_PREPARE)) != 0) - return (ret); + NULL, txn, txn->txn_list, LIMBO_PREPARE)) != 0) + goto err; #endif /* * In XA, the global transaction ID in the txn_detail structure is @@ -818,19 +947,19 @@ __txn_prepare(txnp, gid) * of those states, then we are calling prepare directly and we need * to fill in the td->xid. 
*/ - if ((ret = __txn_doevents(dbenv, txnp, TXN_PREPARE, 1)) != 0) - return (ret); + if ((ret = __txn_doevents(dbenv, txn, TXN_PREPARE, 1)) != 0) + goto err; memset(&request, 0, sizeof(request)); if (LOCKING_ON(dbenv)) { request.op = DB_LOCK_PUT_READ; if (IS_REP_MASTER(dbenv) && - IS_ZERO_LSN(txnp->last_lsn)) { + !IS_ZERO_LSN(td->last_lsn)) { memset(&list_dbt, 0, sizeof(list_dbt)); request.obj = &list_dbt; } if ((ret = __lock_vec(dbenv, - txnp->txnid, 0, &request, 1, NULL)) != 0) - return (ret); + txn->txnid, 0, &request, 1, NULL)) != 0) + goto err; } if (DBENV_LOGGING(dbenv)) { @@ -844,7 +973,7 @@ __txn_prepare(txnp, gid) xid.data = td->xid; lflags = DB_LOG_COMMIT | DB_LOG_PERM | DB_FLUSH; - if ((ret = __txn_xa_regop_log(dbenv, txnp, &txnp->last_lsn, + if ((ret = __txn_xa_regop_log(dbenv, txn, &td->last_lsn, lflags, TXN_PREPARE, &xid, td->format, td->gtrid, td->bqual, &td->begin_lsn, request.obj)) != 0) { __db_err(dbenv, "DB_TXN->prepare: log_write failed %s", @@ -853,14 +982,15 @@ __txn_prepare(txnp, gid) if (request.obj != NULL && request.obj->data != NULL) __os_free(dbenv, request.obj->data); if (ret != 0) - return (ret); + goto err; } - MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp); + MUTEX_LOCK(dbenv, txn->mgrp->mutex); td->status = TXN_PREPARED; - MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp); - return (0); + MUTEX_UNLOCK(dbenv, txn->mgrp->mutex); +err: ENV_LEAVE(dbenv, ip); + return (ret); } /* @@ -870,67 +1000,136 @@ __txn_prepare(txnp, gid) * PUBLIC: u_int32_t __txn_id __P((DB_TXN *)); */ u_int32_t -__txn_id(txnp) - DB_TXN *txnp; +__txn_id(txn) + DB_TXN *txn; { - return (txnp->txnid); + return (txn->txnid); +} + +/* + * __txn_get_name -- + * Get a descriptive string from a transaction. + * + * PUBLIC: int __txn_get_name __P((DB_TXN *, const char **)); + */ +int +__txn_get_name(txn, namep) + DB_TXN *txn; + const char **namep; +{ + *namep = txn->name; + + return (0); +} + +/* + * __txn_set_name -- + * Set a descriptive string for a transaction. 
+ * + * PUBLIC: int __txn_set_name __P((DB_TXN *, const char *)); + */ +int +__txn_set_name(txn, name) + DB_TXN *txn; + const char *name; +{ + DB_ENV *dbenv; + DB_TXNMGR *mgr; + TXN_DETAIL *td; + size_t len; + int ret; + char *p; + + mgr = txn->mgrp; + dbenv = mgr->dbenv; + td = txn->td; + len = strlen(name) + 1; + + if ((ret = __os_realloc(dbenv, len, &txn->name)) != 0) + return (ret); + memcpy(txn->name, name, len); + + if (td->name != INVALID_ROFF) { + __db_shalloc_free( + &mgr->reginfo, R_ADDR(&mgr->reginfo, td->name)); + td->name = INVALID_ROFF; + } + if ((ret = __db_shalloc(&mgr->reginfo, len, 0, &p)) != 0) { + __db_err(dbenv, + "Unable to allocate memory for transaction name"); + + __os_free(dbenv, txn->name); + txn->name = NULL; + + return (ret); + } + td->name = R_OFFSET(&mgr->reginfo, p); + memcpy(p, name, len); + +#ifdef DIAGNOSTIC + /* + * If DIAGNOSTIC is set, map the name into the log so users can track + * operations through the log. + */ + if (DBENV_LOGGING(dbenv)) + (void)__log_printf(dbenv, txn, + "transaction %#lx named %s", (u_long)txn->txnid, name); +#endif + + return (0); } /* * __txn_set_timeout -- * DB_ENV->set_txn_timeout. - * * PUBLIC: int __txn_set_timeout __P((DB_TXN *, db_timeout_t, u_int32_t)); */ int -__txn_set_timeout(txnp, timeout, op) - DB_TXN *txnp; +__txn_set_timeout(txn, timeout, op) + DB_TXN *txn; db_timeout_t timeout; u_int32_t op; { if (op != DB_SET_TXN_TIMEOUT && op != DB_SET_LOCK_TIMEOUT) - return (__db_ferr(txnp->mgrp->dbenv, "DB_TXN->set_timeout", 0)); + return (__db_ferr(txn->mgrp->dbenv, "DB_TXN->set_timeout", 0)); return (__lock_set_timeout( - txnp->mgrp->dbenv, txnp->txnid, timeout, op)); + txn->mgrp->dbenv, txn->txnid, timeout, op)); } /* * __txn_isvalid -- - * Return 0 if the txnp is reasonable, otherwise panic. + * Return 0 if the DB_TXN is reasonable, otherwise panic. 
*/ static int -__txn_isvalid(txnp, tdp, op) - const DB_TXN *txnp; - TXN_DETAIL **tdp; +__txn_isvalid(txn, op) + const DB_TXN *txn; txnop_t op; { DB_ENV *dbenv; - DB_TXNMGR *mgrp; + DB_TXNMGR *mgr; DB_TXNREGION *region; - TXN_DETAIL *tp; + TXN_DETAIL *td; - mgrp = txnp->mgrp; - dbenv = mgrp->dbenv; - region = mgrp->reginfo.primary; + mgr = txn->mgrp; + dbenv = mgr->dbenv; + region = mgr->reginfo.primary; /* Check for recovery. */ - if (!F_ISSET(txnp, TXN_COMPENSATE) && + if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) { __db_err(dbenv, "operation not permitted during recovery"); goto err; } /* Check for live cursors. */ - if (txnp->cursors != 0) { + if (txn->cursors != 0) { __db_err(dbenv, "transaction has active cursors"); goto err; } /* Check transaction's state. */ - tp = R_ADDR(&mgrp->reginfo, txnp->off); - if (tdp != NULL) - *tdp = tp; + td = txn->td; /* Handle any operation specific checks. */ switch (op) { @@ -941,22 +1140,22 @@ __txn_isvalid(txnp, tdp, op) */ /* Transaction is already been reused. */ - if (txnp->txnid != tp->txnid) + if (txn->txnid != td->txnid) return (0); /* * What we've got had better be either a prepared or * restored transaction. */ - if (tp->status != TXN_PREPARED && - !F_ISSET(tp, TXN_DTL_RESTORED)) { + if (td->status != TXN_PREPARED && + !F_ISSET(td, TXN_DTL_RESTORED)) { __db_err(dbenv, "not a restored transaction"); return (__db_panic(dbenv, EINVAL)); } return (0); case TXN_OP_PREPARE: - if (txnp->parent != NULL) { + if (txn->parent != NULL) { /* * This is not fatal, because you could imagine an * application that simply prepares everybody because @@ -975,7 +1174,7 @@ __txn_isvalid(txnp, tdp, op) break; } - switch (tp->status) { + switch (td->status) { case TXN_PREPARED: if (op == TXN_OP_PREPARE) { __db_err(dbenv, "transaction already prepared"); @@ -993,7 +1192,7 @@ __txn_isvalid(txnp, tdp, op) case TXN_COMMITTED: default: __db_err(dbenv, "transaction already %s", - tp->status == TXN_COMMITTED ? 
"committed" : "aborted"); + td->status == TXN_COMMITTED ? "committed" : "aborted"); goto err; } @@ -1012,8 +1211,8 @@ err: /* * Internal transaction end routine. */ static int -__txn_end(txnp, is_commit) - DB_TXN *txnp; +__txn_end(txn, is_commit) + DB_TXN *txn; int is_commit; { DB_ENV *dbenv; @@ -1021,17 +1220,17 @@ __txn_end(txnp, is_commit) DB_TXNLOGREC *lr; DB_TXNMGR *mgr; DB_TXNREGION *region; - TXN_DETAIL *tp; + TXN_DETAIL *ptd, *td; int do_closefiles, ret; - mgr = txnp->mgrp; + mgr = txn->mgrp; dbenv = mgr->dbenv; region = mgr->reginfo.primary; do_closefiles = 0; /* Process commit events. */ if ((ret = __txn_doevents(dbenv, - txnp, is_commit ? TXN_COMMIT : TXN_ABORT, 0)) != 0) + txn, is_commit ? TXN_COMMIT : TXN_ABORT, 0)) != 0) return (__db_panic(dbenv, ret)); /* @@ -1044,25 +1243,34 @@ __txn_end(txnp, is_commit) * so DB_LOCK_DEADLOCK is just as fatal as any other error. */ if (LOCKING_ON(dbenv)) { - request.op = txnp->parent == NULL || + request.op = txn->parent == NULL || is_commit == 0 ? DB_LOCK_PUT_ALL : DB_LOCK_INHERIT; request.obj = NULL; if ((ret = __lock_vec(dbenv, - txnp->txnid, 0, &request, 1, NULL)) != 0) + txn->txnid, 0, &request, 1, NULL)) != 0) return (__db_panic(dbenv, ret)); } /* End the transaction. 
*/ - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); - tp = R_ADDR(&mgr->reginfo, txnp->off); - SH_TAILQ_REMOVE(®ion->active_txn, tp, links, __txn_detail); - if (F_ISSET(tp, TXN_DTL_RESTORED)) { + td = txn->td; + SH_TAILQ_REMOVE(®ion->active_txn, td, links, __txn_detail); + if (F_ISSET(td, TXN_DTL_RESTORED)) { region->stat.st_nrestores--; do_closefiles = region->stat.st_nrestores == 0; } - __db_shalloc_free(&mgr->reginfo, tp); + if (td->name != INVALID_ROFF) { + __db_shalloc_free( + &mgr->reginfo, R_ADDR(&mgr->reginfo, td->name)); + td->name = INVALID_ROFF; + } + if (txn->parent != NULL) { + ptd = txn->parent->td; + SH_TAILQ_REMOVE(&ptd->kids, td, klinks, __txn_detail); + } + __db_shalloc_free(&mgr->reginfo, td); if (is_commit) region->stat.st_ncommits++; @@ -1070,29 +1278,33 @@ __txn_end(txnp, is_commit) region->stat.st_naborts++; --region->stat.st_nactive; - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); /* * The transaction cannot get more locks, remove its locker info, * if any. */ if (LOCKING_ON(dbenv) && (ret = - __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid)) != 0) + __lock_freefamilylocker(dbenv->lk_handle, txn->txnid)) != 0) return (__db_panic(dbenv, ret)); - if (txnp->parent != NULL) - TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks); + if (txn->parent != NULL) + TAILQ_REMOVE(&txn->parent->kids, txn, klinks); /* Free the space. 
*/ - while ((lr = STAILQ_FIRST(&txnp->logs)) != NULL) { - STAILQ_REMOVE(&txnp->logs, lr, __txn_logrec, links); + while ((lr = STAILQ_FIRST(&txn->logs)) != NULL) { + STAILQ_REMOVE(&txn->logs, lr, __txn_logrec, links); __os_free(dbenv, lr); } - if (F_ISSET(txnp, TXN_MALLOC)) { - MUTEX_THREAD_LOCK(dbenv, mgr->mutexp); - TAILQ_REMOVE(&mgr->txn_chain, txnp, links); - MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp); + if (txn->name != NULL) { + __os_free(dbenv, txn->name); + txn->name = NULL; + } + if (F_ISSET(txn, TXN_MALLOC)) { + MUTEX_LOCK(dbenv, mgr->mutex); + TAILQ_REMOVE(&mgr->txn_chain, txn, links); + MUTEX_UNLOCK(dbenv, mgr->mutex); - __os_free(dbenv, txnp); + __os_free(dbenv, txn); } if (do_closefiles) { @@ -1107,9 +1319,9 @@ __txn_end(txnp, is_commit) } static int -__txn_dispatch_undo(dbenv, txnp, rdbt, key_lsn, txnlist) +__txn_dispatch_undo(dbenv, txn, rdbt, key_lsn, txnlist) DB_ENV *dbenv; - DB_TXN *txnp; + DB_TXN *txn; DBT *rdbt; DB_LSN *key_lsn; void *txnlist; @@ -1118,14 +1330,12 @@ __txn_dispatch_undo(dbenv, txnp, rdbt, key_lsn, txnlist) ret = __db_dispatch(dbenv, dbenv->recover_dtab, dbenv->recover_dtab_size, rdbt, key_lsn, DB_TXN_ABORT, txnlist); - if (F_ISSET(txnp, TXN_CHILDCOMMIT)) - (void)__db_txnlist_lsnadd(dbenv, - txnlist, key_lsn, 0); if (ret == DB_SURPRISE_KID) { - if ((ret = __db_txnlist_lsninit( - dbenv, txnlist, key_lsn)) == 0) - F_SET(txnp, TXN_CHILDCOMMIT); + F_SET(txn, TXN_CHILDCOMMIT); + ret = 0; } + if (ret == 0 && F_ISSET(txn, TXN_CHILDCOMMIT) && IS_ZERO_LSN(*key_lsn)) + ret = __db_txnlist_lsnget(dbenv, txnlist, key_lsn, 0); return (ret); } @@ -1135,20 +1345,20 @@ __txn_dispatch_undo(dbenv, txnp, rdbt, key_lsn, txnlist) * Undo the transaction with id txnid. 
*/ static int -__txn_undo(txnp) - DB_TXN *txnp; +__txn_undo(txn) + DB_TXN *txn; { DBT rdbt; DB_ENV *dbenv; DB_LOGC *logc; DB_LSN key_lsn; DB_TXN *ptxn; + DB_TXNHEAD *txnlist; DB_TXNLOGREC *lr; DB_TXNMGR *mgr; int ret, t_ret; - void *txnlist; - mgr = txnp->mgrp; + mgr = txn->mgrp; dbenv = mgr->dbenv; logc = NULL; txnlist = NULL; @@ -1170,33 +1380,29 @@ __txn_undo(txnp) * so that aborted pages are recovered when that transaction * is committed or aborted. */ - for (ptxn = txnp->parent; ptxn != NULL && ptxn->parent != NULL;) + for (ptxn = txn->parent; ptxn != NULL && ptxn->parent != NULL;) ptxn = ptxn->parent; if (ptxn != NULL && ptxn->txn_list != NULL) txnlist = ptxn->txn_list; - else if (txnp->txn_list != NULL) - txnlist = txnp->txn_list; + else if (txn->txn_list != NULL) + txnlist = txn->txn_list; else if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txnlist)) != 0) return (ret); else if (ptxn != NULL) ptxn->txn_list = txnlist; - if (F_ISSET(txnp, TXN_CHILDCOMMIT) && - (ret = __db_txnlist_lsninit(dbenv, txnlist, &txnp->last_lsn)) != 0) - return (ret); - /* * Take log records from the linked list stored in the transaction, * then from the log. 
*/ - for (lr = STAILQ_FIRST(&txnp->logs); + for (lr = STAILQ_FIRST(&txn->logs); lr != NULL; lr = STAILQ_NEXT(lr, links)) { rdbt.data = lr->data; rdbt.size = 0; LSN_NOT_LOGGED(key_lsn); ret = - __txn_dispatch_undo(dbenv, txnp, &rdbt, &key_lsn, txnlist); + __txn_dispatch_undo(dbenv, txn, &rdbt, &key_lsn, txnlist); if (ret != 0) { __db_err(dbenv, "DB_TXN->abort: In-memory log undo failed: %s", @@ -1205,7 +1411,7 @@ __txn_undo(txnp) } } - key_lsn = txnp->last_lsn; + key_lsn = ((TXN_DETAIL *)txn->td)->last_lsn; if (!IS_ZERO_LSN(key_lsn) && (ret = __log_cursor(dbenv, &logc)) != 0) @@ -1218,7 +1424,7 @@ __txn_undo(txnp) */ if ((ret = __log_c_get(logc, &key_lsn, &rdbt, DB_SET)) == 0) { ret = __txn_dispatch_undo(dbenv, - txnp, &rdbt, &key_lsn, txnlist); + txn, &rdbt, &key_lsn, txnlist); } if (ret != 0) { @@ -1231,7 +1437,7 @@ __txn_undo(txnp) } #ifndef HAVE_FTRUNCATE - ret = __db_do_the_limbo(dbenv, ptxn, txnp, txnlist, LIMBO_NORMAL); + ret = __db_do_the_limbo(dbenv, ptxn, txn, txnlist, LIMBO_NORMAL); #endif err: if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) @@ -1242,245 +1448,6 @@ err: if (logc != NULL && (t_ret = __log_c_close(logc)) != 0 && ret == 0) return (ret); } -/* - * __txn_checkpoint_pp -- - * DB_ENV->txn_checkpoint pre/post processing. - * - * PUBLIC: int __txn_checkpoint_pp - * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); - */ -int -__txn_checkpoint_pp(dbenv, kbytes, minutes, flags) - DB_ENV *dbenv; - u_int32_t kbytes, minutes, flags; -{ - int rep_check, ret; - - PANIC_CHECK(dbenv); - ENV_REQUIRES_CONFIG(dbenv, - dbenv->tx_handle, "txn_checkpoint", DB_INIT_TXN); - - /* - * On a replication client, all transactions are read-only; therefore, - * a checkpoint is a null-op. - * - * We permit txn_checkpoint, instead of just rendering it illegal, - * so that an application can just let a checkpoint thread continue - * to operate as it gets promoted or demoted between being a - * master and a client. 
- */ - if (IS_REP_CLIENT(dbenv)) - return (0); - - rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __txn_checkpoint(dbenv, kbytes, minutes, flags); - if (rep_check) - __env_db_rep_exit(dbenv); - return (ret); -} - -/* - * __txn_checkpoint -- - * DB_ENV->txn_checkpoint. - * - * PUBLIC: int __txn_checkpoint - * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); - */ -int -__txn_checkpoint(dbenv, kbytes, minutes, flags) - DB_ENV *dbenv; - u_int32_t kbytes, minutes, flags; -{ - DB_LSN ckp_lsn, last_ckp; - DB_TXNMGR *mgr; - DB_TXNREGION *region; - REGENV *renv; - REGINFO *infop; - time_t last_ckp_time, now; - u_int32_t bytes, gen, id, logflags, mbytes; - int ret; - - ret = gen = 0; - /* - * A client will only call through here during recovery, - * so just sync the Mpool and go home. - */ - if (IS_REP_CLIENT(dbenv)) { - if (MPOOL_ON(dbenv) && (ret = __memp_sync(dbenv, NULL)) != 0) { - __db_err(dbenv, - "txn_checkpoint: failed to flush the buffer cache %s", - db_strerror(ret)); - return (ret); - } else - return (0); - } - - mgr = dbenv->tx_handle; - region = mgr->reginfo.primary; - infop = dbenv->reginfo; - renv = infop->primary; - /* - * No mutex is needed as envid is read-only once it is set. - */ - id = renv->envid; - - /* - * The checkpoint LSN is an LSN such that all transactions begun before - * it are complete. Our first guess (corrected below based on the list - * of active transactions) is the last-written LSN. - */ - __log_txn_lsn(dbenv, &ckp_lsn, &mbytes, &bytes); - - if (!LF_ISSET(DB_FORCE)) { - /* Don't checkpoint a quiescent database. */ - if (bytes == 0 && mbytes == 0) - return (0); - - /* - * If either kbytes or minutes is non-zero, then only take the - * checkpoint if more than "minutes" minutes have passed or if - * more than "kbytes" of log data have been written since the - * last checkpoint. 
- */ - if (kbytes != 0 && - mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes) - goto do_ckp; - - if (minutes != 0) { - (void)time(&now); - - R_LOCK(dbenv, &mgr->reginfo); - last_ckp_time = region->time_ckp; - R_UNLOCK(dbenv, &mgr->reginfo); - - if (now - last_ckp_time >= (time_t)(minutes * 60)) - goto do_ckp; - } - - /* - * If we checked time and data and didn't go to checkpoint, - * we're done. - */ - if (minutes != 0 || kbytes != 0) - return (0); - } - -do_ckp: - __txn_getactive(dbenv, &ckp_lsn); - - if (MPOOL_ON(dbenv) && (ret = __memp_sync(dbenv, NULL)) != 0) { - __db_err(dbenv, - "txn_checkpoint: failed to flush the buffer cache %s", - db_strerror(ret)); - return (ret); - } - - /* - * Because we can't be a replication client here, and because - * recovery (somewhat unusually) calls txn_checkpoint and expects - * it to write a log message, LOGGING_ON is the correct macro here. - */ - if (LOGGING_ON(dbenv)) { - R_LOCK(dbenv, &mgr->reginfo); - last_ckp = region->last_ckp; - R_UNLOCK(dbenv, &mgr->reginfo); - if (REP_ON(dbenv)) - __rep_get_gen(dbenv, &gen); - - /* - * Put out records for the open files before we log - * the checkpoint. The records are certain to be at - * or after ckp_lsn, but before the checkpoint record - * itself, so they're sure to be included if we start - * recovery from the ckp_lsn contained in this - * checkpoint. - */ - logflags = DB_LOG_PERM | DB_LOG_CHKPNT; - if (!IS_RECOVERING(dbenv)) - logflags |= DB_FLUSH; - if ((ret = __dbreg_log_files(dbenv)) != 0 || - (ret = __txn_ckp_log(dbenv, NULL, &ckp_lsn, logflags, - &ckp_lsn, &last_ckp, (int32_t)time(NULL), id, gen)) != 0) { - __db_err(dbenv, - "txn_checkpoint: log failed at LSN [%ld %ld] %s", - (long)ckp_lsn.file, (long)ckp_lsn.offset, - db_strerror(ret)); - return (ret); - } - - __txn_updateckp(dbenv, &ckp_lsn); - } - return (ret); -} - -/* - * __txn_getactive -- - * Find the oldest active transaction and figure out its "begin" LSN. 
- * This is the lowest LSN we can checkpoint, since any record written - * after it may be involved in a transaction and may therefore need - * to be undone in the case of an abort. - * - * We check both the file and offset for 0 since the lsn may be in - * transition. If it is then we don't care about this txn becuase it - * must be starting after we set the initial value of lsnp in the caller. - * All txns must initalize their begin_lsn before writing to the log. - * - * PUBLIC: void __txn_getactive __P((DB_ENV *, DB_LSN *)); - */ -void -__txn_getactive(dbenv, lsnp) - DB_ENV *dbenv; - DB_LSN *lsnp; -{ - DB_TXNMGR *mgr; - DB_TXNREGION *region; - TXN_DETAIL *txnp; - - mgr = dbenv->tx_handle; - region = mgr->reginfo.primary; - - R_LOCK(dbenv, &mgr->reginfo); - for (txnp = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); - txnp != NULL; - txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) - if (txnp->begin_lsn.file != 0 && - txnp->begin_lsn.offset != 0 && - log_compare(&txnp->begin_lsn, lsnp) < 0) - *lsnp = txnp->begin_lsn; - R_UNLOCK(dbenv, &mgr->reginfo); -} - -/* - * __txn_getckp -- - * Get the LSN of the last transaction checkpoint. - * - * PUBLIC: int __txn_getckp __P((DB_ENV *, DB_LSN *)); - */ -int -__txn_getckp(dbenv, lsnp) - DB_ENV *dbenv; - DB_LSN *lsnp; -{ - DB_LSN lsn; - DB_TXNMGR *mgr; - DB_TXNREGION *region; - - mgr = dbenv->tx_handle; - region = mgr->reginfo.primary; - - R_LOCK(dbenv, &mgr->reginfo); - lsn = region->last_ckp; - R_UNLOCK(dbenv, &mgr->reginfo); - - if (IS_ZERO_LSN(lsn)) - return (DB_NOTFOUND); - - *lsnp = lsn; - return (0); -} - /* * __txn_activekids -- * Return if this transaction has any active children. 
@@ -1488,19 +1455,19 @@ __txn_getckp(dbenv, lsnp) * PUBLIC: int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *)); */ int -__txn_activekids(dbenv, rectype, txnp) +__txn_activekids(dbenv, rectype, txn) DB_ENV *dbenv; u_int32_t rectype; - DB_TXN *txnp; + DB_TXN *txn; { /* * On a child commit, we know that there are children (i.e., the * committing child at the least. In that case, skip this check. */ - if (F_ISSET(txnp, TXN_COMPENSATE) || rectype == DB___txn_child) + if (F_ISSET(txn, TXN_COMPENSATE) || rectype == DB___txn_child) return (0); - if (TAILQ_FIRST(&txnp->kids) != NULL) { + if (TAILQ_FIRST(&txn->kids) != NULL) { __db_err(dbenv, "Child transaction is active"); return (EPERM); } @@ -1582,16 +1549,16 @@ __txn_preclose(dbenv) DB_TXNREGION *region; int do_closefiles, ret; - mgr = (DB_TXNMGR *)dbenv->tx_handle; + mgr = dbenv->tx_handle; region = mgr->reginfo.primary; do_closefiles = 0; - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); if (region != NULL && region->stat.st_nrestores <= mgr->n_discards && mgr->n_discards != 0) do_closefiles = 1; - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); if (do_closefiles) { /* @@ -1630,55 +1597,25 @@ __txn_reset(dbenv) } /* - * __txn_updateckp -- - * Update the last_ckp field in the transaction region. This happens - * at the end of a normal checkpoint and also when a replication client - * receives a checkpoint record. - * - * PUBLIC: void __txn_updateckp __P((DB_ENV *, DB_LSN *)); - */ -void -__txn_updateckp(dbenv, lsnp) - DB_ENV *dbenv; - DB_LSN *lsnp; -{ - DB_TXNMGR *mgr; - DB_TXNREGION *region; - - mgr = dbenv->tx_handle; - region = mgr->reginfo.primary; - - /* - * We want to make sure last_ckp only moves forward; since we drop - * locks above and in log_put, it's possible for two calls to - * __txn_ckp_log to finish in a different order from how they were - * called. 
- */ - R_LOCK(dbenv, &mgr->reginfo); - if (log_compare(®ion->last_ckp, lsnp) < 0) { - region->last_ckp = *lsnp; - (void)time(®ion->time_ckp); - } - R_UNLOCK(dbenv, &mgr->reginfo); -} - -/* - * txn_set_begin_lsnp -- + * txn_set_txn_lsnp -- * Set the pointer to the begin_lsn field if that field is zero. + * Set the pointer to the last_lsn field. */ static void -__txn_set_begin_lsnp(txn, rlsnp) +__txn_set_txn_lsnp(txn, blsnp, llsnp) DB_TXN *txn; - DB_LSN **rlsnp; + DB_LSN **blsnp; + DB_LSN **llsnp; { DB_LSN *lsnp; TXN_DETAIL *td; - td = R_ADDR(&txn->mgrp->reginfo, txn->off); + td = txn->td; + *llsnp = &td->last_lsn; while (td->parent != INVALID_ROFF) td = R_ADDR(&txn->mgrp->reginfo, td->parent); lsnp = &td->begin_lsn; if (IS_ZERO_LSN(*lsnp)) - *rlsnp = lsnp; + *blsnp = lsnp; } diff --git a/storage/bdb/txn/txn.src b/storage/bdb/txn/txn.src index 34bd8bd0b3e..0b1212d7eea 100644 --- a/storage/bdb/txn/txn.src +++ b/storage/bdb/txn/txn.src @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: txn.src,v 11.33 2004/07/27 12:35:19 bostic Exp $ + * $Id: txn.src,v 12.2 2005/09/21 18:34:07 sue Exp $ */ PREFIX __txn @@ -44,10 +44,13 @@ INCLUDE * Note that we are using an int32_t for the timestamp. This means that * in 2039 we will need to deprecate this log record and create one that * either changes the Epoch or has a 64-bit offset. + * envid: + * Environment ID of this operation. */ BEGIN regop 10 ARG opcode u_int32_t ld TIME timestamp int32_t ld +ARG envid u_int32_t ld LOCKS locks DBT s END diff --git a/storage/bdb/txn/txn_chkpt.c b/storage/bdb/txn/txn_chkpt.c new file mode 100644 index 00000000000..2e192adc74b --- /dev/null +++ b/storage/bdb/txn/txn_chkpt.c @@ -0,0 +1,353 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2005 + * Sleepycat Software. All rights reserved. 
+ */ +/* + * Copyright (c) 1995, 1996 + * The President and Fellows of Harvard University. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Margo Seltzer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $Id: txn_chkpt.c,v 12.19 2005/10/20 18:57:13 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#include + +#if TIME_WITH_SYS_TIME +#include +#include +#else +#if HAVE_SYS_TIME_H +#include +#else +#include +#endif +#endif + +#include +#endif + +#include "db_int.h" +#include "dbinc/db_shash.h" +#include "dbinc/log.h" +#include "dbinc/mp.h" +#include "dbinc/txn.h" + +/* + * __txn_checkpoint_pp -- + * DB_ENV->txn_checkpoint pre/post processing. + * + * PUBLIC: int __txn_checkpoint_pp + * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); + */ +int +__txn_checkpoint_pp(dbenv, kbytes, minutes, flags) + DB_ENV *dbenv; + u_int32_t kbytes, minutes, flags; +{ + DB_THREAD_INFO *ip; + int ret; + + PANIC_CHECK(dbenv); + ENV_REQUIRES_CONFIG(dbenv, + dbenv->tx_handle, "txn_checkpoint", DB_INIT_TXN); + + /* + * On a replication client, all transactions are read-only; therefore, + * a checkpoint is a null-op. + * + * We permit txn_checkpoint, instead of just rendering it illegal, + * so that an application can just let a checkpoint thread continue + * to operate as it gets promoted or demoted between being a + * master and a client. + */ + if (IS_REP_CLIENT(dbenv)) + return (0); + + ENV_ENTER(dbenv, ip); + REPLICATION_WRAP(dbenv, + (__txn_checkpoint(dbenv, kbytes, minutes, flags)), ret); + ENV_LEAVE(dbenv, ip); + return (ret); +} + +/* + * __txn_checkpoint -- + * DB_ENV->txn_checkpoint. + * + * PUBLIC: int __txn_checkpoint + * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t)); + */ +int +__txn_checkpoint(dbenv, kbytes, minutes, flags) + DB_ENV *dbenv; + u_int32_t kbytes, minutes, flags; +{ + DB_LSN ckp_lsn, last_ckp; + DB_TXNMGR *mgr; + DB_TXNREGION *region; + REGENV *renv; + REGINFO *infop; + time_t last_ckp_time, now; + u_int32_t bytes, gen, id, logflags, mbytes; + int ret; + + ret = gen = 0; + /* + * A client will only call through here during recovery, + * so just sync the Mpool and go home. 
+ */ + if (IS_REP_CLIENT(dbenv)) { + if (MPOOL_ON(dbenv) && (ret = __memp_sync(dbenv, NULL)) != 0) { + __db_err(dbenv, + "txn_checkpoint: failed to flush the buffer cache %s", + db_strerror(ret)); + return (ret); + } else + return (0); + } + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + infop = dbenv->reginfo; + renv = infop->primary; + /* + * No mutex is needed as envid is read-only once it is set. + */ + id = renv->envid; + + /* + * The checkpoint LSN is an LSN such that all transactions begun before + * it are complete. Our first guess (corrected below based on the list + * of active transactions) is the last-written LSN. + */ + if ((ret = __log_current_lsn(dbenv, &ckp_lsn, &mbytes, &bytes)) != 0) + return (ret); + + if (!LF_ISSET(DB_FORCE)) { + /* Don't checkpoint a quiescent database. */ + if (bytes == 0 && mbytes == 0) + return (0); + + /* + * If either kbytes or minutes is non-zero, then only take the + * checkpoint if more than "minutes" minutes have passed or if + * more than "kbytes" of log data have been written since the + * last checkpoint. + */ + if (kbytes != 0 && + mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes) + goto do_ckp; + + if (minutes != 0) { + (void)time(&now); + + TXN_SYSTEM_LOCK(dbenv); + last_ckp_time = region->time_ckp; + TXN_SYSTEM_UNLOCK(dbenv); + + if (now - last_ckp_time >= (time_t)(minutes * 60)) + goto do_ckp; + } + + /* + * If we checked time and data and didn't go to checkpoint, + * we're done. + */ + if (minutes != 0 || kbytes != 0) + return (0); + } + + /* + * We must single thread checkpoints otherwise the chk_lsn may get out + * of order. We need to capture the start of the earliest currently + * active transaction (chk_lsn) and then flush all buffers. While + * doing this we we could then be overtaken by another checkpoint that + * sees a later chk_lsn but competes first. An archive process could + * then remove a log this checkpoint depends on. 
+ */ +do_ckp: MUTEX_LOCK(dbenv, region->mtx_ckp); + if ((ret = __txn_getactive(dbenv, &ckp_lsn)) != 0) + goto err; + + if (MPOOL_ON(dbenv) && (ret = __memp_sync(dbenv, NULL)) != 0) { + __db_err(dbenv, + "txn_checkpoint: failed to flush the buffer cache %s", + db_strerror(ret)); + goto err; + } + + /* + * Because we can't be a replication client here, and because + * recovery (somewhat unusually) calls txn_checkpoint and expects + * it to write a log message, LOGGING_ON is the correct macro here. + */ + if (LOGGING_ON(dbenv)) { + TXN_SYSTEM_LOCK(dbenv); + last_ckp = region->last_ckp; + TXN_SYSTEM_UNLOCK(dbenv); + if (REP_ON(dbenv) && (ret = __rep_get_gen(dbenv, &gen)) != 0) + goto err; + + /* + * Put out records for the open files before we log + * the checkpoint. The records are certain to be at + * or after ckp_lsn, but before the checkpoint record + * itself, so they're sure to be included if we start + * recovery from the ckp_lsn contained in this + * checkpoint. + */ + logflags = DB_LOG_PERM | DB_LOG_CHKPNT; + if (!IS_RECOVERING(dbenv)) + logflags |= DB_FLUSH; + if ((ret = __dbreg_log_files(dbenv)) != 0 || + (ret = __txn_ckp_log(dbenv, NULL, &ckp_lsn, logflags, + &ckp_lsn, &last_ckp, (int32_t)time(NULL), id, gen)) != 0) { + __db_err(dbenv, + "txn_checkpoint: log failed at LSN [%ld %ld] %s", + (long)ckp_lsn.file, (long)ckp_lsn.offset, + db_strerror(ret)); + goto err; + } + + if ((ret = __txn_updateckp(dbenv, &ckp_lsn)) != 0) + goto err; + } + +err: MUTEX_UNLOCK(dbenv, region->mtx_ckp); + return (ret); +} + +/* + * __txn_getactive -- + * Find the oldest active transaction and figure out its "begin" LSN. + * This is the lowest LSN we can checkpoint, since any record written + * after it may be involved in a transaction and may therefore need + * to be undone in the case of an abort. + * + * We check both the file and offset for 0 since the lsn may be in + * transition. 
If it is then we don't care about this txn because it + * must be starting after we set the initial value of lsnp in the caller. + * All txns must initalize their begin_lsn before writing to the log. + * + * PUBLIC: int __txn_getactive __P((DB_ENV *, DB_LSN *)); + */ +int +__txn_getactive(dbenv, lsnp) + DB_ENV *dbenv; + DB_LSN *lsnp; +{ + DB_TXNMGR *mgr; + DB_TXNREGION *region; + TXN_DETAIL *td; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + TXN_SYSTEM_LOCK(dbenv); + for (td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); + td != NULL; + td = SH_TAILQ_NEXT(td, links, __txn_detail)) + if (td->begin_lsn.file != 0 && + td->begin_lsn.offset != 0 && + log_compare(&td->begin_lsn, lsnp) < 0) + *lsnp = td->begin_lsn; + TXN_SYSTEM_UNLOCK(dbenv); + + return (0); +} + +/* + * __txn_getckp -- + * Get the LSN of the last transaction checkpoint. + * + * PUBLIC: int __txn_getckp __P((DB_ENV *, DB_LSN *)); + */ +int +__txn_getckp(dbenv, lsnp) + DB_ENV *dbenv; + DB_LSN *lsnp; +{ + DB_LSN lsn; + DB_TXNMGR *mgr; + DB_TXNREGION *region; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + TXN_SYSTEM_LOCK(dbenv); + lsn = region->last_ckp; + TXN_SYSTEM_UNLOCK(dbenv); + + if (IS_ZERO_LSN(lsn)) + return (DB_NOTFOUND); + + *lsnp = lsn; + return (0); +} + +/* + * __txn_updateckp -- + * Update the last_ckp field in the transaction region. This happens + * at the end of a normal checkpoint and also when a replication client + * receives a checkpoint record. + * + * PUBLIC: int __txn_updateckp __P((DB_ENV *, DB_LSN *)); + */ +int +__txn_updateckp(dbenv, lsnp) + DB_ENV *dbenv; + DB_LSN *lsnp; +{ + DB_TXNMGR *mgr; + DB_TXNREGION *region; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + + /* + * We want to make sure last_ckp only moves forward; since we drop + * locks above and in log_put, it's possible for two calls to + * __txn_ckp_log to finish in a different order from how they were + * called. 
+ */ + TXN_SYSTEM_LOCK(dbenv); + if (log_compare(®ion->last_ckp, lsnp) < 0) { + region->last_ckp = *lsnp; + (void)time(®ion->time_ckp); + } + TXN_SYSTEM_UNLOCK(dbenv); + + return (0); +} diff --git a/storage/bdb/txn/txn_failchk.c b/storage/bdb/txn/txn_failchk.c new file mode 100644 index 00000000000..5348f8bb1f8 --- /dev/null +++ b/storage/bdb/txn/txn_failchk.c @@ -0,0 +1,100 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2005 + * Sleepycat Software. All rights reserved. + * + * $Id: txn_failchk.c,v 12.2 2005/10/13 00:51:51 bostic Exp $ + */ + +#include "db_config.h" + +#ifndef NO_SYSTEM_INCLUDES +#include +#endif + +#include "db_int.h" +#include "dbinc/txn.h" + +/* + * __txn_failchk -- + * Check for transactions started by dead threads of control. + * + * PUBLIC: int __txn_failchk __P((DB_ENV *)); + */ +int +__txn_failchk(dbenv) + DB_ENV *dbenv; +{ + DB_TXN *ktxn, *txn; + DB_TXNMGR *mgr; + DB_TXNREGION *region; + TXN_DETAIL *ktd, *td; + db_threadid_t tid; + int ret; + char buf[DB_THREADID_STRLEN]; + pid_t pid; + + mgr = dbenv->tx_handle; + region = mgr->reginfo.primary; + +retry: TXN_SYSTEM_LOCK(dbenv); + + SH_TAILQ_FOREACH(td, ®ion->active_txn, links, __txn_detail) { + /* + * If this is a child transaction, skip it. + * The parent will take care of it. + */ + if (td->parent != INVALID_ROFF) + continue; + /* + * If the txn is prepared, then it does not matter + * what the state of the thread is. + */ + if (td->status == TXN_PREPARED) + continue; + + /* If the thread is still alive, it's not a problem. */ + if (dbenv->is_alive(dbenv, td->pid, td->tid)) + continue; + + if (F_ISSET(td, TXN_DTL_INMEMORY)) + return (__db_failed(dbenv, + "Transaction has in memory logs", + td->pid, td->tid)); + + /* Abort the transaction. 
*/ + TXN_SYSTEM_UNLOCK(dbenv); + if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0) + return (ret); + __txn_continue(dbenv, txn, td); + F_SET(txn, TXN_MALLOC); + SH_TAILQ_FOREACH(ktd, &td->kids, klinks, __txn_detail) { + if (F_ISSET(ktd, TXN_DTL_INMEMORY)) + return (__db_failed(dbenv, + "Transaction has in memory logs", + td->pid, td->tid)); + if ((ret = + __os_calloc(dbenv, 1, sizeof(DB_TXN), &ktxn)) != 0) + return (ret); + __txn_continue(dbenv, ktxn, ktd); + F_SET(ktxn, TXN_MALLOC); + ktxn->parent = txn; + TAILQ_INSERT_HEAD(&txn->kids, txn, klinks); + } + TAILQ_INSERT_TAIL(&mgr->txn_chain, txn, links); + pid = td->pid; + tid = td->tid; + (void)dbenv->thread_id_string(dbenv, pid, tid, buf); + __db_msg(dbenv, + "Aborting txn %#lx: %s", (u_long)txn->txnid, buf); + if ((ret = __txn_abort(txn)) != 0) + return (__db_failed(dbenv, + "Transaction abort failed", pid, tid)); + goto retry; + } + + TXN_SYSTEM_UNLOCK(dbenv); + + return (0); +} diff --git a/storage/bdb/txn/txn_method.c b/storage/bdb/txn/txn_method.c index c13f86dee0f..db92f35017e 100644 --- a/storage/bdb/txn/txn_method.c +++ b/storage/bdb/txn/txn_method.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: txn_method.c,v 11.72 2004/03/23 17:24:18 bostic Exp $ + * $Id: txn_method.c,v 12.2 2005/07/21 18:21:45 bostic Exp $ */ #include "db_config.h" @@ -12,28 +12,12 @@ #ifndef NO_SYSTEM_INCLUDES #include -#ifdef HAVE_RPC -#include -#endif - #include #endif -#ifdef HAVE_RPC -#include "db_server.h" -#endif - #include "db_int.h" #include "dbinc/txn.h" -#ifdef HAVE_RPC -#include "dbinc_auto/rpc_client_ext.h" -#endif - -static int __txn_get_tx_max __P((DB_ENV *, u_int32_t *)); -static int __txn_get_tx_timestamp __P((DB_ENV *, time_t *)); -static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *)); - /* * __txn_dbenv_create -- * Transaction specific initialization of the DB_ENV structure. @@ -50,38 +34,13 @@ __txn_dbenv_create(dbenv) * state or turn off mutex locking, and so we can neither check * the panic state or acquire a mutex in the DB_ENV create path. */ - dbenv->tx_max = DEF_MAX_TXNS; - -#ifdef HAVE_RPC - if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) { - dbenv->get_tx_max = __dbcl_get_tx_max; - dbenv->set_tx_max = __dbcl_set_tx_max; - dbenv->get_tx_timestamp = __dbcl_get_tx_timestamp; - dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp; - - dbenv->txn_checkpoint = __dbcl_txn_checkpoint; - dbenv->txn_recover = __dbcl_txn_recover; - dbenv->txn_stat = __dbcl_txn_stat; - dbenv->txn_stat_print = NULL; - dbenv->txn_begin = __dbcl_txn_begin; - } else -#endif - { - dbenv->get_tx_max = __txn_get_tx_max; - dbenv->set_tx_max = __txn_set_tx_max; - dbenv->get_tx_timestamp = __txn_get_tx_timestamp; - dbenv->set_tx_timestamp = __txn_set_tx_timestamp; - - dbenv->txn_checkpoint = __txn_checkpoint_pp; - dbenv->txn_recover = __txn_recover_pp; - dbenv->txn_stat = __txn_stat_pp; - dbenv->txn_stat_print = __txn_stat_print_pp; - dbenv->txn_begin = __txn_begin_pp; - } } -static int +/* + * PUBLIC: int __txn_get_tx_max __P((DB_ENV *, u_int32_t *)); + */ +int __txn_get_tx_max(dbenv, tx_maxp) DB_ENV *dbenv; u_int32_t *tx_maxp; @@ -115,7 +74,10 @@ __txn_set_tx_max(dbenv, tx_max) return (0); } 
-static int +/* + * PUBLIC: int __txn_get_tx_timestamp __P((DB_ENV *, time_t *)); + */ +int __txn_get_tx_timestamp(dbenv, timestamp) DB_ENV *dbenv; time_t *timestamp; @@ -127,8 +89,10 @@ __txn_get_tx_timestamp(dbenv, timestamp) /* * __txn_set_tx_timestamp -- * Set the transaction recovery timestamp. + * + * PUBLIC: int __txn_set_tx_timestamp __P((DB_ENV *, time_t *)); */ -static int +int __txn_set_tx_timestamp(dbenv, timestamp) DB_ENV *dbenv; time_t *timestamp; diff --git a/storage/bdb/txn/txn_rec.c b/storage/bdb/txn/txn_rec.c index ea885528f82..3e1c516a0b5 100644 --- a/storage/bdb/txn/txn_rec.c +++ b/storage/bdb/txn/txn_rec.c @@ -1,7 +1,7 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. */ /* @@ -32,7 +32,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: txn_rec.c,v 11.64 2004/09/22 17:41:10 bostic Exp $ + * $Id: txn_rec.c,v 12.4 2005/10/19 15:10:45 bostic Exp $ */ #include "db_config.h" @@ -298,17 +298,15 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info) /* * This is a record in a PARENT's log trail indicating that a - * child committed. If we are aborting, we need to update the - * parent's LSN array. If we are in recovery, then if the + * child committed. If we are aborting, return the childs last + * record's LSN. If we are in recovery, then if the * parent is committing, we set ourselves up to commit, else * we do nothing. */ if (op == DB_TXN_ABORT) { - /* Note that __db_txnlist_lsnadd rewrites its LSN - * parameter, so you cannot reuse the argp->c_lsn field. - */ - ret = __db_txnlist_lsnadd(dbenv, - info, &argp->c_lsn, TXNLIST_NEW); + *lsnp = argp->c_lsn; + ret = __db_txnlist_lsnadd(dbenv, info, &argp->prev_lsn); + goto out; } else if (op == DB_TXN_BACKWARD_ROLL) { /* Child might exist -- look for it. 
*/ ret = __db_txnlist_find(dbenv, info, argp->child, &c_stat); @@ -422,12 +420,12 @@ __txn_restore_txn(dbenv, lsnp, argp) mgr = dbenv->tx_handle; region = mgr->reginfo.primary; - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); /* Allocate a new transaction detail structure. */ if ((ret = __db_shalloc(&mgr->reginfo, sizeof(TXN_DETAIL), 0, &td)) != 0) { - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); return (ret); } @@ -451,7 +449,7 @@ __txn_restore_txn(dbenv, lsnp, argp) region->stat.st_nactive++; if (region->stat.st_nactive > region->stat.st_maxnactive) region->stat.st_maxnactive = region->stat.st_nactive; - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); return (0); } diff --git a/storage/bdb/txn/txn_recover.c b/storage/bdb/txn/txn_recover.c index 79b88b72744..00b4fabc520 100644 --- a/storage/bdb/txn/txn_recover.c +++ b/storage/bdb/txn/txn_recover.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 2001-2004 + * Copyright (c) 2001-2005 * Sleepycat Software. All rights reserved. * - * $Id: txn_recover.c,v 1.54 2004/10/15 16:59:44 bostic Exp $ + * $Id: txn_recover.c,v 12.11 2005/10/14 21:12:18 ubell Exp $ */ #include "db_config.h" @@ -24,40 +24,6 @@ #include "dbinc_auto/crdel_auto.h" #include "dbinc_auto/db_ext.h" -/* - * __txn_continue - * Fill in the fields of the local transaction structure given - * the detail transaction structure. - * - * XXX - * I'm not sure that we work correctly with nested txns. 
- * - * PUBLIC: void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t)); - */ -void -__txn_continue(env, txnp, td, off) - DB_ENV *env; - DB_TXN *txnp; - TXN_DETAIL *td; - size_t off; -{ - txnp->mgrp = env->tx_handle; - txnp->parent = NULL; - txnp->last_lsn = td->last_lsn; - txnp->txnid = td->txnid; - txnp->off = (roff_t)off; - - txnp->abort = __txn_abort; - txnp->commit = __txn_commit; - txnp->discard = __txn_discard; - txnp->id = __txn_id; - txnp->prepare = __txn_prepare; - - txnp->flags = 0; - if (F_ISSET(td, TXN_DTL_RESTORED)) - F_SET(txnp, TXN_RESTORED); -} - /* * __txn_map_gid * Return the txn that corresponds to this global ID. @@ -73,23 +39,23 @@ __txn_map_gid(dbenv, gid, tdp, offp) roff_t *offp; { DB_TXNMGR *mgr; - DB_TXNREGION *tmr; + DB_TXNREGION *region; mgr = dbenv->tx_handle; - tmr = mgr->reginfo.primary; + region = mgr->reginfo.primary; /* * Search the internal active transaction table to find the * matching xid. If this is a performance hit, then we * can create a hash table, but I doubt it's worth it. */ - R_LOCK(dbenv, &mgr->reginfo); - for (*tdp = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail); + TXN_SYSTEM_LOCK(dbenv); + for (*tdp = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); *tdp != NULL; *tdp = SH_TAILQ_NEXT(*tdp, links, __txn_detail)) if (memcmp(gid, (*tdp)->xid, sizeof((*tdp)->xid)) == 0) break; - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); if (*tdp == NULL) return (EINVAL); @@ -112,7 +78,8 @@ __txn_recover_pp(dbenv, preplist, count, retp, flags) long count, *retp; u_int32_t flags; { - int rep_check, ret; + DB_THREAD_INFO *ip; + int ret; PANIC_CHECK(dbenv); ENV_REQUIRES_CONFIG( @@ -125,12 +92,10 @@ __txn_recover_pp(dbenv, preplist, count, retp, flags) return (EINVAL); } - rep_check = IS_ENV_REPLICATED(dbenv) ? 
1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __txn_recover(dbenv, preplist, count, retp, flags); - if (rep_check) - __env_db_rep_exit(dbenv); + ENV_ENTER(dbenv, ip); + REPLICATION_WRAP(dbenv, + (__txn_recover(dbenv, preplist, count, retp, flags)), ret); + ENV_LEAVE(dbenv, ip); return (ret); } @@ -181,7 +146,7 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) DB_LSN min; DB_PREPLIST *prepp; DB_TXNMGR *mgr; - DB_TXNREGION *tmr; + DB_TXNREGION *region; TXN_DETAIL *td; XID *xidp; long i; @@ -204,7 +169,7 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) */ mgr = dbenv->tx_handle; - tmr = mgr->reginfo.primary; + region = mgr->reginfo.primary; /* * During this pass we need to figure out if we are going to need @@ -213,9 +178,9 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) * and the ones that we are collecting are restored (if they aren't * restored, then we never crashed; just the main server did). */ - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); if (flags == DB_FIRST) { - for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail); + for (td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); td != NULL; td = SH_TAILQ_NEXT(td, links, __txn_detail)) { if (F_ISSET(td, TXN_DTL_RESTORED)) @@ -229,7 +194,7 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) open_files = 0; /* Now begin collecting active transactions. 
*/ - for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail); + for (td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); td != NULL && *retp < count; td = SH_TAILQ_NEXT(td, links, __txn_detail)) { if (td->status != TXN_PREPARED || @@ -252,11 +217,10 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) if (txns != NULL) { if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &prepp->txn)) != 0) { - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); goto err; } - __txn_continue(dbenv, - prepp->txn, td, R_OFFSET(&mgr->reginfo, td)); + __txn_continue(dbenv, prepp->txn, td); F_SET(prepp->txn, TXN_MALLOC); memcpy(prepp->gid, td->xid, sizeof(td->xid)); prepp++; @@ -268,17 +232,20 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) (*retp)++; F_SET(td, TXN_DTL_COLLECTED); + if (IS_ENV_REPLICATED(dbenv) && + (ret = __op_rep_enter(dbenv)) != 0) + goto err; } - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); /* * Now link all the transactions into the transaction manager's list. 
*/ if (txns != NULL) { - MUTEX_THREAD_LOCK(dbenv, mgr->mutexp); + MUTEX_LOCK(dbenv, mgr->mutex); for (i = 0; i < *retp; i++) TAILQ_INSERT_TAIL(&mgr->txn_chain, txns[i].txn, links); - MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp); + MUTEX_UNLOCK(dbenv, mgr->mutex); } if (open_files && nrestores && *retp != 0 && !IS_MAX_LSN(min)) { @@ -286,7 +253,9 @@ __txn_get_prepared(dbenv, xids, txns, count, retp, flags) ret = __txn_openfiles(dbenv, &min, 0); F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER); } -err: + return (0); + +err: TXN_SYSTEM_UNLOCK(dbenv); return (ret); } @@ -305,9 +274,9 @@ __txn_openfiles(dbenv, min, force) DBT data; DB_LOGC *logc; DB_LSN open_lsn; + DB_TXNHEAD *txninfo; __txn_ckp_args *ckp_args; int ret, t_ret; - void *txninfo; /* * Figure out the last checkpoint before the smallest diff --git a/storage/bdb/txn/txn_region.c b/storage/bdb/txn/txn_region.c index 82d4543af89..bfce068d4d1 100644 --- a/storage/bdb/txn/txn_region.c +++ b/storage/bdb/txn/txn_region.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: txn_region.c,v 11.87 2004/10/15 16:59:44 bostic Exp $ + * $Id: txn_region.c,v 12.10 2005/10/14 21:12:18 ubell Exp $ */ #include "db_config.h" @@ -43,56 +43,49 @@ int __txn_open(dbenv) DB_ENV *dbenv; { - DB_TXNMGR *tmgrp; + DB_TXNMGR *mgr; int ret; /* Create/initialize the transaction manager structure. */ - if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXNMGR), &tmgrp)) != 0) + if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXNMGR), &mgr)) != 0) return (ret); - TAILQ_INIT(&tmgrp->txn_chain); - tmgrp->dbenv = dbenv; + TAILQ_INIT(&mgr->txn_chain); + mgr->dbenv = dbenv; /* Join/create the txn region. 
*/ - tmgrp->reginfo.dbenv = dbenv; - tmgrp->reginfo.type = REGION_TYPE_TXN; - tmgrp->reginfo.id = INVALID_REGION_ID; - tmgrp->reginfo.flags = REGION_JOIN_OK; + mgr->reginfo.dbenv = dbenv; + mgr->reginfo.type = REGION_TYPE_TXN; + mgr->reginfo.id = INVALID_REGION_ID; + mgr->reginfo.flags = REGION_JOIN_OK; if (F_ISSET(dbenv, DB_ENV_CREATE)) - F_SET(&tmgrp->reginfo, REGION_CREATE_OK); + F_SET(&mgr->reginfo, REGION_CREATE_OK); if ((ret = __db_r_attach(dbenv, - &tmgrp->reginfo, __txn_region_size(dbenv))) != 0) + &mgr->reginfo, __txn_region_size(dbenv))) != 0) goto err; /* If we created the region, initialize it. */ - if (F_ISSET(&tmgrp->reginfo, REGION_CREATE)) - if ((ret = __txn_init(dbenv, tmgrp)) != 0) + if (F_ISSET(&mgr->reginfo, REGION_CREATE)) + if ((ret = __txn_init(dbenv, mgr)) != 0) goto err; /* Set the local addresses. */ - tmgrp->reginfo.primary = - R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary); + mgr->reginfo.primary = + R_ADDR(&mgr->reginfo, mgr->reginfo.rp->primary); - /* Acquire a mutex to protect the active TXN list. */ - if (F_ISSET(dbenv, DB_ENV_THREAD) && - (ret = __db_mutex_setup(dbenv, &tmgrp->reginfo, &tmgrp->mutexp, - MUTEX_ALLOC | MUTEX_NO_RLOCK | MUTEX_THREAD)) != 0) + /* If threaded, acquire a mutex to protect the active TXN list. 
*/ + if ((ret = __mutex_alloc( + dbenv, MTX_TXN_ACTIVE, DB_MUTEX_THREAD, &mgr->mutex)) != 0) goto err; - R_UNLOCK(dbenv, &tmgrp->reginfo); - - dbenv->tx_handle = tmgrp; + dbenv->tx_handle = mgr; return (0); -err: if (tmgrp->reginfo.addr != NULL) { - if (F_ISSET(&tmgrp->reginfo, REGION_CREATE)) - ret = __db_panic(dbenv, ret); - R_UNLOCK(dbenv, &tmgrp->reginfo); +err: dbenv->tx_handle = NULL; + if (mgr->reginfo.addr != NULL) + (void)__db_r_detach(dbenv, &mgr->reginfo, 0); - (void)__db_r_detach(dbenv, &tmgrp->reginfo, 0); - } - if (tmgrp->mutexp != NULL) - __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp); - __os_free(dbenv, tmgrp); + (void)__mutex_free(dbenv, &mgr->mutex); + __os_free(dbenv, mgr); return (ret); } @@ -101,16 +94,13 @@ err: if (tmgrp->reginfo.addr != NULL) { * Initialize a transaction region in shared memory. */ static int -__txn_init(dbenv, tmgrp) +__txn_init(dbenv, mgr) DB_ENV *dbenv; - DB_TXNMGR *tmgrp; + DB_TXNMGR *mgr; { DB_LSN last_ckp; DB_TXNREGION *region; int ret; -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - u_int8_t *addr; -#endif /* * Find the last checkpoint in the log. @@ -121,7 +111,8 @@ __txn_init(dbenv, tmgrp) * The log system has already walked through the last * file. Get the LSN of a checkpoint it may have found. 
*/ - __log_get_cached_ckp_lsn(dbenv, &last_ckp); + if ((ret = __log_get_cached_ckp_lsn(dbenv, &last_ckp)) != 0) + return (ret); /* * If that didn't work, look backwards from the beginning of @@ -132,20 +123,28 @@ __txn_init(dbenv, tmgrp) return (ret); } - if ((ret = __db_shalloc(&tmgrp->reginfo, - sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) { + if ((ret = __db_shalloc(&mgr->reginfo, + sizeof(DB_TXNREGION), 0, &mgr->reginfo.primary)) != 0) { __db_err(dbenv, "Unable to allocate memory for the transaction region"); return (ret); } - tmgrp->reginfo.rp->primary = - R_OFFSET(&tmgrp->reginfo, tmgrp->reginfo.primary); - region = tmgrp->reginfo.primary; + mgr->reginfo.rp->primary = + R_OFFSET(&mgr->reginfo, mgr->reginfo.primary); + region = mgr->reginfo.primary; memset(region, 0, sizeof(*region)); + if ((ret = __mutex_alloc( + dbenv, MTX_TXN_REGION, 0, ®ion->mtx_region)) != 0) + return (ret); + region->maxtxns = dbenv->tx_max; region->last_txnid = TXN_MINIMUM; region->cur_maxid = TXN_MAXIMUM; + + if ((ret = __mutex_alloc( + dbenv, MTX_TXN_CHKPT, 0, ®ion->mtx_ckp)) != 0) + return (ret); region->last_ckp = last_ckp; region->time_ckp = time(NULL); @@ -153,18 +152,7 @@ __txn_init(dbenv, tmgrp) region->stat.st_maxtxns = region->maxtxns; SH_TAILQ_INIT(®ion->active_txn); -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - /* Allocate room for the txn maintenance info and initialize it. 
*/ - if ((ret = __db_shalloc(&tmgrp->reginfo, - sizeof(REGMAINT) + TXN_MAINT_SIZE, 0, &addr)) != 0) { - __db_err(dbenv, - "Unable to allocate memory for mutex maintenance"); - return (ret); - } - __db_maintinit(&tmgrp->reginfo, addr, TXN_MAINT_SIZE); - region->maint_off = R_OFFSET(&tmgrp->reginfo, addr); -#endif - return (0); + return (ret); } /* @@ -187,6 +175,8 @@ __txn_findlastckp(dbenv, lsnp, max_lsn) int ret, t_ret; u_int32_t rectype; + ZERO_LSN(*lsnp); + if ((ret = __log_cursor(dbenv, &logc)) != 0) return (ret); @@ -194,17 +184,15 @@ __txn_findlastckp(dbenv, lsnp, max_lsn) memset(&dbt, 0, sizeof(dbt)); if (max_lsn != NULL) { lsn = *max_lsn; - ZERO_LSN(*lsnp); if ((ret = __log_c_get(logc, &lsn, &dbt, DB_SET)) != 0) goto err; } else { if ((ret = __log_c_get(logc, &lsn, &dbt, DB_LAST)) != 0) goto err; /* - * Twiddle the last LSN so it points to the - * beginning of the last file; we know there's - * no checkpoint after that, since the log - * system already looked there. + * Twiddle the last LSN so it points to the beginning of the + * last file; we know there's no checkpoint after that, since + * the log system already looked there. */ lsn.offset = 0; } @@ -222,6 +210,7 @@ __txn_findlastckp(dbenv, lsnp, max_lsn) err: if ((t_ret = __log_c_close(logc)) != 0 && ret == 0) ret = t_ret; + /* * Not finding a checkpoint is not an error; there may not exist * one in the log. @@ -239,16 +228,15 @@ int __txn_dbenv_refresh(dbenv) DB_ENV *dbenv; { - DB_TXN *txnp; - DB_TXNMGR *tmgrp; + DB_TXN *txn; + DB_TXNMGR *mgr; REGINFO *reginfo; - TXN_DETAIL *td; u_int32_t txnid; int aborted, ret, t_ret; ret = 0; - tmgrp = dbenv->tx_handle; - reginfo = &tmgrp->reginfo; + mgr = dbenv->tx_handle; + reginfo = &mgr->reginfo; /* * This function can only be called once per process (i.e., not @@ -262,13 +250,12 @@ __txn_dbenv_refresh(dbenv) * to a known state. 
*/ aborted = 0; - if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) { - while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) { + if (TAILQ_FIRST(&mgr->txn_chain) != NULL) { + while ((txn = TAILQ_FIRST(&mgr->txn_chain)) != NULL) { /* Prepared transactions are OK. */ - td = R_ADDR(reginfo, txnp->off); - txnid = txnp->txnid; - if (td->status == TXN_PREPARED) { - if ((ret = __txn_discard(txnp, 0)) != 0) { + txnid = txn->txnid; + if (((TXN_DETAIL *)txn->td)->status == TXN_PREPARED) { + if ((ret = __txn_discard_int(txn, 0)) != 0) { __db_err(dbenv, "Unable to discard txn 0x%x: %s", txnid, db_strerror(ret)); @@ -277,7 +264,7 @@ __txn_dbenv_refresh(dbenv) continue; } aborted = 1; - if ((t_ret = __txn_abort(txnp)) != 0) { + if ((t_ret = __txn_abort(txn)) != 0) { __db_err(dbenv, "Unable to abort transaction 0x%x: %s", txnid, db_strerror(t_ret)); @@ -299,14 +286,14 @@ __txn_dbenv_refresh(dbenv) ret = t_ret; /* Discard the per-thread lock. */ - if (tmgrp->mutexp != NULL) - __db_mutex_free(dbenv, reginfo, tmgrp->mutexp); + if ((t_ret = __mutex_free(dbenv, &mgr->mutex)) != 0 && ret == 0) + ret = t_ret; /* Detach from the region. */ if ((t_ret = __db_r_detach(dbenv, reginfo, 0)) != 0 && ret == 0) ret = t_ret; - __os_free(dbenv, tmgrp); + __os_free(dbenv, mgr); dbenv->tx_handle = NULL; return (ret); @@ -328,45 +315,9 @@ __txn_region_size(dbenv) s = sizeof(DB_TXNREGION) + dbenv->tx_max * sizeof(TXN_DETAIL) + 10 * 1024; -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - if (F_ISSET(dbenv, DB_ENV_THREAD)) - s += sizeof(REGMAINT) + TXN_MAINT_SIZE; -#endif return (s); } -/* - * __txn_region_destroy - * Destroy any region maintenance info. - * - * PUBLIC: void __txn_region_destroy __P((DB_ENV *, REGINFO *)); - */ -void -__txn_region_destroy(dbenv, infop) - DB_ENV *dbenv; - REGINFO *infop; -{ - /* - * This routine is called in two cases: when discarding the mutexes - * from a previous Berkeley DB run, during recovery, and two, when - * discarding the mutexes as we shut down the database environment. 
- * In the latter case, we also need to discard shared memory segments, - * this is the last time we use them, and the last region-specific - * call we make. - */ -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - DB_TXNREGION *region; - - region = R_ADDR(infop, infop->rp->primary); - - __db_shlocks_destroy(infop, R_ADDR(infop, region->maint_off)); - if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) - __db_shalloc_free(infop, R_ADDR(infop, region->maint_off)); -#endif - if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE)) - __db_shalloc_free(infop, infop->primary); -} - /* * __txn_id_set -- * Set the current transaction ID and current maximum unused ID (for diff --git a/storage/bdb/txn/txn_stat.c b/storage/bdb/txn/txn_stat.c index 2f9b8357c59..9c02a07b019 100644 --- a/storage/bdb/txn/txn_stat.c +++ b/storage/bdb/txn/txn_stat.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: txn_stat.c,v 11.37 2004/10/15 16:59:44 bostic Exp $ + * $Id: txn_stat.c,v 12.8 2005/10/07 20:21:43 ubell Exp $ */ #include "db_config.h" @@ -52,7 +52,8 @@ __txn_stat_pp(dbenv, statp, flags) DB_TXN_STAT **statp; u_int32_t flags; { - int rep_check, ret; + DB_THREAD_INFO *ip; + int ret; PANIC_CHECK(dbenv); ENV_REQUIRES_CONFIG(dbenv, @@ -62,12 +63,9 @@ __txn_stat_pp(dbenv, statp, flags) "DB_ENV->txn_stat", flags, DB_STAT_CLEAR)) != 0) return (ret); - rep_check = IS_ENV_REPLICATED(dbenv) ? 
1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __txn_stat(dbenv, statp, flags); - if (rep_check) - __env_db_rep_exit(dbenv); + ENV_ENTER(dbenv, ip); + REPLICATION_WRAP(dbenv, (__txn_stat(dbenv, statp, flags)), ret); + ENV_LEAVE(dbenv, ip); return (ret); } @@ -84,7 +82,7 @@ __txn_stat(dbenv, statp, flags) DB_TXNMGR *mgr; DB_TXNREGION *region; DB_TXN_STAT *stats; - TXN_DETAIL *txnp; + TXN_DETAIL *td; size_t nbytes; u_int32_t maxtxn, ndx; int ret; @@ -108,7 +106,7 @@ __txn_stat(dbenv, statp, flags) if ((ret = __os_umalloc(dbenv, nbytes, &stats)) != 0) return (ret); - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); memcpy(stats, ®ion->stat, sizeof(*stats)); stats->st_last_txnid = region->last_txnid; stats->st_last_ckp = region->last_ckp; @@ -116,35 +114,44 @@ __txn_stat(dbenv, statp, flags) stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1]; for (ndx = 0, - txnp = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); - txnp != NULL && ndx < maxtxn; - txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail), ++ndx) { - stats->st_txnarray[ndx].txnid = txnp->txnid; - if (txnp->parent == INVALID_ROFF) + td = SH_TAILQ_FIRST(®ion->active_txn, __txn_detail); + td != NULL && ndx < maxtxn; + td = SH_TAILQ_NEXT(td, links, __txn_detail), ++ndx) { + stats->st_txnarray[ndx].txnid = td->txnid; + if (td->parent == INVALID_ROFF) stats->st_txnarray[ndx].parentid = TXN_INVALID; else stats->st_txnarray[ndx].parentid = ((TXN_DETAIL *)R_ADDR(&mgr->reginfo, - txnp->parent))->txnid; - stats->st_txnarray[ndx].lsn = txnp->begin_lsn; - if ((stats->st_txnarray[ndx].xa_status = txnp->xa_status) != 0) + td->parent))->txnid; + stats->st_txnarray[ndx].pid = td->pid; + stats->st_txnarray[ndx].tid = td->tid; + stats->st_txnarray[ndx].lsn = td->begin_lsn; + if ((stats->st_txnarray[ndx].xa_status = td->xa_status) != 0) memcpy(stats->st_txnarray[ndx].xid, - txnp->xid, DB_XIDDATASIZE); + td->xid, DB_XIDDATASIZE); + if (td->name != INVALID_ROFF) { + (void)strncpy(stats->st_txnarray[ndx].name, + 
R_ADDR(&mgr->reginfo, td->name), + sizeof(stats->st_txnarray[ndx].name) - 1); + stats->st_txnarray[ndx].name[ + sizeof(stats->st_txnarray[ndx].name) - 1] = '\0'; + } else + stats->st_txnarray[ndx].name[0] = '\0'; } - stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait; - stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait; + __mutex_set_wait_info(dbenv, region->mtx_region, + &stats->st_region_wait, &stats->st_region_nowait); stats->st_regsize = mgr->reginfo.rp->size; if (LF_ISSET(DB_STAT_CLEAR)) { - mgr->reginfo.rp->mutex.mutex_set_wait = 0; - mgr->reginfo.rp->mutex.mutex_set_nowait = 0; + __mutex_clear(dbenv, region->mtx_region); memset(®ion->stat, 0, sizeof(region->stat)); region->stat.st_maxtxns = region->maxtxns; region->stat.st_maxnactive = region->stat.st_nactive = stats->st_nactive; } - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); *statp = stats; return (0); @@ -161,7 +168,8 @@ __txn_stat_print_pp(dbenv, flags) DB_ENV *dbenv; u_int32_t flags; { - int rep_check, ret; + DB_THREAD_INFO *ip; + int ret; PANIC_CHECK(dbenv); ENV_REQUIRES_CONFIG(dbenv, @@ -171,12 +179,9 @@ __txn_stat_print_pp(dbenv, flags) flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) return (ret); - rep_check = IS_ENV_REPLICATED(dbenv) ? 
1 : 0; - if (rep_check) - __env_rep_enter(dbenv); - ret = __txn_stat_print(dbenv, flags); - if (rep_check) - __env_db_rep_exit(dbenv); + ENV_ENTER(dbenv, ip); + REPLICATION_WRAP(dbenv, (__txn_stat_print(dbenv, flags)), ret); + ENV_LEAVE(dbenv, ip); return (ret); } @@ -222,6 +227,7 @@ __txn_print_stats(dbenv, flags) DB_TXN_STAT *sp; u_int32_t i; int ret; + char buf[DB_THREADID_STRLEN]; if ((ret = __txn_stat(dbenv, &sp, flags)) != 0) return (ret); @@ -262,12 +268,14 @@ __txn_print_stats(dbenv, flags) qsort(sp->st_txnarray, sp->st_nactive, sizeof(sp->st_txnarray[0]), __txn_compare); - __db_msg(dbenv, "List of active transactions:"); + __db_msg(dbenv, "Active transactions:"); DB_MSGBUF_INIT(&mb); for (i = 0; i < sp->st_nactive; ++i) { - __db_msgadd(dbenv, - &mb, "\tID: %lx; begin LSN: file/offset %lu/%lu", + __db_msgadd(dbenv, &mb, + "\t%lx: pid/thread %s; begin LSN: file/offset %lu/%lu", (u_long)sp->st_txnarray[i].txnid, + dbenv->thread_id_string(dbenv, + sp->st_txnarray[i].pid, sp->st_txnarray[i].tid, buf), (u_long)sp->st_txnarray[i].lsn.file, (u_long)sp->st_txnarray[i].lsn.offset); if (sp->st_txnarray[i].parentid != 0) @@ -275,6 +283,9 @@ __txn_print_stats(dbenv, flags) (u_long)sp->st_txnarray[i].parentid); if (sp->st_txnarray[i].xa_status != 0) __txn_xid_stats(dbenv, &mb, &sp->st_txnarray[i]); + if (sp->st_txnarray[i].name[0] != '\0') + __db_msgadd( + dbenv, &mb, "; \"%s\"", sp->st_txnarray[i].name); DB_MSGBUF_FLUSH(dbenv, &mb); } @@ -302,23 +313,26 @@ __txn_print_all(dbenv, flags) mgr = dbenv->tx_handle; region = mgr->reginfo.primary; - R_LOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_LOCK(dbenv); __db_print_reginfo(dbenv, &mgr->reginfo, "Transaction"); __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); __db_msg(dbenv, "DB_TXNMGR handle information:"); - - __db_print_mutex(dbenv, NULL, mgr->mutexp, "DB_TXNMGR mutex", flags); + __mutex_print_debug_single(dbenv, "DB_TXNMGR mutex", mgr->mutex, flags); __db_dl(dbenv, "Number of transactions discarded", (u_long)mgr->n_discards); 
__db_msg(dbenv, "%s", DB_GLOBAL(db_line)); __db_msg(dbenv, "DB_TXNREGION handle information:"); + __mutex_print_debug_single( + dbenv, "DB_TXNREGION region mutex", region->mtx_region, flags); STAT_ULONG("Maximum number of active txns", region->maxtxns); STAT_HEX("Last transaction ID allocated", region->last_txnid); STAT_HEX("Current maximum unused ID", region->cur_maxid); + __mutex_print_debug_single( + dbenv, "checkpoint mutex", region->mtx_ckp, flags); STAT_LSN("Last checkpoint LSN", ®ion->last_ckp); __db_msg(dbenv, "%.24s\tLast checkpoint timestamp", @@ -334,23 +348,23 @@ __txn_print_all(dbenv, flags) * Display list of XA transactions in the DB_ENV handle. */ - R_UNLOCK(dbenv, &mgr->reginfo); + TXN_SYSTEM_UNLOCK(dbenv); return (0); } static void -__txn_xid_stats(dbenv, mbp, txnp) +__txn_xid_stats(dbenv, mbp, txn_active) DB_ENV *dbenv; DB_MSGBUF *mbp; - DB_TXN_ACTIVE *txnp; + DB_TXN_ACTIVE *txn_active; { u_int32_t v, *xp; u_int i; int cnt; const char *s; - switch (txnp->xa_status) { + switch (txn_active->xa_status) { case TXN_XA_ABORTED: s = "ABORTED"; break; @@ -372,14 +386,14 @@ __txn_xid_stats(dbenv, mbp, txnp) default: s = "UNKNOWN STATE"; __db_err(dbenv, - "XA: unknown state: %lu", (u_long)txnp->xa_status); + "XA: unknown state: %lu", (u_long)txn_active->xa_status); break; } __db_msgadd(dbenv, mbp, "\tXA: %s; XID:\n\t\t", s == NULL ? "" : s); - for (cnt = 0, xp = (u_int32_t *)txnp->xid, + for (cnt = 0, xp = (u_int32_t *)txn_active->xid, i = 0; i < DB_XIDDATASIZE; i += sizeof(u_int32_t)) { memcpy(&v, xp++, sizeof(u_int32_t)); - __db_msgadd(dbenv, mbp, "%#x ", v); + __db_msgadd(dbenv, mbp, "%#lx ", (u_long)v); if (++cnt == 4) { DB_MSGBUF_FLUSH(dbenv, mbp); __db_msgadd(dbenv, mbp, "\t\t"); diff --git a/storage/bdb/txn/txn_util.c b/storage/bdb/txn/txn_util.c index ff94cd7dbb5..ac9ea6d94c8 100644 --- a/storage/bdb/txn/txn_util.c +++ b/storage/bdb/txn/txn_util.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 2001-2004 + * Copyright (c) 2001-2005 * Sleepycat Software. All rights reserved. * - * $Id: txn_util.c,v 11.28 2004/09/16 17:55:19 margo Exp $ + * $Id: txn_util.c,v 12.2 2005/09/28 17:45:20 margo Exp $ */ #include "db_config.h" @@ -35,6 +35,7 @@ struct __txn_event { /* Delayed remove. */ char *name; u_int8_t *fileid; + int inmem; } r; struct { /* Lock event. */ @@ -79,14 +80,15 @@ __txn_closeevent(dbenv, txn, dbp) * Creates a remove event that can be added to the commit list. * * PUBLIC: int __txn_remevent __P((DB_ENV *, - * PUBLIC: DB_TXN *, const char *, u_int8_t*)); + * PUBLIC: DB_TXN *, const char *, u_int8_t *, int)); */ int -__txn_remevent(dbenv, txn, name, fileid) +__txn_remevent(dbenv, txn, name, fileid, inmem) DB_ENV *dbenv; DB_TXN *txn; const char *name; u_int8_t *fileid; + int inmem; { int ret; TXN_EVENT *e; @@ -105,6 +107,7 @@ __txn_remevent(dbenv, txn, name, fileid) memcpy(e->u.r.fileid, fileid, DB_FILE_ID_LEN); } + e->u.r.inmem = inmem; e->op = TXN_REMOVE; TAILQ_INSERT_TAIL(&txn->events, e, links); @@ -115,9 +118,10 @@ err: if (e != NULL) return (ret); } + /* * __txn_remrem -- - * Remove a remove event because the remove has be superceeded, + * Remove a remove event because the remove has been superceeded, * by a create of the same name, for example. * * PUBLIC: void __txn_remrem __P((DB_ENV *, DB_TXN *, const char *)); @@ -285,8 +289,8 @@ __txn_doevents(dbenv, txn, opcode, preprocess) case TXN_REMOVE: if (e->u.r.fileid != NULL) { if ((t_ret = __memp_nameop(dbenv, - e->u.r.fileid, - NULL, e->u.r.name, NULL)) != 0 && ret == 0) + e->u.r.fileid, NULL, e->u.r.name, + NULL, e->u.r.inmem)) != 0 && ret == 0) ret = t_ret; } else if ((t_ret = __os_unlink(dbenv, e->u.r.name)) != 0 && ret == 0) diff --git a/storage/bdb/xa/xa.c b/storage/bdb/xa/xa.c index 00f3461e3ba..5ba4586f566 100644 --- a/storage/bdb/xa/xa.c +++ b/storage/bdb/xa/xa.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. 
* - * Copyright (c) 1998-2004 + * Copyright (c) 1998-2005 * Sleepycat Software. All rights reserved. * - * $Id: xa.c,v 11.35 2004/10/15 16:59:45 bostic Exp $ + * $Id: xa.c,v 12.5 2005/11/01 00:44:39 bostic Exp $ */ #include "db_config.h" @@ -19,17 +19,17 @@ #include "db_int.h" #include "dbinc/txn.h" -static int __db_xa_close __P((char *, int, long)); -static int __db_xa_commit __P((XID *, int, long)); -static int __db_xa_complete __P((int *, int *, int, long)); -static int __db_xa_end __P((XID *, int, long)); -static int __db_xa_forget __P((XID *, int, long)); -static int __db_xa_open __P((char *, int, long)); -static int __db_xa_prepare __P((XID *, int, long)); -static int __db_xa_recover __P((XID *, long, int, long)); -static int __db_xa_rollback __P((XID *, int, long)); -static int __db_xa_start __P((XID *, int, long)); -static void __xa_put_txn __P((DB_ENV *, DB_TXN *)); +static int __db_xa_close __P((char *, int, long)); +static int __db_xa_commit __P((XID *, int, long)); +static int __db_xa_complete __P((int *, int *, int, long)); +static int __db_xa_end __P((XID *, int, long)); +static int __db_xa_forget __P((XID *, int, long)); +static int __db_xa_open __P((char *, int, long)); +static int __db_xa_prepare __P((XID *, int, long)); +static int __db_xa_recover __P((XID *, long, int, long)); +static int __db_xa_rollback __P((XID *, int, long)); +static int __db_xa_start __P((XID *, int, long)); +static int __xa_put_txn __P((DB_ENV *, DB_TXN *)); /* * Possible flag values: @@ -58,23 +58,12 @@ const struct xa_switch_t db_xa_switch = { }; /* - * If you want your XA server to be multi-threaded, then you must + * If you want your XA server to be multi-threaded, then you must (at least) * edit this file and change: * #undef XA_MULTI_THREAD * to: * #define XA_MULTI_THREAD 1 - * - * You must then modify the FILL ME IN; section below to assign a - * 32-bit unsigned, unique thread ID to the variable tid. 
If no - * such thread ID is available, e.g., you're using pthreads on a POSIX - * 1003.1 system where a pthread_t is declared as a structure, you - * will probably want to change the tid field of the DB_TXN structure - * to be the correct type in which to define a thread ID on the system, - * and then modify both the FILL ME IN section and then subsequent - * comparison of the thread IDs in the XA transaction list, as the - * current simple equality tests may not work. */ - #undef XA_MULTI_THREAD /* @@ -91,55 +80,64 @@ __xa_get_txn(dbenv, txnp, do_init) DB_TXN **txnp; int do_init; { - int ret; #ifdef XA_MULTI_THREAD DB_TXN *t; - u_int32_t tid; DB_TXNMGR *mgr; -#else - COMPQUIET(do_init, 0); + TXN_DETAIL *td; + db_threadid_t tid; + pid_t pid; #endif + int ret; + ret = 0; #ifdef XA_MULTI_THREAD - /* Specify Thread-ID retrieval here. */ - tid = FILL ME IN + dbenv->thread_id(dbenv, &pid, &tid); *txnp = NULL; + + DB_ASSERT(dbenv->tx_handle != NULL); mgr = (DB_TXNMGR *)dbenv->tx_handle; /* - * We need to protect the xa_txn linked list, but the - * environment does not have a mutex. Since we are in - * an XA transaction environment, we know that there is - * a transaction structure, so use its mutex. + * We need to protect the xa_txn linked list, but the environment does + * not have a mutex. Since we are in an XA transaction environment, + * we know there is a transaction structure, we can use its mutex. */ - DB_ASSERT(dbenv->tx_handle != NULL); - MUTEX_THREAD_LOCK(mgr->mutexp); + MUTEX_LOCK(dbenv, mgr->mutex); for (t = TAILQ_FIRST(&dbenv->xa_txn); t != NULL; - t = TAILQ_NEXT(t, xalinks)) - /* - * FILL ME IN; if tids are not a 32-bit integral type; - * put a comparison here that will work. 
- */ + t = TAILQ_NEXT(t, xalinks)) { + td = t->td; + if (td->pid != pid) + continue; +#ifdef HAVE_INTEGRAL_THREAD_TYPE if (t->tid == tid) { *txnp = t; break; } - MUTEX_THREAD_UNLOCK(mgr->mutexp); +#else + if (memcmp(&t->tid, &tid, sizeof(tid)) == 0) { + *txnp = t; + break; + } +#endif + } + MUTEX_UNLOCK(dbenv, mgr->mutex); if (*txnp == NULL) { if (!do_init) ret = EINVAL; else if ((ret = - __os_malloc(dbenv, sizeof(DB_TXN), NULL, txnp)) == 0) { + __os_malloc(dbenv, sizeof(DB_TXN), txnp)) == 0) { (*txnp)->tid = tid; - MUTEX_THREAD_LOCK(mgr->mutexp); + MUTEX_LOCK(dbenv, mgr->mutex); TAILQ_INSERT_HEAD(&dbenv->xa_txn, *txnp, xalinks); - MUTEX_THREAD_UNLOCK(mgr->mutexp); + MUTEX_UNLOCK(dbenv, mgr->mutex); } } #else + COMPQUIET(do_init, 0); + *txnp = TAILQ_FIRST(&dbenv->xa_txn); if (*txnp == NULL && (ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), txnp)) == 0) { @@ -151,7 +149,7 @@ __xa_get_txn(dbenv, txnp, do_init) return (ret); } -static void +static int __xa_put_txn(dbenv, txnp) DB_ENV *dbenv; DB_TXN *txnp; @@ -160,14 +158,15 @@ __xa_put_txn(dbenv, txnp) DB_TXNMGR *mgr; mgr = (DB_TXNMGR *)dbenv->tx_handle; - MUTEX_THREAD_LOCK(mgr->mutexp); + MUTEX_LOCK(dbenv, mgr->mutex); TAILQ_REMOVE(&dbenv->xa_txn, txnp, xalinks); - MUTEX_THREAD_UNLOCK(mgr->mutexp); + MUTEX_UNLOCK(dbenv, mgr->mutex); __os_free(dbenv, txnp); #else COMPQUIET(dbenv, NULL); txnp->txnid = TXN_INVALID; #endif + return (0); } #ifdef XA_MULTI_THREAD @@ -213,8 +212,6 @@ __db_xa_open(xa_info, rmid, arg_flags) /* Verify if we already have this environment open. */ if (__db_rmid_to_env(rmid, &dbenv) == 0) return (XA_OK); - if (__os_calloc(dbenv, 1, sizeof(DB_ENV), &dbenv) != 0) - return (XAER_RMERR); /* Open a new environment. */ if (db_env_create(&dbenv, 0) != 0) @@ -347,16 +344,15 @@ __db_xa_start(xid, rmid, arg_flags) /* Now, fill in the global transaction structure. 
*/ if (__xa_get_txn(dbenv, &txnp, 1) != 0) return (XAER_RMERR); - __txn_continue(dbenv, txnp, td, off); + __txn_continue(dbenv, txnp, td); td->xa_status = TXN_XA_STARTED; } else { if (__xa_get_txn(dbenv, &txnp, 1) != 0) return (XAER_RMERR); if (__txn_xa_begin(dbenv, txnp)) return (XAER_RMERR); - (void)__db_map_xid(dbenv, xid, txnp->off); - td = R_ADDR( - &((DB_TXNMGR *)dbenv->tx_handle)->reginfo, txnp->off); + (void)__db_map_xid(dbenv, xid, txnp->td); + td = txnp->td; td->xa_status = TXN_XA_STARTED; } return (XA_OK); @@ -389,10 +385,10 @@ __db_xa_end(xid, rmid, flags) if (__xa_get_txn(dbenv, &txn, 0) != 0) return (XAER_RMERR); - if (off != txn->off) + td = R_ADDR(&((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); + if (td != txn->td) return (XAER_PROTO); - td = R_ADDR(&((DB_TXNMGR *)dbenv->tx_handle)->reginfo, off); if (td->xa_status == TXN_XA_DEADLOCKED) return (XA_RBDEADLOCK); @@ -402,9 +398,6 @@ __db_xa_end(xid, rmid, flags) if (td->xa_status != TXN_XA_STARTED) return (XAER_PROTO); - /* Update the shared memory last_lsn field */ - td->last_lsn = txn->last_lsn; - /* * If we ever support XA migration, we cannot keep SUSPEND/END * status in the shared region; it would have to be process local. @@ -414,7 +407,11 @@ __db_xa_end(xid, rmid, flags) else td->xa_status = TXN_XA_ENDED; - __xa_put_txn(dbenv, txn); + /* + * XXX + * This can fail in XA_MULTI_THREAD mode. + */ + (void)__xa_put_txn(dbenv, txn); return (XA_OK); } @@ -462,15 +459,18 @@ __db_xa_prepare(xid, rmid, arg_flags) /* Now, fill in the global transaction structure. */ if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_PROTO); - __txn_continue(dbenv, txnp, td, off); + __txn_continue(dbenv, txnp, td); if (txnp->prepare(txnp, (u_int8_t *)xid->data) != 0) return (XAER_RMERR); td->xa_status = TXN_XA_PREPARED; - /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(dbenv, txnp); + /* + * XXX + * This can fail in XA_MULTI_THREAD mode. 
+ */ + (void)__xa_put_txn(dbenv, txnp); return (XA_OK); } @@ -526,13 +526,16 @@ __db_xa_commit(xid, rmid, arg_flags) /* Now, fill in the global transaction structure. */ if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_RMERR); - __txn_continue(dbenv, txnp, td, off); + __txn_continue(dbenv, txnp, td); if (txnp->commit(txnp, 0) != 0) return (XAER_RMERR); - /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(dbenv, txnp); + /* + * XXX + * This can fail in XA_MULTI_THREAD mode. + */ + (void)__xa_put_txn(dbenv, txnp); return (XA_OK); } @@ -616,12 +619,15 @@ __db_xa_rollback(xid, rmid, arg_flags) /* Now, fill in the global transaction structure. */ if (__xa_get_txn(dbenv, &txnp, 0) != 0) return (XAER_RMERR); - __txn_continue(dbenv, txnp, td, off); + __txn_continue(dbenv, txnp, td); if (txnp->abort(txnp) != 0) return (XAER_RMERR); - /* No fatal value that would require an XAER_RMFAIL. */ - __xa_put_txn(dbenv, txnp); + /* + * XXX + * This can fail in XA_MULTI_THREAD mode. + */ + (void)__xa_put_txn(dbenv, txnp); return (XA_OK); } diff --git a/storage/bdb/xa/xa_db.c b/storage/bdb/xa/xa_db.c index 550d47dd82a..54751900c11 100644 --- a/storage/bdb/xa/xa_db.c +++ b/storage/bdb/xa/xa_db.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1998-2004 + * Copyright (c) 1998-2005 * Sleepycat Software. All rights reserved. 
* - * $Id: xa_db.c,v 11.26 2004/01/28 03:36:40 bostic Exp $ + * $Id: xa_db.c,v 12.4 2005/10/20 18:57:16 bostic Exp $ */ #include "db_config.h" @@ -23,6 +23,8 @@ static int __xa_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); static int __xa_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int)); static int __xa_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); +static int __xa_set_txn __P((DB *, DB_TXN **, int)); +static int __xa_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); typedef struct __xa_methods { int (*close) __P((DB *, u_int32_t)); @@ -32,18 +34,56 @@ typedef struct __xa_methods { int (*open) __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int)); int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t)); + int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t)); } XA_METHODS; -#define SET_TXN(PARAM, LOCAL) { \ - (LOCAL) = NULL; \ - if (!LF_ISSET(DB_AUTO_COMMIT)) { \ - if ((PARAM) != NULL) \ - (LOCAL) = (PARAM); \ - else if (__xa_get_txn(dbp->dbenv, &(LOCAL), 0) != 0) \ - (LOCAL) = NULL; \ - else if ((LOCAL) != NULL && (LOCAL)->txnid == TXN_INVALID) \ - (LOCAL) = NULL; \ - } \ +/* + * __xa_set_txn -- + * Find a transaction handle. + */ +static int +__xa_set_txn(dbp, txnpp, no_xa_txn) + DB *dbp; + DB_TXN **txnpp; + int no_xa_txn; +{ + DB_ENV *dbenv; + int ret; + + dbenv = dbp->dbenv; + + /* + * It doesn't make sense for a server to specify a DB_TXN handle. + * As the server can't know if other operations it has done have + * committed/aborted, it can self-deadlock. If the server wants + * other transactions, it can open other DB handles and use them. + * Disallow specified DB_TXN handles. + */ + if (*txnpp != NULL) { + __db_err(dbenv, + "transaction handles should not be directly specified to XA interfaces"); + return (EINVAL); + } + + /* See if the TM has declared a transaction. 
*/ + if ((ret = __xa_get_txn(dbenv, txnpp, 0)) != 0) + return (ret); + if ((*txnpp)->txnid != TXN_INVALID) + return (0); + + /* + * We may be opening databases in the server initialization routine. + * In that case, it's reasonable not to have an XA transaction. It's + * also reasonable to open a database as part of an XA transaction, + * allow both. + */ + if (no_xa_txn) { + *txnpp = NULL; + return (0); + } + + __db_err(dbenv, "no XA transaction declared"); + return (EINVAL); } /* @@ -60,8 +100,8 @@ __db_xa_create(dbp) int ret; /* - * Interpose XA routines in front of any method that takes a TXN - * ID as an argument. + * Allocate the XA internal structure, and wrap the open and close + * calls. */ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(XA_METHODS), &xam)) != 0) return (ret); @@ -79,7 +119,6 @@ __db_xa_create(dbp) * __xa_open -- * XA open wrapper. */ - static int __xa_open(dbp, txn, name, subdb, type, flags, mode) DB *dbp; @@ -89,24 +128,28 @@ __xa_open(dbp, txn, name, subdb, type, flags, mode) u_int32_t flags; int mode; { - DB_TXN *t; XA_METHODS *xam; int ret; xam = (XA_METHODS *)dbp->xa_internal; - SET_TXN(txn, t); - if ((ret = xam->open(dbp, t, name, subdb, type, flags, mode)) != 0) + if ((ret = + __xa_set_txn(dbp, &txn, LF_ISSET(DB_AUTO_COMMIT) ? 1 : 0)) != 0) + return (ret); + if ((ret = xam->open(dbp, txn, name, subdb, type, flags, mode)) != 0) return (ret); + /* Wrap any DB handle method that takes a TXN ID as an argument. 
*/ xam->cursor = dbp->cursor; xam->del = dbp->del; xam->get = dbp->get; xam->put = dbp->put; + xam->truncate = dbp->truncate; dbp->cursor = __xa_cursor; dbp->del = __xa_del; dbp->get = __xa_get; dbp->put = __xa_put; + dbp->truncate = __xa_truncate; return (0); } @@ -118,15 +161,12 @@ __xa_cursor(dbp, txn, dbcp, flags) DBC **dbcp; u_int32_t flags; { - DB_TXN *t; + int ret; - if (txn != NULL) - t = txn; - else if (__xa_get_txn(dbp->dbenv, &t, 0) != 0 || - t->txnid== TXN_INVALID) - t = NULL; - - return (((XA_METHODS *)dbp->xa_internal)->cursor (dbp, t, dbcp, flags)); + if ((ret = __xa_set_txn(dbp, &txn, 0)) != 0) + return (ret); + return (((XA_METHODS *) + dbp->xa_internal)->cursor(dbp, txn, dbcp, flags)); } static int @@ -136,10 +176,11 @@ __xa_del(dbp, txn, key, flags) DBT *key; u_int32_t flags; { - DB_TXN *t; + int ret; - SET_TXN(txn, t); - return (((XA_METHODS *)dbp->xa_internal)->del(dbp, t, key, flags)); + if ((ret = __xa_set_txn(dbp, &txn, 0)) != 0) + return (ret); + return (((XA_METHODS *)dbp->xa_internal)->del(dbp, txn, key, flags)); } static int @@ -164,11 +205,12 @@ __xa_get(dbp, txn, key, data, flags) DBT *key, *data; u_int32_t flags; { - DB_TXN *t; + int ret; - SET_TXN(txn, t); - return (((XA_METHODS *)dbp->xa_internal)->get - (dbp, t, key, data, flags)); + if ((ret = __xa_set_txn(dbp, &txn, 0)) != 0) + return (ret); + return (((XA_METHODS *) + dbp->xa_internal)->get(dbp, txn, key, data, flags)); } static int @@ -178,10 +220,24 @@ __xa_put(dbp, txn, key, data, flags) DBT *key, *data; u_int32_t flags; { - DB_TXN *t; + int ret; - SET_TXN(txn, t); - - return (((XA_METHODS *)dbp->xa_internal)->put - (dbp, t, key, data, flags)); + if ((ret = __xa_set_txn(dbp, &txn, 0)) != 0) + return (ret); + return (((XA_METHODS *) + dbp->xa_internal)->put(dbp, txn, key, data, flags)); +} + +static int +__xa_truncate(dbp, txn, countp, flags) + DB *dbp; + DB_TXN *txn; + u_int32_t *countp, flags; +{ + int ret; + + if ((ret = __xa_set_txn(dbp, &txn, 0)) != 0) + return (ret); + 
return (((XA_METHODS *) + dbp->xa_internal)->truncate(dbp, txn, countp, flags)); } diff --git a/storage/bdb/xa/xa_map.c b/storage/bdb/xa/xa_map.c index 80fb3dac48c..f2bce94f252 100644 --- a/storage/bdb/xa/xa_map.c +++ b/storage/bdb/xa/xa_map.c @@ -1,10 +1,10 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 + * Copyright (c) 1996-2005 * Sleepycat Software. All rights reserved. * - * $Id: xa_map.c,v 11.25 2004/10/15 16:59:46 bostic Exp $ + * $Id: xa_map.c,v 12.4 2005/10/13 20:42:34 bostic Exp $ */ #include "db_config.h" @@ -117,29 +117,23 @@ __db_unmap_rmid(rmid) /* * __db_map_xid - * Create a mapping between this XID and the transaction at - * "off" in the shared region. + * Create a mapping between this XID and the transaction + * "td" in the shared region. * - * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, size_t)); + * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, TXN_DETAIL *)); */ int -__db_map_xid(dbenv, xid, off) +__db_map_xid(dbenv, xid, td) DB_ENV *dbenv; XID *xid; - size_t off; -{ - REGINFO *infop; TXN_DETAIL *td; - - infop = &((DB_TXNMGR *)dbenv->tx_handle)->reginfo; - td = R_ADDR(infop, off); - - R_LOCK(dbenv, infop); +{ + TXN_SYSTEM_LOCK(dbenv); memcpy(td->xid, xid->data, XIDDATASIZE); td->bqual = (u_int32_t)xid->bqual_length; td->gtrid = (u_int32_t)xid->gtrid_length; td->format = (int32_t)xid->formatID; - R_UNLOCK(dbenv, infop); + TXN_SYSTEM_UNLOCK(dbenv); return (0); }