diff options
author:    cinap_lenrek <cinap_lenrek@localhost>  2011-05-03 11:25:13 +0000
committer: cinap_lenrek <cinap_lenrek@localhost>  2011-05-03 11:25:13 +0000
commit:    458120dd40db6b4df55a4e96b650e16798ef06a0 (patch)
tree:      8f82685be24fef97e715c6f5ca4c68d34d5074ee /sys/lib/python/bsddb
parent:    3a742c699f6806c1145aea5149bf15de15a0afd7 (diff)

add hg and python

Diffstat (limited to 'sys/lib/python/bsddb'): 28 files changed, 6525 insertions, 0 deletions
diff --git a/sys/lib/python/bsddb/__init__.py b/sys/lib/python/bsddb/__init__.py new file mode 100644 index 000000000..cf3266886 --- /dev/null +++ b/sys/lib/python/bsddb/__init__.py @@ -0,0 +1,397 @@ +#---------------------------------------------------------------------- +# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA +# and Andrew Kuchling. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# o Redistributions of source code must retain the above copyright +# notice, this list of conditions, and the disclaimer that follows. +# +# o Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions, and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# o Neither the name of Digital Creations nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS +# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL +# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. 
#----------------------------------------------------------------------


"""Support for BerkeleyDB 3.3 through 4.4 with a simple interface.

For the full featured object oriented interface use the bsddb.db module
instead.  It mirrors the Sleepycat BerkeleyDB C API.
"""

try:
    if __name__ == 'bsddb3':
        # import _pybsddb binary as it should be the more recent version from
        # a standalone pybsddb addon package than the version included with
        # python as bsddb._bsddb.
        import _pybsddb
        _bsddb = _pybsddb
        from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
    else:
        import _bsddb
        from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
except ImportError:
    # Remove ourselves from sys.modules so a failed import does not leave a
    # half-initialized 'bsddb' entry behind (a later import would silently
    # get the broken module instead of retrying).
    import sys
    del sys.modules[__name__]
    raise

# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__

error = db.DBError  # So bsddb.error will mean something...

#----------------------------------------------------------------------

import sys, os

# for backwards compatibility with python versions older than 2.3, the
# iterator interface is dynamically defined and added using a mixin
# class.  old python can't tokenize it due to the yield keyword.
# NOTE(review): this is a lexicographic *string* comparison of sys.version;
# it happens to work for 2.x releases but would misclassify a hypothetical
# version string starting with '2.1x' — confirm before reusing elsewhere.
if sys.version >= '2.3':
    import UserDict
    from weakref import ref
    exec """
class _iter_mixin(UserDict.DictMixin):
    def _make_iter_cursor(self):
        cur = _DeadlockWrap(self.db.cursor)
        key = id(cur)
        self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
        return cur

    def _gen_cref_cleaner(self, key):
        # use generate the function for the weakref callback here
        # to ensure that we do not hold a strict reference to cur
        # in the callback.
        return lambda ref: self._cursor_refs.pop(key, None)

    def __iter__(self):
        try:
            cur = self._make_iter_cursor()

            # FIXME-20031102-greg: race condition.  cursor could
            # be closed by another thread before this call.

            # since we're only returning keys, we call the cursor
            # methods with flags=0, dlen=0, dofs=0
            key = _DeadlockWrap(cur.first, 0,0,0)[0]
            yield key

            next = cur.next
            while 1:
                try:
                    key = _DeadlockWrap(next, 0,0,0)[0]
                    yield key
                except _bsddb.DBCursorClosedError:
                    cur = self._make_iter_cursor()
                    # FIXME-20031101-greg: race condition.  cursor could
                    # be closed by another thread before this call.
                    _DeadlockWrap(cur.set, key,0,0,0)
                    next = cur.next
        except _bsddb.DBNotFoundError:
            return
        except _bsddb.DBCursorClosedError:
            # the database was modified during iteration.  abort.
            return

    def iteritems(self):
        if not self.db:
            return
        try:
            cur = self._make_iter_cursor()

            # FIXME-20031102-greg: race condition.  cursor could
            # be closed by another thread before this call.

            kv = _DeadlockWrap(cur.first)
            key = kv[0]
            yield kv

            next = cur.next
            while 1:
                try:
                    kv = _DeadlockWrap(next)
                    key = kv[0]
                    yield kv
                except _bsddb.DBCursorClosedError:
                    cur = self._make_iter_cursor()
                    # FIXME-20031101-greg: race condition.  cursor could
                    # be closed by another thread before this call.
                    _DeadlockWrap(cur.set, key,0,0,0)
                    next = cur.next
        except _bsddb.DBNotFoundError:
            return
        except _bsddb.DBCursorClosedError:
            # the database was modified during iteration.  abort.
            return
"""
else:
    # pre-2.3 interpreters get an empty mixin: no iteration protocol.
    class _iter_mixin: pass


class _DBWithCursor(_iter_mixin):
    """
    A simple wrapper around DB that makes it look like the bsddbobject in
    the old module.  It uses a cursor as needed to provide DB traversal.
    """
    def __init__(self, db):
        self.db = db
        self.db.set_get_returns_none(0)

        # FIXME-20031101-greg: I believe there is still the potential
        # for deadlocks in a multithreaded environment if someone
        # attempts to use the any of the cursor interfaces in one
        # thread while doing a put or delete in another thread.  The
        # reason is that _checkCursor and _closeCursors are not atomic
        # operations.
        # Doing our own locking around self.dbc,
        # self.saved_dbc_key and self._cursor_refs could prevent this.
        # TODO: A test case demonstrating the problem needs to be written.

        # self.dbc is a DBCursor object used to implement the
        # first/next/previous/last/set_location methods.
        self.dbc = None
        self.saved_dbc_key = None

        # a collection of all DBCursor objects currently allocated
        # by the _iter_mixin interface.
        self._cursor_refs = {}

    def __del__(self):
        self.close()

    def _checkCursor(self):
        # Lazily (re)create the traversal cursor, restoring the position
        # saved by a previous _closeCursors(save=1).
        if self.dbc is None:
            self.dbc = _DeadlockWrap(self.db.cursor)
            if self.saved_dbc_key is not None:
                _DeadlockWrap(self.dbc.set, self.saved_dbc_key)
                self.saved_dbc_key = None

    # This method is needed for all non-cursor DB calls to avoid
    # BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
    # and DB_THREAD to be thread safe) when intermixing database
    # operations that use the cursor internally with those that don't.
    def _closeCursors(self, save=1):
        if self.dbc:
            c = self.dbc
            self.dbc = None
            if save:
                try:
                    # remember the current key so _checkCursor can resume there
                    self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
                except db.DBError:
                    pass
            _DeadlockWrap(c.close)
            del c
        # also close every iterator cursor handed out by _iter_mixin
        for cref in self._cursor_refs.values():
            c = cref()
            if c is not None:
                _DeadlockWrap(c.close)

    def _checkOpen(self):
        if self.db is None:
            raise error, "BSDDB object has already been closed"

    def isOpen(self):
        return self.db is not None

    def __len__(self):
        self._checkOpen()
        return _DeadlockWrap(lambda: len(self.db))  # len(self.db)

    def __getitem__(self, key):
        self._checkOpen()
        return _DeadlockWrap(lambda: self.db[key]) # self.db[key]

    def __setitem__(self, key, value):
        self._checkOpen()
        self._closeCursors()
        # wrap the statement in a closure so _DeadlockWrap can retry it
        def wrapF():
            self.db[key] = value
        _DeadlockWrap(wrapF)  # self.db[key] = value

    def __delitem__(self, key):
        self._checkOpen()
        self._closeCursors()
        def wrapF():
            del self.db[key]
        _DeadlockWrap(wrapF)  # del self.db[key]

    def close(self):
        """Close all cursors and the DB; idempotent.  Returns the DB.close
        result, or 0 if the DB was already closed."""
        self._closeCursors(save=0)
        if self.dbc is not None:
            _DeadlockWrap(self.dbc.close)
        v = 0
        if self.db is not None:
            v = _DeadlockWrap(self.db.close)
        self.dbc = None
        self.db = None
        return v

    def keys(self):
        self._checkOpen()
        return _DeadlockWrap(self.db.keys)

    def has_key(self, key):
        self._checkOpen()
        return _DeadlockWrap(self.db.has_key, key)

    def set_location(self, key):
        # NOTE: uses set_range, so this positions at the smallest key >= key
        # for btree databases rather than requiring an exact match.
        self._checkOpen()
        self._checkCursor()
        return _DeadlockWrap(self.dbc.set_range, key)

    def next(self):
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.next)
        return rv

    def previous(self):
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.prev)
        return rv

    def first(self):
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.first)
        return rv

    def last(self):
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.last)
        return rv

    def sync(self):
        self._checkOpen()
        return _DeadlockWrap(self.db.sync)


#----------------------------------------------------------------------
# Compatibility object factory functions

def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
             cachesize=None, lorder=None, hflags=0):
    # Open a DB_HASH database wrapped in the legacy _DBWithCursor interface.
    # Optional tuning parameters are applied only when explicitly given.
    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    d.set_flags(hflags)
    if pgsize is not None:    d.set_pagesize(pgsize)
    if lorder is not None:    d.set_lorder(lorder)
    if ffactor is not None:   d.set_h_ffactor(ffactor)
    if nelem is not None:     d.set_h_nelem(nelem)
    d.open(file, db.DB_HASH, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------

def btopen(file, flag='c', mode=0666,
            btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
            pgsize=None, lorder=None):
    # Open a DB_BTREE database wrapped in the legacy _DBWithCursor interface.
    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    if pgsize is not None: d.set_pagesize(pgsize)
    if lorder is not None: d.set_lorder(lorder)
    d.set_flags(btflags)
    if minkeypage is not None: d.set_bt_minkey(minkeypage)
    if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
    d.open(file, db.DB_BTREE, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------


def rnopen(file, flag='c', mode=0666,
            rnflags=0, cachesize=None, pgsize=None, lorder=None,
            rlen=None, delim=None, source=None, pad=None):
    # Open a DB_RECNO (record number) database wrapped in the legacy
    # _DBWithCursor interface.  rlen/delim/source/pad configure fixed- or
    # delimiter-separated record layout when given.
    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    if pgsize is not None: d.set_pagesize(pgsize)
    if lorder is not None: d.set_lorder(lorder)
    d.set_flags(rnflags)
    if delim is not None: d.set_re_delim(delim)
    if rlen is not None: d.set_re_len(rlen)
    if source is not None: d.set_re_source(source)
    if pad is not None: d.set_re_pad(pad)
    d.open(file, db.DB_RECNO, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------

def _openDBEnv(cachesize):
    # Create a private, thread-safe DBEnv in the current directory with
    # locking and a memory pool; raises bsddb.error for a too-small cache.
    e = db.DBEnv()
    if cachesize is not None:
        if cachesize >= 20480:
            e.set_cachesize(0, cachesize)
        else:
            raise error, "cachesize must be >= 20480"
    e.set_lk_detect(db.DB_LOCK_DEFAULT)
    e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
    return e

def _checkflag(flag, file):
    # Translate the single-character legacy open mode into BerkeleyDB open
    # flags; DB_THREAD is always OR'd in.
    if flag == 'r':
        flags = db.DB_RDONLY
    elif flag == 'rw':
        flags = 0
    elif flag == 'w':
        flags =  db.DB_CREATE
    elif flag == 'c':
        flags =  db.DB_CREATE
    elif flag == 'n':
        flags = db.DB_CREATE
        #flags = db.DB_CREATE | db.DB_TRUNCATE
        # we used db.DB_TRUNCATE flag for this before but BerkeleyDB
        # 4.2.52 changed to disallowed truncate with txn environments.
        # 'n' therefore emulates truncation by unlinking any existing file.
        if file is not None and os.path.isfile(file):
            os.unlink(file)
    else:
        raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
    return flags | db.DB_THREAD

#----------------------------------------------------------------------


# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# BerkeleyDB.
#
# This assumes that if Python was built with thread support then
# BerkeleyDB was too.

try:
    import thread
    del thread
    if db.version() < (3, 3, 0):
        db.DB_THREAD = 0
except ImportError:
    db.DB_THREAD = 0

#---------------------------------------------------------------------- diff --git a/sys/lib/python/bsddb/db.py b/sys/lib/python/bsddb/db.py new file mode 100644 index 000000000..3bd0c8ba4 --- /dev/null +++ b/sys/lib/python/bsddb/db.py @@ -0,0 +1,51 @@ +#---------------------------------------------------------------------- +# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA +# and Andrew Kuchling. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# o Redistributions of source code must retain the above copyright +# notice, this list of conditions, and the disclaimer that follows. +# +# o Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions, and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# o Neither the name of Digital Creations nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS +# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL +# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE.
#----------------------------------------------------------------------


# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way.  For now
# it just simply imports everything from _db.

# Under the standalone pybsddb package the module is named bsddb3 and the
# extension module _pybsddb; the in-tree copy uses bsddb/_bsddb.
if __name__.startswith('bsddb3.'):
    # import _pybsddb binary as it should be the more recent version from
    # a standalone pybsddb addon package than the version included with
    # python as bsddb._bsddb.
    from _pybsddb import *
    from _pybsddb import __version__
else:
    from _bsddb import *
    from _bsddb import __version__

# sanity check: version() comes from the star import above
if version() < (3, 2, 0):
    raise ImportError, "correct BerkeleyDB symbols not found.  Perhaps python was statically linked with an older version?" diff --git a/sys/lib/python/bsddb/dbobj.py b/sys/lib/python/bsddb/dbobj.py new file mode 100644 index 000000000..b74ee72f7 --- /dev/null +++ b/sys/lib/python/bsddb/dbobj.py @@ -0,0 +1,254 @@ +#------------------------------------------------------------------------- +# This file contains real Python object wrappers for DB and DBEnv +# C "objects" that can be usefully subclassed. The previous SWIG +# based interface allowed this thanks to SWIG's shadow classes.
+# -- Gregory P. Smith +#------------------------------------------------------------------------- +# +# (C) Copyright 2001 Autonomous Zone Industries +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# + +# +# TODO it would be *really nice* to have an automatic shadow class populator +# so that new methods don't need to be added here manually after being +# added to _bsddb.c. +# + +import db + +try: + from UserDict import DictMixin +except ImportError: + # DictMixin is new in Python 2.3 + class DictMixin: pass + +class DBEnv: + def __init__(self, *args, **kwargs): + self._cobj = apply(db.DBEnv, args, kwargs) + + def close(self, *args, **kwargs): + return apply(self._cobj.close, args, kwargs) + def open(self, *args, **kwargs): + return apply(self._cobj.open, args, kwargs) + def remove(self, *args, **kwargs): + return apply(self._cobj.remove, args, kwargs) + def set_shm_key(self, *args, **kwargs): + return apply(self._cobj.set_shm_key, args, kwargs) + def set_cachesize(self, *args, **kwargs): + return apply(self._cobj.set_cachesize, args, kwargs) + def set_data_dir(self, *args, **kwargs): + return apply(self._cobj.set_data_dir, args, kwargs) + def set_flags(self, *args, **kwargs): + return apply(self._cobj.set_flags, args, kwargs) + def set_lg_bsize(self, *args, **kwargs): + return apply(self._cobj.set_lg_bsize, args, kwargs) + def set_lg_dir(self, *args, **kwargs): + return apply(self._cobj.set_lg_dir, args, kwargs) + def set_lg_max(self, *args, **kwargs): + return apply(self._cobj.set_lg_max, args, kwargs) + def set_lk_detect(self, *args, **kwargs): + return apply(self._cobj.set_lk_detect, args, kwargs) + if db.version() < (4,5): + def set_lk_max(self, *args, **kwargs): + return 
apply(self._cobj.set_lk_max, args, kwargs) + def set_lk_max_locks(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_locks, args, kwargs) + def set_lk_max_lockers(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_lockers, args, kwargs) + def set_lk_max_objects(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_objects, args, kwargs) + def set_mp_mmapsize(self, *args, **kwargs): + return apply(self._cobj.set_mp_mmapsize, args, kwargs) + def set_timeout(self, *args, **kwargs): + return apply(self._cobj.set_timeout, args, kwargs) + def set_tmp_dir(self, *args, **kwargs): + return apply(self._cobj.set_tmp_dir, args, kwargs) + def txn_begin(self, *args, **kwargs): + return apply(self._cobj.txn_begin, args, kwargs) + def txn_checkpoint(self, *args, **kwargs): + return apply(self._cobj.txn_checkpoint, args, kwargs) + def txn_stat(self, *args, **kwargs): + return apply(self._cobj.txn_stat, args, kwargs) + def set_tx_max(self, *args, **kwargs): + return apply(self._cobj.set_tx_max, args, kwargs) + def set_tx_timestamp(self, *args, **kwargs): + return apply(self._cobj.set_tx_timestamp, args, kwargs) + def lock_detect(self, *args, **kwargs): + return apply(self._cobj.lock_detect, args, kwargs) + def lock_get(self, *args, **kwargs): + return apply(self._cobj.lock_get, args, kwargs) + def lock_id(self, *args, **kwargs): + return apply(self._cobj.lock_id, args, kwargs) + def lock_put(self, *args, **kwargs): + return apply(self._cobj.lock_put, args, kwargs) + def lock_stat(self, *args, **kwargs): + return apply(self._cobj.lock_stat, args, kwargs) + def log_archive(self, *args, **kwargs): + return apply(self._cobj.log_archive, args, kwargs) + + def set_get_returns_none(self, *args, **kwargs): + return apply(self._cobj.set_get_returns_none, args, kwargs) + + if db.version() >= (4,0): + def log_stat(self, *args, **kwargs): + return apply(self._cobj.log_stat, args, kwargs) + + if db.version() >= (4,1): + def dbremove(self, *args, **kwargs): + return 
apply(self._cobj.dbremove, args, kwargs) + def dbrename(self, *args, **kwargs): + return apply(self._cobj.dbrename, args, kwargs) + def set_encrypt(self, *args, **kwargs): + return apply(self._cobj.set_encrypt, args, kwargs) + + if db.version() >= (4,4): + def lsn_reset(self, *args, **kwargs): + return apply(self._cobj.lsn_reset, args, kwargs) + + +class DB(DictMixin): + def __init__(self, dbenv, *args, **kwargs): + # give it the proper DBEnv C object that its expecting + self._cobj = apply(db.DB, (dbenv._cobj,) + args, kwargs) + + # TODO are there other dict methods that need to be overridden? + def __len__(self): + return len(self._cobj) + def __getitem__(self, arg): + return self._cobj[arg] + def __setitem__(self, key, value): + self._cobj[key] = value + def __delitem__(self, arg): + del self._cobj[arg] + + def append(self, *args, **kwargs): + return apply(self._cobj.append, args, kwargs) + def associate(self, *args, **kwargs): + return apply(self._cobj.associate, args, kwargs) + def close(self, *args, **kwargs): + return apply(self._cobj.close, args, kwargs) + def consume(self, *args, **kwargs): + return apply(self._cobj.consume, args, kwargs) + def consume_wait(self, *args, **kwargs): + return apply(self._cobj.consume_wait, args, kwargs) + def cursor(self, *args, **kwargs): + return apply(self._cobj.cursor, args, kwargs) + def delete(self, *args, **kwargs): + return apply(self._cobj.delete, args, kwargs) + def fd(self, *args, **kwargs): + return apply(self._cobj.fd, args, kwargs) + def get(self, *args, **kwargs): + return apply(self._cobj.get, args, kwargs) + def pget(self, *args, **kwargs): + return apply(self._cobj.pget, args, kwargs) + def get_both(self, *args, **kwargs): + return apply(self._cobj.get_both, args, kwargs) + def get_byteswapped(self, *args, **kwargs): + return apply(self._cobj.get_byteswapped, args, kwargs) + def get_size(self, *args, **kwargs): + return apply(self._cobj.get_size, args, kwargs) + def get_type(self, *args, **kwargs): + return 
apply(self._cobj.get_type, args, kwargs) + def join(self, *args, **kwargs): + return apply(self._cobj.join, args, kwargs) + def key_range(self, *args, **kwargs): + return apply(self._cobj.key_range, args, kwargs) + def has_key(self, *args, **kwargs): + return apply(self._cobj.has_key, args, kwargs) + def items(self, *args, **kwargs): + return apply(self._cobj.items, args, kwargs) + def keys(self, *args, **kwargs): + return apply(self._cobj.keys, args, kwargs) + def open(self, *args, **kwargs): + return apply(self._cobj.open, args, kwargs) + def put(self, *args, **kwargs): + return apply(self._cobj.put, args, kwargs) + def remove(self, *args, **kwargs): + return apply(self._cobj.remove, args, kwargs) + def rename(self, *args, **kwargs): + return apply(self._cobj.rename, args, kwargs) + def set_bt_minkey(self, *args, **kwargs): + return apply(self._cobj.set_bt_minkey, args, kwargs) + def set_bt_compare(self, *args, **kwargs): + return apply(self._cobj.set_bt_compare, args, kwargs) + def set_cachesize(self, *args, **kwargs): + return apply(self._cobj.set_cachesize, args, kwargs) + def set_flags(self, *args, **kwargs): + return apply(self._cobj.set_flags, args, kwargs) + def set_h_ffactor(self, *args, **kwargs): + return apply(self._cobj.set_h_ffactor, args, kwargs) + def set_h_nelem(self, *args, **kwargs): + return apply(self._cobj.set_h_nelem, args, kwargs) + def set_lorder(self, *args, **kwargs): + return apply(self._cobj.set_lorder, args, kwargs) + def set_pagesize(self, *args, **kwargs): + return apply(self._cobj.set_pagesize, args, kwargs) + def set_re_delim(self, *args, **kwargs): + return apply(self._cobj.set_re_delim, args, kwargs) + def set_re_len(self, *args, **kwargs): + return apply(self._cobj.set_re_len, args, kwargs) + def set_re_pad(self, *args, **kwargs): + return apply(self._cobj.set_re_pad, args, kwargs) + def set_re_source(self, *args, **kwargs): + return apply(self._cobj.set_re_source, args, kwargs) + def set_q_extentsize(self, *args, **kwargs): + 
return apply(self._cobj.set_q_extentsize, args, kwargs) + def stat(self, *args, **kwargs): + return apply(self._cobj.stat, args, kwargs) + def sync(self, *args, **kwargs): + return apply(self._cobj.sync, args, kwargs) + def type(self, *args, **kwargs): + return apply(self._cobj.type, args, kwargs) + def upgrade(self, *args, **kwargs): + return apply(self._cobj.upgrade, args, kwargs) + def values(self, *args, **kwargs): + return apply(self._cobj.values, args, kwargs) + def verify(self, *args, **kwargs): + return apply(self._cobj.verify, args, kwargs) + def set_get_returns_none(self, *args, **kwargs): + return apply(self._cobj.set_get_returns_none, args, kwargs) + + if db.version() >= (4,1): + def set_encrypt(self, *args, **kwargs): + return apply(self._cobj.set_encrypt, args, kwargs) + + +class DBSequence: + def __init__(self, *args, **kwargs): + self._cobj = apply(db.DBSequence, args, kwargs) + + def close(self, *args, **kwargs): + return apply(self._cobj.close, args, kwargs) + def get(self, *args, **kwargs): + return apply(self._cobj.get, args, kwargs) + def get_dbp(self, *args, **kwargs): + return apply(self._cobj.get_dbp, args, kwargs) + def get_key(self, *args, **kwargs): + return apply(self._cobj.get_key, args, kwargs) + def init_value(self, *args, **kwargs): + return apply(self._cobj.init_value, args, kwargs) + def open(self, *args, **kwargs): + return apply(self._cobj.open, args, kwargs) + def remove(self, *args, **kwargs): + return apply(self._cobj.remove, args, kwargs) + def stat(self, *args, **kwargs): + return apply(self._cobj.stat, args, kwargs) + def set_cachesize(self, *args, **kwargs): + return apply(self._cobj.set_cachesize, args, kwargs) + def set_flags(self, *args, **kwargs): + return apply(self._cobj.set_flags, args, kwargs) + def set_range(self, *args, **kwargs): + return apply(self._cobj.set_range, args, kwargs) + def get_cachesize(self, *args, **kwargs): + return apply(self._cobj.get_cachesize, args, kwargs) + def get_flags(self, *args, 
**kwargs): + return apply(self._cobj.get_flags, args, kwargs) + def get_range(self, *args, **kwargs): + return apply(self._cobj.get_range, args, kwargs) diff --git a/sys/lib/python/bsddb/dbrecio.py b/sys/lib/python/bsddb/dbrecio.py new file mode 100644 index 000000000..d439f3255 --- /dev/null +++ b/sys/lib/python/bsddb/dbrecio.py @@ -0,0 +1,190 @@ + +""" +File-like objects that read from or write to a bsddb record. + +This implements (nearly) all stdio methods. + +f = DBRecIO(db, key, txn=None) +f.close() # explicitly release resources held +flag = f.isatty() # always false +pos = f.tell() # get current position +f.seek(pos) # set current position +f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF +buf = f.read() # read until EOF +buf = f.read(n) # read up to n bytes +f.truncate([size]) # truncate file at to at most size (default: current pos) +f.write(buf) # write at current position +f.writelines(list) # for line in list: f.write(line) + +Notes: +- fileno() is left unimplemented so that code which uses it triggers + an exception early. +- There's a simple test set (see end of this file) - not yet updated + for DBRecIO. +- readline() is not implemented yet. 
+ + +From: + Itamar Shtull-Trauring <itamar@maxnm.com> +""" + +import errno +import string + +class DBRecIO: + def __init__(self, db, key, txn=None): + self.db = db + self.key = key + self.txn = txn + self.len = None + self.pos = 0 + self.closed = 0 + self.softspace = 0 + + def close(self): + if not self.closed: + self.closed = 1 + del self.db, self.txn + + def isatty(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + return 0 + + def seek(self, pos, mode = 0): + if self.closed: + raise ValueError, "I/O operation on closed file" + if mode == 1: + pos = pos + self.pos + elif mode == 2: + pos = pos + self.len + self.pos = max(0, pos) + + def tell(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + return self.pos + + def read(self, n = -1): + if self.closed: + raise ValueError, "I/O operation on closed file" + if n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + + dlen = newpos - self.pos + + r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos) + self.pos = newpos + return r + + __fixme = """ + def readline(self, length=None): + if self.closed: + raise ValueError, "I/O operation on closed file" + if self.buflist: + self.buf = self.buf + string.joinfields(self.buflist, '') + self.buflist = [] + i = string.find(self.buf, '\n', self.pos) + if i < 0: + newpos = self.len + else: + newpos = i+1 + if length is not None: + if self.pos + length < newpos: + newpos = self.pos + length + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + + def readlines(self, sizehint = 0): + total = 0 + lines = [] + line = self.readline() + while line: + lines.append(line) + total += len(line) + if 0 < sizehint <= total: + break + line = self.readline() + return lines + """ + + def truncate(self, size=None): + if self.closed: + raise ValueError, "I/O operation on closed file" + if size is None: + size = self.pos + elif size < 0: + raise IOError(errno.EINVAL, + "Negative size not allowed") + 
elif size < self.pos: + self.pos = size + self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size) + + def write(self, s): + if self.closed: + raise ValueError, "I/O operation on closed file" + if not s: return + if self.pos > self.len: + self.buflist.append('\0'*(self.pos - self.len)) + self.len = self.pos + newpos = self.pos + len(s) + self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos) + self.pos = newpos + + def writelines(self, list): + self.write(string.joinfields(list, '')) + + def flush(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + + +""" +# A little test suite + +def _test(): + import sys + if sys.argv[1:]: + file = sys.argv[1] + else: + file = '/etc/passwd' + lines = open(file, 'r').readlines() + text = open(file, 'r').read() + f = StringIO() + for line in lines[:-2]: + f.write(line) + f.writelines(lines[-2:]) + if f.getvalue() != text: + raise RuntimeError, 'write failed' + length = f.tell() + print 'File length =', length + f.seek(len(lines[0])) + f.write(lines[1]) + f.seek(0) + print 'First line =', repr(f.readline()) + here = f.tell() + line = f.readline() + print 'Second line =', repr(line) + f.seek(-len(line), 1) + line2 = f.read(len(line)) + if line != line2: + raise RuntimeError, 'bad result after seek back' + f.seek(len(line2), 1) + list = f.readlines() + line = list[-1] + f.seek(f.tell() - len(line)) + line2 = f.read() + if line != line2: + raise RuntimeError, 'bad result after seek back from EOF' + print 'Read', len(list), 'more lines' + print 'File length =', f.tell() + if f.tell() != length: + raise RuntimeError, 'bad length' + f.close() + +if __name__ == '__main__': + _test() +""" diff --git a/sys/lib/python/bsddb/dbshelve.py b/sys/lib/python/bsddb/dbshelve.py new file mode 100644 index 000000000..d341ab791 --- /dev/null +++ b/sys/lib/python/bsddb/dbshelve.py @@ -0,0 +1,299 @@ +#!/bin/env python +#------------------------------------------------------------------------ +# Copyright 
# (c) 1997-2001 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name:  dbShelve.py
#
# Description:  A reimplementation of the standard shelve.py that
#               forces the use of cPickle, and DB.
#
# Creation Date:    11/3/97 3:39:04PM
#
# License:      This is free software.  You may use this software for any
#               purpose including modification/redistribution, so long as
#               this header remains intact and that you do not claim any
#               rights of ownership or authorship of this software.  This
#               software has been tested, but no warranty is expressed or
#               implied.
#
# 13-Dec-2000:  Updated to be used with the new bsddb3 package.
#               Added DBShelfCursor class.
#
#------------------------------------------------------------------------

"""Manage shelves of pickled objects using bsddb database files for the
storage.
"""

#------------------------------------------------------------------------

import cPickle
try:
    from UserDict import DictMixin
except ImportError:
    # DictMixin is new in Python 2.3
    class DictMixin: pass
import db

#------------------------------------------------------------------------


def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
         dbenv=None, dbname=None):
    """
    A simple factory function for compatibility with the standard
    shelve.py module.  It can be used like this, where key is a string
    and data is a pickleable object:

        from bsddb import dbshelve
        db = dbshelve.open(filename)

        db[key] = data

        db.close()
    """
    # accept the legacy single-character open mode as well as raw DB_* flags
    if type(flags) == type(''):
        sflag = flags
        if sflag == 'r':
            flags = db.DB_RDONLY
        elif sflag == 'rw':
            flags = 0
        elif sflag == 'w':
            flags = db.DB_CREATE
        elif sflag == 'c':
            flags = db.DB_CREATE
        elif sflag == 'n':
            flags = db.DB_TRUNCATE | db.DB_CREATE
        else:
            raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"

    d = DBShelf(dbenv)
    d.open(filename, dbname, filetype, flags, mode)
    return d

#---------------------------------------------------------------------------

class DBShelf(DictMixin):
    """A shelf to hold pickled objects, built upon a bsddb DB object.  It
    automatically pickles/unpickles data objects going to/from the DB.
    """
    def __init__(self, dbenv=None):
        self.db = db.DB(dbenv)
        # self.binary selects the cPickle protocol (1 = binary)
        self.binary = 1


    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Many methods we can just pass through to the DB object.
+ (See below) + """ + return getattr(self.db, name) + + + #----------------------------------- + # Dictionary access methods + + def __len__(self): + return len(self.db) + + + def __getitem__(self, key): + data = self.db[key] + return cPickle.loads(data) + + + def __setitem__(self, key, value): + data = cPickle.dumps(value, self.binary) + self.db[key] = data + + + def __delitem__(self, key): + del self.db[key] + + + def keys(self, txn=None): + if txn != None: + return self.db.keys(txn) + else: + return self.db.keys() + + + def items(self, txn=None): + if txn != None: + items = self.db.items(txn) + else: + items = self.db.items() + newitems = [] + + for k, v in items: + newitems.append( (k, cPickle.loads(v)) ) + return newitems + + def values(self, txn=None): + if txn != None: + values = self.db.values(txn) + else: + values = self.db.values() + + return map(cPickle.loads, values) + + #----------------------------------- + # Other methods + + def __append(self, value, txn=None): + data = cPickle.dumps(value, self.binary) + return self.db.append(data, txn) + + def append(self, value, txn=None): + if self.get_type() != db.DB_RECNO: + self.append = self.__append + return self.append(value, txn=txn) + raise db.DBError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO" + + + def associate(self, secondaryDB, callback, flags=0): + def _shelf_callback(priKey, priData, realCallback=callback): + data = cPickle.loads(priData) + return realCallback(priKey, data) + return self.db.associate(secondaryDB, _shelf_callback, flags) + + + #def get(self, key, default=None, txn=None, flags=0): + def get(self, *args, **kw): + # We do it with *args and **kw so if the default value wasn't + # given nothing is passed to the extension module. That way + # an exception can be raised if set_get_returns_none is turned + # off. 
+ data = apply(self.db.get, args, kw) + try: + return cPickle.loads(data) + except (TypeError, cPickle.UnpicklingError): + return data # we may be getting the default value, or None, + # so it doesn't need unpickled. + + def get_both(self, key, value, txn=None, flags=0): + data = cPickle.dumps(value, self.binary) + data = self.db.get(key, data, txn, flags) + return cPickle.loads(data) + + + def cursor(self, txn=None, flags=0): + c = DBShelfCursor(self.db.cursor(txn, flags)) + c.binary = self.binary + return c + + + def put(self, key, value, txn=None, flags=0): + data = cPickle.dumps(value, self.binary) + return self.db.put(key, data, txn, flags) + + + def join(self, cursorList, flags=0): + raise NotImplementedError + + + #---------------------------------------------- + # Methods allowed to pass-through to self.db + # + # close, delete, fd, get_byteswapped, get_type, has_key, + # key_range, open, remove, rename, stat, sync, + # upgrade, verify, and all set_* methods. + + +#--------------------------------------------------------------------------- + +class DBShelfCursor: + """ + """ + def __init__(self, cursor): + self.dbc = cursor + + def __del__(self): + self.close() + + + def __getattr__(self, name): + """Some methods we can just pass through to the cursor object. 
(See below)""" + return getattr(self.dbc, name) + + + #---------------------------------------------- + + def dup(self, flags=0): + return DBShelfCursor(self.dbc.dup(flags)) + + + def put(self, key, value, flags=0): + data = cPickle.dumps(value, self.binary) + return self.dbc.put(key, data, flags) + + + def get(self, *args): + count = len(args) # a method overloading hack + method = getattr(self, 'get_%d' % count) + apply(method, args) + + def get_1(self, flags): + rec = self.dbc.get(flags) + return self._extract(rec) + + def get_2(self, key, flags): + rec = self.dbc.get(key, flags) + return self._extract(rec) + + def get_3(self, key, value, flags): + data = cPickle.dumps(value, self.binary) + rec = self.dbc.get(key, flags) + return self._extract(rec) + + + def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT) + def first(self, flags=0): return self.get_1(flags|db.DB_FIRST) + def last(self, flags=0): return self.get_1(flags|db.DB_LAST) + def next(self, flags=0): return self.get_1(flags|db.DB_NEXT) + def prev(self, flags=0): return self.get_1(flags|db.DB_PREV) + def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME) + def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP) + def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP) + def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP) + + + def get_both(self, key, value, flags=0): + data = cPickle.dumps(value, self.binary) + rec = self.dbc.get_both(key, flags) + return self._extract(rec) + + + def set(self, key, flags=0): + rec = self.dbc.set(key, flags) + return self._extract(rec) + + def set_range(self, key, flags=0): + rec = self.dbc.set_range(key, flags) + return self._extract(rec) + + def set_recno(self, recno, flags=0): + rec = self.dbc.set_recno(recno, flags) + return self._extract(rec) + + set_both = get_both + + def _extract(self, rec): + if rec is None: + return None + else: + key, data = rec + return key, cPickle.loads(data) + + 
#---------------------------------------------- + # Methods allowed to pass-through to self.dbc + # + # close, count, delete, get_recno, join_item + + +#--------------------------------------------------------------------------- diff --git a/sys/lib/python/bsddb/dbtables.py b/sys/lib/python/bsddb/dbtables.py new file mode 100644 index 000000000..253331169 --- /dev/null +++ b/sys/lib/python/bsddb/dbtables.py @@ -0,0 +1,706 @@ +#----------------------------------------------------------------------- +# +# Copyright (C) 2000, 2001 by Autonomous Zone Industries +# Copyright (C) 2002 Gregory P. Smith +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# -- Gregory P. Smith <greg@electricrain.com> + +# This provides a simple database table interface built on top of +# the Python BerkeleyDB 3 interface. +# +_cvsid = '$Id: dbtables.py 46858 2006-06-11 08:35:14Z neal.norwitz $' + +import re +import sys +import copy +import xdrlib +import random +from types import ListType, StringType +import cPickle as pickle + +try: + # For Pythons w/distutils pybsddb + from bsddb3.db import * +except ImportError: + # For Python 2.3 + from bsddb.db import * + +# XXX(nnorwitz): is this correct? 
DBIncompleteError is conditional in _bsddb.c +try: + DBIncompleteError +except NameError: + class DBIncompleteError(Exception): + pass + +class TableDBError(StandardError): + pass +class TableAlreadyExists(TableDBError): + pass + + +class Cond: + """This condition matches everything""" + def __call__(self, s): + return 1 + +class ExactCond(Cond): + """Acts as an exact match condition function""" + def __init__(self, strtomatch): + self.strtomatch = strtomatch + def __call__(self, s): + return s == self.strtomatch + +class PrefixCond(Cond): + """Acts as a condition function for matching a string prefix""" + def __init__(self, prefix): + self.prefix = prefix + def __call__(self, s): + return s[:len(self.prefix)] == self.prefix + +class PostfixCond(Cond): + """Acts as a condition function for matching a string postfix""" + def __init__(self, postfix): + self.postfix = postfix + def __call__(self, s): + return s[-len(self.postfix):] == self.postfix + +class LikeCond(Cond): + """ + Acts as a function that will match using an SQL 'LIKE' style + string. Case insensitive and % signs are wild cards. + This isn't perfect but it should work for the simple common cases. + """ + def __init__(self, likestr, re_flags=re.IGNORECASE): + # escape python re characters + chars_to_escape = '.*+()[]?' + for char in chars_to_escape : + likestr = likestr.replace(char, '\\'+char) + # convert %s to wildcards + self.likestr = likestr.replace('%', '.*') + self.re = re.compile('^'+self.likestr+'$', re_flags) + def __call__(self, s): + return self.re.match(s) + +# +# keys used to store database metadata +# +_table_names_key = '__TABLE_NAMES__' # list of the tables in this db +_columns = '._COLUMNS__' # table_name+this key contains a list of columns + +def _columns_key(table): + return table + _columns + +# +# these keys are found within table sub databases +# +_data = '._DATA_.' # this+column+this+rowid key contains table data +_rowid = '._ROWID_.' 
# this+rowid+this key contains a unique entry for each + # row in the table. (no data is stored) +_rowid_str_len = 8 # length in bytes of the unique rowid strings + +def _data_key(table, col, rowid): + return table + _data + col + _data + rowid + +def _search_col_data_key(table, col): + return table + _data + col + _data + +def _search_all_data_key(table): + return table + _data + +def _rowid_key(table, rowid): + return table + _rowid + rowid + _rowid + +def _search_rowid_key(table): + return table + _rowid + +def contains_metastrings(s) : + """Verify that the given string does not contain any + metadata strings that might interfere with dbtables database operation. + """ + if (s.find(_table_names_key) >= 0 or + s.find(_columns) >= 0 or + s.find(_data) >= 0 or + s.find(_rowid) >= 0): + # Then + return 1 + else: + return 0 + + +class bsdTableDB : + def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600, + recover=0, dbflags=0): + """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600) + + Open database name in the dbhome BerkeleyDB directory. + Use keyword arguments when calling this constructor. 
+ """ + self.db = None + myflags = DB_THREAD + if create: + myflags |= DB_CREATE + flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | + DB_INIT_TXN | dbflags) + # DB_AUTO_COMMIT isn't a valid flag for env.open() + try: + dbflags |= DB_AUTO_COMMIT + except AttributeError: + pass + if recover: + flagsforenv = flagsforenv | DB_RECOVER + self.env = DBEnv() + # enable auto deadlock avoidance + self.env.set_lk_detect(DB_LOCK_DEFAULT) + self.env.open(dbhome, myflags | flagsforenv) + if truncate: + myflags |= DB_TRUNCATE + self.db = DB(self.env) + # this code relies on DBCursor.set* methods to raise exceptions + # rather than returning None + self.db.set_get_returns_none(1) + # allow duplicate entries [warning: be careful w/ metadata] + self.db.set_flags(DB_DUP) + self.db.open(filename, DB_BTREE, dbflags | myflags, mode) + self.dbfilename = filename + # Initialize the table names list if this is a new database + txn = self.env.txn_begin() + try: + if not self.db.has_key(_table_names_key, txn): + self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn) + # Yes, bare except + except: + txn.abort() + raise + else: + txn.commit() + # TODO verify more of the database's metadata? 
+ self.__tablecolumns = {} + + def __del__(self): + self.close() + + def close(self): + if self.db is not None: + self.db.close() + self.db = None + if self.env is not None: + self.env.close() + self.env = None + + def checkpoint(self, mins=0): + try: + self.env.txn_checkpoint(mins) + except DBIncompleteError: + pass + + def sync(self): + try: + self.db.sync() + except DBIncompleteError: + pass + + def _db_print(self) : + """Print the database to stdout for debugging""" + print "******** Printing raw database for debugging ********" + cur = self.db.cursor() + try: + key, data = cur.first() + while 1: + print repr({key: data}) + next = cur.next() + if next: + key, data = next + else: + cur.close() + return + except DBNotFoundError: + cur.close() + + + def CreateTable(self, table, columns): + """CreateTable(table, columns) - Create a new table in the database. + + raises TableDBError if it already exists or for other DB errors. + """ + assert isinstance(columns, ListType) + txn = None + try: + # checking sanity of the table and column names here on + # table creation will prevent problems elsewhere. 
+ if contains_metastrings(table): + raise ValueError( + "bad table name: contains reserved metastrings") + for column in columns : + if contains_metastrings(column): + raise ValueError( + "bad column name: contains reserved metastrings") + + columnlist_key = _columns_key(table) + if self.db.has_key(columnlist_key): + raise TableAlreadyExists, "table already exists" + + txn = self.env.txn_begin() + # store the table's column info + self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn) + + # add the table name to the tablelist + tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, + flags=DB_RMW)) + tablelist.append(table) + # delete 1st, in case we opened with DB_DUP + self.db.delete(_table_names_key, txn) + self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn) + + txn.commit() + txn = None + except DBError, dberror: + if txn: + txn.abort() + raise TableDBError, dberror[1] + + + def ListTableColumns(self, table): + """Return a list of columns in the given table. + [] if the table doesn't exist. + """ + assert isinstance(table, StringType) + if contains_metastrings(table): + raise ValueError, "bad table name: contains reserved metastrings" + + columnlist_key = _columns_key(table) + if not self.db.has_key(columnlist_key): + return [] + pickledcolumnlist = self.db.get(columnlist_key) + if pickledcolumnlist: + return pickle.loads(pickledcolumnlist) + else: + return [] + + def ListTables(self): + """Return a list of tables in this database.""" + pickledtablelist = self.db.get(_table_names_key) + if pickledtablelist: + return pickle.loads(pickledtablelist) + else: + return [] + + def CreateOrExtendTable(self, table, columns): + """CreateOrExtendTable(table, columns) + + Create a new table in the database. + + If a table of this name already exists, extend it to have any + additional columns present in the given list as well as + all of its current columns. 
+ """ + assert isinstance(columns, ListType) + try: + self.CreateTable(table, columns) + except TableAlreadyExists: + # the table already existed, add any new columns + txn = None + try: + columnlist_key = _columns_key(table) + txn = self.env.txn_begin() + + # load the current column list + oldcolumnlist = pickle.loads( + self.db.get(columnlist_key, txn=txn, flags=DB_RMW)) + # create a hash table for fast lookups of column names in the + # loop below + oldcolumnhash = {} + for c in oldcolumnlist: + oldcolumnhash[c] = c + + # create a new column list containing both the old and new + # column names + newcolumnlist = copy.copy(oldcolumnlist) + for c in columns: + if not oldcolumnhash.has_key(c): + newcolumnlist.append(c) + + # store the table's new extended column list + if newcolumnlist != oldcolumnlist : + # delete the old one first since we opened with DB_DUP + self.db.delete(columnlist_key, txn) + self.db.put(columnlist_key, + pickle.dumps(newcolumnlist, 1), + txn=txn) + + txn.commit() + txn = None + + self.__load_column_info(table) + except DBError, dberror: + if txn: + txn.abort() + raise TableDBError, dberror[1] + + + def __load_column_info(self, table) : + """initialize the self.__tablecolumns dict""" + # check the column names + try: + tcolpickles = self.db.get(_columns_key(table)) + except DBNotFoundError: + raise TableDBError, "unknown table: %r" % (table,) + if not tcolpickles: + raise TableDBError, "unknown table: %r" % (table,) + self.__tablecolumns[table] = pickle.loads(tcolpickles) + + def __new_rowid(self, table, txn) : + """Create a new unique row identifier""" + unique = 0 + while not unique: + # Generate a random 64-bit row ID string + # (note: this code has <64 bits of randomness + # but it's plenty for our database id needs!) 
+ p = xdrlib.Packer() + p.pack_int(int(random.random()*2147483647)) + p.pack_int(int(random.random()*2147483647)) + newid = p.get_buffer() + + # Guarantee uniqueness by adding this key to the database + try: + self.db.put(_rowid_key(table, newid), None, txn=txn, + flags=DB_NOOVERWRITE) + except DBKeyExistError: + pass + else: + unique = 1 + + return newid + + + def Insert(self, table, rowdict) : + """Insert(table, datadict) - Insert a new row into the table + using the keys+values from rowdict as the column values. + """ + txn = None + try: + if not self.db.has_key(_columns_key(table)): + raise TableDBError, "unknown table" + + # check the validity of each column name + if not self.__tablecolumns.has_key(table): + self.__load_column_info(table) + for column in rowdict.keys() : + if not self.__tablecolumns[table].count(column): + raise TableDBError, "unknown column: %r" % (column,) + + # get a unique row identifier for this row + txn = self.env.txn_begin() + rowid = self.__new_rowid(table, txn=txn) + + # insert the row values into the table database + for column, dataitem in rowdict.items(): + # store the value + self.db.put(_data_key(table, column, rowid), dataitem, txn=txn) + + txn.commit() + txn = None + + except DBError, dberror: + # WIBNI we could just abort the txn and re-raise the exception? + # But no, because TableDBError is not related to DBError via + # inheritance, so it would be backwards incompatible. Do the next + # best thing. + info = sys.exc_info() + if txn: + txn.abort() + self.db.delete(_rowid_key(table, rowid)) + raise TableDBError, dberror[1], info[2] + + + def Modify(self, table, conditions={}, mappings={}): + """Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings' + + * table - the table name + * conditions - a dictionary keyed on column names containing + a condition callable expecting the data string as an + argument and returning a boolean. 
+ * mappings - a dictionary keyed on column names containing a + condition callable expecting the data string as an argument and + returning the new string for that column. + """ + try: + matching_rowids = self.__Select(table, [], conditions) + + # modify only requested columns + columns = mappings.keys() + for rowid in matching_rowids.keys(): + txn = None + try: + for column in columns: + txn = self.env.txn_begin() + # modify the requested column + try: + dataitem = self.db.get( + _data_key(table, column, rowid), + txn) + self.db.delete( + _data_key(table, column, rowid), + txn) + except DBNotFoundError: + # XXXXXXX row key somehow didn't exist, assume no + # error + dataitem = None + dataitem = mappings[column](dataitem) + if dataitem <> None: + self.db.put( + _data_key(table, column, rowid), + dataitem, txn=txn) + txn.commit() + txn = None + + # catch all exceptions here since we call unknown callables + except: + if txn: + txn.abort() + raise + + except DBError, dberror: + raise TableDBError, dberror[1] + + def Delete(self, table, conditions={}): + """Delete(table, conditions) - Delete items matching the given + conditions from the table. + + * conditions - a dictionary keyed on column names containing + condition functions expecting the data string as an + argument and returning a boolean. 
+ """ + try: + matching_rowids = self.__Select(table, [], conditions) + + # delete row data from all columns + columns = self.__tablecolumns[table] + for rowid in matching_rowids.keys(): + txn = None + try: + txn = self.env.txn_begin() + for column in columns: + # delete the data key + try: + self.db.delete(_data_key(table, column, rowid), + txn) + except DBNotFoundError: + # XXXXXXX column may not exist, assume no error + pass + + try: + self.db.delete(_rowid_key(table, rowid), txn) + except DBNotFoundError: + # XXXXXXX row key somehow didn't exist, assume no error + pass + txn.commit() + txn = None + except DBError, dberror: + if txn: + txn.abort() + raise + except DBError, dberror: + raise TableDBError, dberror[1] + + + def Select(self, table, columns, conditions={}): + """Select(table, columns, conditions) - retrieve specific row data + Returns a list of row column->value mapping dictionaries. + + * columns - a list of which column data to return. If + columns is None, all columns will be returned. + * conditions - a dictionary keyed on column names + containing callable conditions expecting the data string as an + argument and returning a boolean. + """ + try: + if not self.__tablecolumns.has_key(table): + self.__load_column_info(table) + if columns is None: + columns = self.__tablecolumns[table] + matching_rowids = self.__Select(table, columns, conditions) + except DBError, dberror: + raise TableDBError, dberror[1] + # return the matches as a list of dictionaries + return matching_rowids.values() + + + def __Select(self, table, columns, conditions): + """__Select() - Used to implement Select and Delete (above) + Returns a dictionary keyed on rowids containing dicts + holding the row data for columns listed in the columns param + that match the given conditions. + * conditions is a dictionary keyed on column names + containing callable conditions expecting the data string as an + argument and returning a boolean. 
+ """ + # check the validity of each column name + if not self.__tablecolumns.has_key(table): + self.__load_column_info(table) + if columns is None: + columns = self.tablecolumns[table] + for column in (columns + conditions.keys()): + if not self.__tablecolumns[table].count(column): + raise TableDBError, "unknown column: %r" % (column,) + + # keyed on rows that match so far, containings dicts keyed on + # column names containing the data for that row and column. + matching_rowids = {} + # keys are rowids that do not match + rejected_rowids = {} + + # attempt to sort the conditions in such a way as to minimize full + # column lookups + def cmp_conditions(atuple, btuple): + a = atuple[1] + b = btuple[1] + if type(a) is type(b): + if isinstance(a, PrefixCond) and isinstance(b, PrefixCond): + # longest prefix first + return cmp(len(b.prefix), len(a.prefix)) + if isinstance(a, LikeCond) and isinstance(b, LikeCond): + # longest likestr first + return cmp(len(b.likestr), len(a.likestr)) + return 0 + if isinstance(a, ExactCond): + return -1 + if isinstance(b, ExactCond): + return 1 + if isinstance(a, PrefixCond): + return -1 + if isinstance(b, PrefixCond): + return 1 + # leave all unknown condition callables alone as equals + return 0 + + conditionlist = conditions.items() + conditionlist.sort(cmp_conditions) + + # Apply conditions to column data to find what we want + cur = self.db.cursor() + column_num = -1 + for column, condition in conditionlist: + column_num = column_num + 1 + searchkey = _search_col_data_key(table, column) + # speedup: don't linear search columns within loop + if column in columns: + savethiscolumndata = 1 # save the data for return + else: + savethiscolumndata = 0 # data only used for selection + + try: + key, data = cur.set_range(searchkey) + while key[:len(searchkey)] == searchkey: + # extract the rowid from the key + rowid = key[-_rowid_str_len:] + + if not rejected_rowids.has_key(rowid): + # if no condition was specified or the condition + # 
succeeds, add row to our match list. + if not condition or condition(data): + if not matching_rowids.has_key(rowid): + matching_rowids[rowid] = {} + if savethiscolumndata: + matching_rowids[rowid][column] = data + else: + if matching_rowids.has_key(rowid): + del matching_rowids[rowid] + rejected_rowids[rowid] = rowid + + key, data = cur.next() + + except DBError, dberror: + if dberror[0] != DB_NOTFOUND: + raise + continue + + cur.close() + + # we're done selecting rows, garbage collect the reject list + del rejected_rowids + + # extract any remaining desired column data from the + # database for the matching rows. + if len(columns) > 0: + for rowid, rowdata in matching_rowids.items(): + for column in columns: + if rowdata.has_key(column): + continue + try: + rowdata[column] = self.db.get( + _data_key(table, column, rowid)) + except DBError, dberror: + if dberror[0] != DB_NOTFOUND: + raise + rowdata[column] = None + + # return the matches + return matching_rowids + + + def Drop(self, table): + """Remove an entire table from the database""" + txn = None + try: + txn = self.env.txn_begin() + + # delete the column list + self.db.delete(_columns_key(table), txn) + + cur = self.db.cursor(txn) + + # delete all keys containing this tables column and row info + table_key = _search_all_data_key(table) + while 1: + try: + key, data = cur.set_range(table_key) + except DBNotFoundError: + break + # only delete items in this table + if key[:len(table_key)] != table_key: + break + cur.delete() + + # delete all rowids used by this table + table_key = _search_rowid_key(table) + while 1: + try: + key, data = cur.set_range(table_key) + except DBNotFoundError: + break + # only delete items in this table + if key[:len(table_key)] != table_key: + break + cur.delete() + + cur.close() + + # delete the tablename from the table name list + tablelist = pickle.loads( + self.db.get(_table_names_key, txn=txn, flags=DB_RMW)) + try: + tablelist.remove(table) + except ValueError: + # hmm, it wasn't 
there, oh well, that's what we want. + pass + # delete 1st, incase we opened with DB_DUP + self.db.delete(_table_names_key, txn) + self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn) + + txn.commit() + txn = None + + if self.__tablecolumns.has_key(table): + del self.__tablecolumns[table] + + except DBError, dberror: + if txn: + txn.abort() + raise TableDBError, dberror[1] diff --git a/sys/lib/python/bsddb/dbutils.py b/sys/lib/python/bsddb/dbutils.py new file mode 100644 index 000000000..6dcfdd5b5 --- /dev/null +++ b/sys/lib/python/bsddb/dbutils.py @@ -0,0 +1,77 @@ +#------------------------------------------------------------------------ +# +# Copyright (C) 2000 Autonomous Zone Industries +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# Author: Gregory P. Smith <greg@electricrain.com> +# +# Note: I don't know how useful this is in reality since when a +# DBLockDeadlockError happens the current transaction is supposed to be +# aborted. If it doesn't then when the operation is attempted again +# the deadlock is still happening... 
+# --Robin +# +#------------------------------------------------------------------------ + + +# +# import the time.sleep function in a namespace safe way to allow +# "from bsddb.dbutils import *" +# +from time import sleep as _sleep + +import db + +# always sleep at least N seconds between retrys +_deadlock_MinSleepTime = 1.0/128 +# never sleep more than N seconds between retrys +_deadlock_MaxSleepTime = 3.14159 + +# Assign a file object to this for a "sleeping" message to be written to it +# each retry +_deadlock_VerboseFile = None + + +def DeadlockWrap(function, *_args, **_kwargs): + """DeadlockWrap(function, *_args, **_kwargs) - automatically retries + function in case of a database deadlock. + + This is a function intended to be used to wrap database calls such + that they perform retrys with exponentially backing off sleeps in + between when a DBLockDeadlockError exception is raised. + + A 'max_retries' parameter may optionally be passed to prevent it + from retrying forever (in which case the exception will be reraised). + + d = DB(...) + d.open(...) 
+ DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar" + """ + sleeptime = _deadlock_MinSleepTime + max_retries = _kwargs.get('max_retries', -1) + if _kwargs.has_key('max_retries'): + del _kwargs['max_retries'] + while True: + try: + return function(*_args, **_kwargs) + except db.DBLockDeadlockError: + if _deadlock_VerboseFile: + _deadlock_VerboseFile.write( + 'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime) + _sleep(sleeptime) + # exponential backoff in the sleep time + sleeptime *= 2 + if sleeptime > _deadlock_MaxSleepTime: + sleeptime = _deadlock_MaxSleepTime + max_retries -= 1 + if max_retries == -1: + raise + + +#------------------------------------------------------------------------ diff --git a/sys/lib/python/bsddb/test/__init__.py b/sys/lib/python/bsddb/test/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/sys/lib/python/bsddb/test/__init__.py diff --git a/sys/lib/python/bsddb/test/test_1413192.py b/sys/lib/python/bsddb/test/test_1413192.py new file mode 100644 index 000000000..436f40786 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_1413192.py @@ -0,0 +1,21 @@ + +# http://python.org/sf/1413192 +# +# This test relies on the variable names, see the bug report for details. +# The problem was that the env was deallocated prior to the txn. + +try: + # For Pythons w/distutils and add-on pybsddb + from bsddb3 import db +except ImportError: + # For Python >= 2.3 builtin bsddb distribution + from bsddb import db + +env_name = '.' + +env = db.DBEnv() +env.open(env_name, db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL) +the_txn = env.txn_begin() + +map = db.DB(env) +map.open('xxx.db', "p", db.DB_HASH, db.DB_CREATE, 0666, txn=the_txn) diff --git a/sys/lib/python/bsddb/test/test_all.py b/sys/lib/python/bsddb/test/test_all.py new file mode 100644 index 000000000..ad8b1e9e9 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_all.py @@ -0,0 +1,91 @@ +"""Run all test cases. 
+""" + +import sys +import os +import unittest +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +verbose = 0 +if 'verbose' in sys.argv: + verbose = 1 + sys.argv.remove('verbose') + +if 'silent' in sys.argv: # take care of old flag, just in case + verbose = 0 + sys.argv.remove('silent') + + +def print_versions(): + print + print '-=' * 38 + print db.DB_VERSION_STRING + print 'bsddb.db.version(): %s' % (db.version(), ) + print 'bsddb.db.__version__: %s' % db.__version__ + print 'bsddb.db.cvsid: %s' % db.cvsid + print 'python version: %s' % sys.version + print 'My pid: %s' % os.getpid() + print '-=' * 38 + + +class PrintInfoFakeTest(unittest.TestCase): + def testPrintVersions(self): + print_versions() + + +# This little hack is for when this module is run as main and all the +# other modules import it so they will still be able to get the right +# verbose setting. It's confusing but it works. +import test_all +test_all.verbose = verbose + + +def suite(): + try: + # this is special, it used to segfault the interpreter + import test_1413192 + except: + pass + + test_modules = [ + 'test_associate', + 'test_basics', + 'test_compat', + 'test_compare', + 'test_dbobj', + 'test_dbshelve', + 'test_dbtables', + 'test_env_close', + 'test_get_none', + 'test_join', + 'test_lock', + 'test_misc', + 'test_pickle', + 'test_queue', + 'test_recno', + 'test_thread', + 'test_sequence', + 'test_cursor_pget_bug', + ] + + alltests = unittest.TestSuite() + for name in test_modules: + module = __import__(name) + alltests.addTest(module.test_suite()) + return alltests + + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(PrintInfoFakeTest)) + return suite + + +if __name__ == '__main__': + print_versions() + unittest.main(defaultTest='suite') diff --git a/sys/lib/python/bsddb/test/test_associate.py b/sys/lib/python/bsddb/test/test_associate.py new file mode 100644 index 
000000000..05ef83cb6 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_associate.py @@ -0,0 +1,478 @@ +""" +TestCases for DB.associate. +""" + +import sys, os, string +import tempfile +import time +from pprint import pprint + +try: + from threading import Thread, currentThread + have_threads = 1 +except ImportError: + have_threads = 0 + +import unittest +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve +except ImportError: + # For Python 2.3 + from bsddb import db, dbshelve + + +#---------------------------------------------------------------------- + + +musicdata = { +1 : ("Bad English", "The Price Of Love", "Rock"), +2 : ("DNA featuring Suzanne Vega", "Tom's Diner", "Rock"), +3 : ("George Michael", "Praying For Time", "Rock"), +4 : ("Gloria Estefan", "Here We Are", "Rock"), +5 : ("Linda Ronstadt", "Don't Know Much", "Rock"), +6 : ("Michael Bolton", "How Am I Supposed To Live Without You", "Blues"), +7 : ("Paul Young", "Oh Girl", "Rock"), +8 : ("Paula Abdul", "Opposites Attract", "Rock"), +9 : ("Richard Marx", "Should've Known Better", "Rock"), +10: ("Rod Stewart", "Forever Young", "Rock"), +11: ("Roxette", "Dangerous", "Rock"), +12: ("Sheena Easton", "The Lover In Me", "Rock"), +13: ("Sinead O'Connor", "Nothing Compares 2 U", "Rock"), +14: ("Stevie B.", "Because I Love You", "Rock"), +15: ("Taylor Dayne", "Love Will Lead You Back", "Rock"), +16: ("The Bangles", "Eternal Flame", "Rock"), +17: ("Wilson Phillips", "Release Me", "Rock"), +18: ("Billy Joel", "Blonde Over Blue", "Rock"), +19: ("Billy Joel", "Famous Last Words", "Rock"), +20: ("Billy Joel", "Lullabye (Goodnight, My Angel)", "Rock"), +21: ("Billy Joel", "The River Of Dreams", "Rock"), +22: ("Billy Joel", "Two Thousand Years", "Rock"), +23: ("Janet Jackson", "Alright", "Rock"), +24: ("Janet Jackson", "Black Cat", "Rock"), +25: ("Janet Jackson", "Come Back To Me", "Rock"), +26: ("Janet Jackson", "Escapade", "Rock"), +27: ("Janet Jackson", "Love 
Will Never Do (Without You)", "Rock"), +28: ("Janet Jackson", "Miss You Much", "Rock"), +29: ("Janet Jackson", "Rhythm Nation", "Rock"), +30: ("Janet Jackson", "State Of The World", "Rock"), +31: ("Janet Jackson", "The Knowledge", "Rock"), +32: ("Spyro Gyra", "End of Romanticism", "Jazz"), +33: ("Spyro Gyra", "Heliopolis", "Jazz"), +34: ("Spyro Gyra", "Jubilee", "Jazz"), +35: ("Spyro Gyra", "Little Linda", "Jazz"), +36: ("Spyro Gyra", "Morning Dance", "Jazz"), +37: ("Spyro Gyra", "Song for Lorraine", "Jazz"), +38: ("Yes", "Owner Of A Lonely Heart", "Rock"), +39: ("Yes", "Rhythm Of Love", "Rock"), +40: ("Cusco", "Dream Catcher", "New Age"), +41: ("Cusco", "Geronimos Laughter", "New Age"), +42: ("Cusco", "Ghost Dance", "New Age"), +43: ("Blue Man Group", "Drumbone", "New Age"), +44: ("Blue Man Group", "Endless Column", "New Age"), +45: ("Blue Man Group", "Klein Mandelbrot", "New Age"), +46: ("Kenny G", "Silhouette", "Jazz"), +47: ("Sade", "Smooth Operator", "Jazz"), +48: ("David Arkenstone", "Papillon (On The Wings Of The Butterfly)", + "New Age"), +49: ("David Arkenstone", "Stepping Stars", "New Age"), +50: ("David Arkenstone", "Carnation Lily Lily Rose", "New Age"), +51: ("David Lanz", "Behind The Waterfall", "New Age"), +52: ("David Lanz", "Cristofori's Dream", "New Age"), +53: ("David Lanz", "Heartsounds", "New Age"), +54: ("David Lanz", "Leaves on the Seine", "New Age"), +99: ("unknown artist", "Unnamed song", "Unknown"), +} + +#---------------------------------------------------------------------- + +class AssociateErrorTestCase(unittest.TestCase): + def setUp(self): + self.filename = self.__class__.__name__ + '.db' + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + os.mkdir(homeDir) + except os.error: + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + self.env = db.DBEnv() + self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) + + def tearDown(self): + 
self.env.close() + self.env = None + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + + def test00_associateDBError(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test00_associateDBError..." % \ + self.__class__.__name__ + + dupDB = db.DB(self.env) + dupDB.set_flags(db.DB_DUP) + dupDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE) + + secDB = db.DB(self.env) + secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE) + + # dupDB has been configured to allow duplicates, it can't + # associate with a secondary. BerkeleyDB will return an error. + try: + def f(a,b): return a+b + dupDB.associate(secDB, f) + except db.DBError: + # good + secDB.close() + dupDB.close() + else: + secDB.close() + dupDB.close() + self.fail("DBError exception was expected") + + + +#---------------------------------------------------------------------- + + +class AssociateTestCase(unittest.TestCase): + keytype = '' + envFlags = 0 + dbFlags = 0 + + def setUp(self): + self.filename = self.__class__.__name__ + '.db' + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + os.mkdir(homeDir) + except os.error: + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + self.env = db.DBEnv() + self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | + db.DB_INIT_LOCK | db.DB_THREAD | self.envFlags) + + def tearDown(self): + self.closeDB() + self.env.close() + self.env = None + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def addDataToDB(self, d, txn=None): + for key, value in musicdata.items(): + if type(self.keytype) == type(''): + key = "%02d" % key + d.put(key, string.join(value, '|'), txn=txn) + + def createDB(self, txn=None): + self.cur = None + self.secDB = None + self.primary = db.DB(self.env) + self.primary.set_get_returns_none(2) + if 
db.version() >= (4, 1): + self.primary.open(self.filename, "primary", self.dbtype, + db.DB_CREATE | db.DB_THREAD | self.dbFlags, txn=txn) + else: + self.primary.open(self.filename, "primary", self.dbtype, + db.DB_CREATE | db.DB_THREAD | self.dbFlags) + + def closeDB(self): + if self.cur: + self.cur.close() + self.cur = None + if self.secDB: + self.secDB.close() + self.secDB = None + self.primary.close() + self.primary = None + + def getDB(self): + return self.primary + + + def test01_associateWithDB(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_associateWithDB..." % \ + self.__class__.__name__ + + self.createDB() + + self.secDB = db.DB(self.env) + self.secDB.set_flags(db.DB_DUP) + self.secDB.set_get_returns_none(2) + self.secDB.open(self.filename, "secondary", db.DB_BTREE, + db.DB_CREATE | db.DB_THREAD | self.dbFlags) + self.getDB().associate(self.secDB, self.getGenre) + + self.addDataToDB(self.getDB()) + + self.finish_test(self.secDB) + + + def test02_associateAfterDB(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_associateAfterDB..." 
% \ + self.__class__.__name__ + + self.createDB() + self.addDataToDB(self.getDB()) + + self.secDB = db.DB(self.env) + self.secDB.set_flags(db.DB_DUP) + self.secDB.open(self.filename, "secondary", db.DB_BTREE, + db.DB_CREATE | db.DB_THREAD | self.dbFlags) + + # adding the DB_CREATE flag will cause it to index existing records + self.getDB().associate(self.secDB, self.getGenre, db.DB_CREATE) + + self.finish_test(self.secDB) + + + def finish_test(self, secDB, txn=None): + # 'Blues' should not be in the secondary database + vals = secDB.pget('Blues', txn=txn) + assert vals == None, vals + + vals = secDB.pget('Unknown', txn=txn) + assert vals[0] == 99 or vals[0] == '99', vals + vals[1].index('Unknown') + vals[1].index('Unnamed') + vals[1].index('unknown') + + if verbose: + print "Primary key traversal:" + self.cur = self.getDB().cursor(txn) + count = 0 + rec = self.cur.first() + while rec is not None: + if type(self.keytype) == type(''): + assert string.atoi(rec[0]) # for primary db, key is a number + else: + assert rec[0] and type(rec[0]) == type(0) + count = count + 1 + if verbose: + print rec + rec = self.cur.next() + assert count == len(musicdata) # all items accounted for + + + if verbose: + print "Secondary key traversal:" + self.cur = secDB.cursor(txn) + count = 0 + + # test cursor pget + vals = self.cur.pget('Unknown', flags=db.DB_LAST) + assert vals[1] == 99 or vals[1] == '99', vals + assert vals[0] == 'Unknown' + vals[2].index('Unknown') + vals[2].index('Unnamed') + vals[2].index('unknown') + + vals = self.cur.pget('Unknown', data='wrong value', flags=db.DB_GET_BOTH) + assert vals == None, vals + + rec = self.cur.first() + assert rec[0] == "Jazz" + while rec is not None: + count = count + 1 + if verbose: + print rec + rec = self.cur.next() + # all items accounted for EXCEPT for 1 with "Blues" genre + assert count == len(musicdata)-1 + + self.cur = None + + def getGenre(self, priKey, priData): + assert type(priData) == type("") + if verbose: + print 'getGenre 
key: %r data: %r' % (priKey, priData) + genre = string.split(priData, '|')[2] + if genre == 'Blues': + return db.DB_DONOTINDEX + else: + return genre + + +#---------------------------------------------------------------------- + + +class AssociateHashTestCase(AssociateTestCase): + dbtype = db.DB_HASH + +class AssociateBTreeTestCase(AssociateTestCase): + dbtype = db.DB_BTREE + +class AssociateRecnoTestCase(AssociateTestCase): + dbtype = db.DB_RECNO + keytype = 0 + +#---------------------------------------------------------------------- + +class AssociateBTreeTxnTestCase(AssociateBTreeTestCase): + envFlags = db.DB_INIT_TXN + dbFlags = 0 + + def txn_finish_test(self, sDB, txn): + try: + self.finish_test(sDB, txn=txn) + finally: + if self.cur: + self.cur.close() + self.cur = None + if txn: + txn.commit() + + def test13_associate_in_transaction(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test13_associateAutoCommit..." % \ + self.__class__.__name__ + + txn = self.env.txn_begin() + try: + self.createDB(txn=txn) + + self.secDB = db.DB(self.env) + self.secDB.set_flags(db.DB_DUP) + self.secDB.set_get_returns_none(2) + self.secDB.open(self.filename, "secondary", db.DB_BTREE, + db.DB_CREATE | db.DB_THREAD, txn=txn) + if db.version() >= (4,1): + self.getDB().associate(self.secDB, self.getGenre, txn=txn) + else: + self.getDB().associate(self.secDB, self.getGenre) + + self.addDataToDB(self.getDB(), txn=txn) + except: + txn.abort() + raise + + self.txn_finish_test(self.secDB, txn=txn) + + +#---------------------------------------------------------------------- + +class ShelveAssociateTestCase(AssociateTestCase): + + def createDB(self): + self.primary = dbshelve.open(self.filename, + dbname="primary", + dbenv=self.env, + filetype=self.dbtype) + + def addDataToDB(self, d): + for key, value in musicdata.items(): + if type(self.keytype) == type(''): + key = "%02d" % key + d.put(key, value) # save the value as is this time + + + def getGenre(self, priKey, priData): 
+ assert type(priData) == type(()) + if verbose: + print 'getGenre key: %r data: %r' % (priKey, priData) + genre = priData[2] + if genre == 'Blues': + return db.DB_DONOTINDEX + else: + return genre + + +class ShelveAssociateHashTestCase(ShelveAssociateTestCase): + dbtype = db.DB_HASH + +class ShelveAssociateBTreeTestCase(ShelveAssociateTestCase): + dbtype = db.DB_BTREE + +class ShelveAssociateRecnoTestCase(ShelveAssociateTestCase): + dbtype = db.DB_RECNO + keytype = 0 + + +#---------------------------------------------------------------------- + +class ThreadedAssociateTestCase(AssociateTestCase): + + def addDataToDB(self, d): + t1 = Thread(target = self.writer1, + args = (d, )) + t2 = Thread(target = self.writer2, + args = (d, )) + + t1.start() + t2.start() + t1.join() + t2.join() + + def writer1(self, d): + for key, value in musicdata.items(): + if type(self.keytype) == type(''): + key = "%02d" % key + d.put(key, string.join(value, '|')) + + def writer2(self, d): + for x in range(100, 600): + key = 'z%2d' % x + value = [key] * 4 + d.put(key, string.join(value, '|')) + + +class ThreadedAssociateHashTestCase(ShelveAssociateTestCase): + dbtype = db.DB_HASH + +class ThreadedAssociateBTreeTestCase(ShelveAssociateTestCase): + dbtype = db.DB_BTREE + +class ThreadedAssociateRecnoTestCase(ShelveAssociateTestCase): + dbtype = db.DB_RECNO + keytype = 0 + + +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + + if db.version() >= (3, 3, 11): + suite.addTest(unittest.makeSuite(AssociateErrorTestCase)) + + suite.addTest(unittest.makeSuite(AssociateHashTestCase)) + suite.addTest(unittest.makeSuite(AssociateBTreeTestCase)) + suite.addTest(unittest.makeSuite(AssociateRecnoTestCase)) + + if db.version() >= (4, 1): + suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase)) + + suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase)) + suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase)) + 
suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase)) + + if have_threads: + suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase)) + suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase)) + suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase)) + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_basics.py b/sys/lib/python/bsddb/test/test_basics.py new file mode 100644 index 000000000..e6022ba51 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_basics.py @@ -0,0 +1,989 @@ +""" +Basic TestCases for BTree and hash DBs, with and without a DBEnv, with +various DB flags, etc. +""" + +import os +import sys +import errno +import shutil +import string +import tempfile +from pprint import pprint +import unittest +import time + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +from test_all import verbose + +DASH = '-' + + +#---------------------------------------------------------------------- + +class VersionTestCase(unittest.TestCase): + def test00_version(self): + info = db.version() + if verbose: + print '\n', '-=' * 20 + print 'bsddb.db.version(): %s' % (info, ) + print db.DB_VERSION_STRING + print '-=' * 20 + assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR, + db.DB_VERSION_PATCH) + +#---------------------------------------------------------------------- + +class BasicTestCase(unittest.TestCase): + dbtype = db.DB_UNKNOWN # must be set in derived class + dbopenflags = 0 + dbsetflags = 0 + dbmode = 0660 + dbname = None + useEnv = 0 + envflags = 0 + envsetflags = 0 + + _numKeys = 1002 # PRIVATE. 
NOTE: must be an even value + + def setUp(self): + if self.useEnv: + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + shutil.rmtree(homeDir) + except OSError, e: + # unix returns ENOENT, windows returns ESRCH + if e.errno not in (errno.ENOENT, errno.ESRCH): raise + os.mkdir(homeDir) + try: + self.env = db.DBEnv() + self.env.set_lg_max(1024*1024) + self.env.set_tx_max(30) + self.env.set_tx_timestamp(int(time.time())) + self.env.set_flags(self.envsetflags, 1) + self.env.open(homeDir, self.envflags | db.DB_CREATE) + tempfile.tempdir = homeDir + self.filename = os.path.split(tempfile.mktemp())[1] + tempfile.tempdir = None + # Yes, a bare except is intended, since we're re-raising the exc. + except: + shutil.rmtree(homeDir) + raise + else: + self.env = None + self.filename = tempfile.mktemp() + + # create and open the DB + self.d = db.DB(self.env) + self.d.set_flags(self.dbsetflags) + if self.dbname: + self.d.open(self.filename, self.dbname, self.dbtype, + self.dbopenflags|db.DB_CREATE, self.dbmode) + else: + self.d.open(self.filename, # try out keyword args + mode = self.dbmode, + dbtype = self.dbtype, + flags = self.dbopenflags|db.DB_CREATE) + + self.populateDB() + + + def tearDown(self): + self.d.close() + if self.env is not None: + self.env.close() + shutil.rmtree(self.homeDir) + ## Make a new DBEnv to remove the env files from the home dir. + ## (It can't be done while the env is open, nor after it has been + ## closed, so we make a new one to do it.) 
+ #e = db.DBEnv() + #e.remove(self.homeDir) + #os.remove(os.path.join(self.homeDir, self.filename)) + else: + os.remove(self.filename) + + + + def populateDB(self, _txn=None): + d = self.d + + for x in range(self._numKeys/2): + key = '%04d' % (self._numKeys - x) # insert keys in reverse order + data = self.makeData(key) + d.put(key, data, _txn) + + d.put('empty value', '', _txn) + + for x in range(self._numKeys/2-1): + key = '%04d' % x # and now some in forward order + data = self.makeData(key) + d.put(key, data, _txn) + + if _txn: + _txn.commit() + + num = len(d) + if verbose: + print "created %d records" % num + + + def makeData(self, key): + return DASH.join([key] * 5) + + + + #---------------------------------------- + + def test01_GetsAndPuts(self): + d = self.d + + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_GetsAndPuts..." % self.__class__.__name__ + + for key in ['0001', '0100', '0400', '0700', '0999']: + data = d.get(key) + if verbose: + print data + + assert d.get('0321') == '0321-0321-0321-0321-0321' + + # By default non-existent keys return None... + assert d.get('abcd') == None + + # ...but they raise exceptions in other situations. Call + # set_get_returns_none() to change it. 
+ try: + d.delete('abcd') + except db.DBNotFoundError, val: + assert val[0] == db.DB_NOTFOUND + if verbose: print val + else: + self.fail("expected exception") + + + d.put('abcd', 'a new record') + assert d.get('abcd') == 'a new record' + + d.put('abcd', 'same key') + if self.dbsetflags & db.DB_DUP: + assert d.get('abcd') == 'a new record' + else: + assert d.get('abcd') == 'same key' + + + try: + d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE) + except db.DBKeyExistError, val: + assert val[0] == db.DB_KEYEXIST + if verbose: print val + else: + self.fail("expected exception") + + if self.dbsetflags & db.DB_DUP: + assert d.get('abcd') == 'a new record' + else: + assert d.get('abcd') == 'same key' + + + d.sync() + d.close() + del d + + self.d = db.DB(self.env) + if self.dbname: + self.d.open(self.filename, self.dbname) + else: + self.d.open(self.filename) + d = self.d + + assert d.get('0321') == '0321-0321-0321-0321-0321' + if self.dbsetflags & db.DB_DUP: + assert d.get('abcd') == 'a new record' + else: + assert d.get('abcd') == 'same key' + + rec = d.get_both('0555', '0555-0555-0555-0555-0555') + if verbose: + print rec + + assert d.get_both('0555', 'bad data') == None + + # test default value + data = d.get('bad key', 'bad data') + assert data == 'bad data' + + # any object can pass through + data = d.get('bad key', self) + assert data == self + + s = d.stat() + assert type(s) == type({}) + if verbose: + print 'd.stat() returned this dictionary:' + pprint(s) + + + #---------------------------------------- + + def test02_DictionaryMethods(self): + d = self.d + + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_DictionaryMethods..." 
% \ + self.__class__.__name__ + + for key in ['0002', '0101', '0401', '0701', '0998']: + data = d[key] + assert data == self.makeData(key) + if verbose: + print data + + assert len(d) == self._numKeys + keys = d.keys() + assert len(keys) == self._numKeys + assert type(keys) == type([]) + + d['new record'] = 'a new record' + assert len(d) == self._numKeys+1 + keys = d.keys() + assert len(keys) == self._numKeys+1 + + d['new record'] = 'a replacement record' + assert len(d) == self._numKeys+1 + keys = d.keys() + assert len(keys) == self._numKeys+1 + + if verbose: + print "the first 10 keys are:" + pprint(keys[:10]) + + assert d['new record'] == 'a replacement record' + + assert d.has_key('0001') == 1 + assert d.has_key('spam') == 0 + + items = d.items() + assert len(items) == self._numKeys+1 + assert type(items) == type([]) + assert type(items[0]) == type(()) + assert len(items[0]) == 2 + + if verbose: + print "the first 10 items are:" + pprint(items[:10]) + + values = d.values() + assert len(values) == self._numKeys+1 + assert type(values) == type([]) + + if verbose: + print "the first 10 values are:" + pprint(values[:10]) + + + + #---------------------------------------- + + def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." 
% \ + (self.__class__.__name__, get_raises_error, set_raises_error) + + if self.env and self.dbopenflags & db.DB_AUTO_COMMIT: + txn = self.env.txn_begin() + else: + txn = None + c = self.d.cursor(txn=txn) + + rec = c.first() + count = 0 + while rec is not None: + count = count + 1 + if verbose and count % 100 == 0: + print rec + try: + rec = c.next() + except db.DBNotFoundError, val: + if get_raises_error: + assert val[0] == db.DB_NOTFOUND + if verbose: print val + rec = None + else: + self.fail("unexpected DBNotFoundError") + assert c.get_current_size() == len(c.current()[1]), "%s != len(%r)" % (c.get_current_size(), c.current()[1]) + + assert count == self._numKeys + + + rec = c.last() + count = 0 + while rec is not None: + count = count + 1 + if verbose and count % 100 == 0: + print rec + try: + rec = c.prev() + except db.DBNotFoundError, val: + if get_raises_error: + assert val[0] == db.DB_NOTFOUND + if verbose: print val + rec = None + else: + self.fail("unexpected DBNotFoundError") + + assert count == self._numKeys + + rec = c.set('0505') + rec2 = c.current() + assert rec == rec2 + assert rec[0] == '0505' + assert rec[1] == self.makeData('0505') + assert c.get_current_size() == len(rec[1]) + + # make sure we get empty values properly + rec = c.set('empty value') + assert rec[1] == '' + assert c.get_current_size() == 0 + + try: + n = c.set('bad key') + except db.DBNotFoundError, val: + assert val[0] == db.DB_NOTFOUND + if verbose: print val + else: + if set_raises_error: + self.fail("expected exception") + if n != None: + self.fail("expected None: %r" % (n,)) + + rec = c.get_both('0404', self.makeData('0404')) + assert rec == ('0404', self.makeData('0404')) + + try: + n = c.get_both('0404', 'bad data') + except db.DBNotFoundError, val: + assert val[0] == db.DB_NOTFOUND + if verbose: print val + else: + if get_raises_error: + self.fail("expected exception") + if n != None: + self.fail("expected None: %r" % (n,)) + + if self.d.get_type() == db.DB_BTREE: + rec = 
c.set_range('011') + if verbose: + print "searched for '011', found: ", rec + + rec = c.set_range('011',dlen=0,doff=0) + if verbose: + print "searched (partial) for '011', found: ", rec + if rec[1] != '': self.fail('expected empty data portion') + + ev = c.set_range('empty value') + if verbose: + print "search for 'empty value' returned", ev + if ev[1] != '': self.fail('empty value lookup failed') + + c.set('0499') + c.delete() + try: + rec = c.current() + except db.DBKeyEmptyError, val: + if get_raises_error: + assert val[0] == db.DB_KEYEMPTY + if verbose: print val + else: + self.fail("unexpected DBKeyEmptyError") + else: + if get_raises_error: + self.fail('DBKeyEmptyError exception expected') + + c.next() + c2 = c.dup(db.DB_POSITION) + assert c.current() == c2.current() + + c2.put('', 'a new value', db.DB_CURRENT) + assert c.current() == c2.current() + assert c.current()[1] == 'a new value' + + c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5) + assert c2.current()[1] == 'a newer value' + + c.close() + c2.close() + if txn: + txn.commit() + + # time to abuse the closed cursors and hope we don't crash + methods_to_test = { + 'current': (), + 'delete': (), + 'dup': (db.DB_POSITION,), + 'first': (), + 'get': (0,), + 'next': (), + 'prev': (), + 'last': (), + 'put':('', 'spam', db.DB_CURRENT), + 'set': ("0505",), + } + for method, args in methods_to_test.items(): + try: + if verbose: + print "attempting to use a closed cursor's %s method" % \ + method + # a bug may cause a NULL pointer dereference... + apply(getattr(c, method), args) + except db.DBError, val: + assert val[0] == 0 + if verbose: print val + else: + self.fail("no exception raised when using a buggy cursor's" + "%s method" % method) + + # + # free cursor referencing a closed database, it should not barf: + # + oldcursor = self.d.cursor(txn=txn) + self.d.close() + + # this would originally cause a segfault when the cursor for a + # closed database was cleaned up. it should not anymore. 
+ # SF pybsddb bug id 667343 + del oldcursor + + def test03b_SimpleCursorWithoutGetReturnsNone0(self): + # same test but raise exceptions instead of returning None + if verbose: + print '\n', '-=' * 30 + print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \ + self.__class__.__name__ + + old = self.d.set_get_returns_none(0) + assert old == 2 + self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1) + + def test03b_SimpleCursorWithGetReturnsNone1(self): + # same test but raise exceptions instead of returning None + if verbose: + print '\n', '-=' * 30 + print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \ + self.__class__.__name__ + + old = self.d.set_get_returns_none(1) + self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=1) + + + def test03c_SimpleCursorGetReturnsNone2(self): + # same test but raise exceptions instead of returning None + if verbose: + print '\n', '-=' * 30 + print "Running %s.test03c_SimpleCursorStuffWithoutSetReturnsNone..." % \ + self.__class__.__name__ + + old = self.d.set_get_returns_none(1) + assert old == 2 + old = self.d.set_get_returns_none(2) + assert old == 1 + self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0) + + #---------------------------------------- + + def test04_PartialGetAndPut(self): + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test04_PartialGetAndPut..." % \ + self.__class__.__name__ + + key = "partialTest" + data = "1" * 1000 + "2" * 1000 + d.put(key, data) + assert d.get(key) == data + assert d.get(key, dlen=20, doff=990) == ("1" * 10) + ("2" * 10) + + d.put("partialtest2", ("1" * 30000) + "robin" ) + assert d.get("partialtest2", dlen=5, doff=30000) == "robin" + + # There seems to be a bug in DB here... Commented out the test for + # now. 
+ ##assert d.get("partialtest2", dlen=5, doff=30010) == "" + + if self.dbsetflags != db.DB_DUP: + # Partial put with duplicate records requires a cursor + d.put(key, "0000", dlen=2000, doff=0) + assert d.get(key) == "0000" + + d.put(key, "1111", dlen=1, doff=2) + assert d.get(key) == "0011110" + + #---------------------------------------- + + def test05_GetSize(self): + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test05_GetSize..." % self.__class__.__name__ + + for i in range(1, 50000, 500): + key = "size%s" % i + #print "before ", i, + d.put(key, "1" * i) + #print "after", + assert d.get_size(key) == i + #print "done" + + #---------------------------------------- + + def test06_Truncate(self): + if db.version() < (3,3): + # truncate is a feature of BerkeleyDB 3.3 and above + return + + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test99_Truncate..." % self.__class__.__name__ + + d.put("abcde", "ABCDE"); + num = d.truncate() + assert num >= 1, "truncate returned <= 0 on non-empty database" + num = d.truncate() + assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,) + + #---------------------------------------- + + +#---------------------------------------------------------------------- + + +class BasicBTreeTestCase(BasicTestCase): + dbtype = db.DB_BTREE + + +class BasicHashTestCase(BasicTestCase): + dbtype = db.DB_HASH + + +class BasicBTreeWithThreadFlagTestCase(BasicTestCase): + dbtype = db.DB_BTREE + dbopenflags = db.DB_THREAD + + +class BasicHashWithThreadFlagTestCase(BasicTestCase): + dbtype = db.DB_HASH + dbopenflags = db.DB_THREAD + + +class BasicWithEnvTestCase(BasicTestCase): + dbopenflags = db.DB_THREAD + useEnv = 1 + envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK + + #---------------------------------------- + + def test07_EnvRemoveAndRename(self): + if not self.env: + return + + if verbose: + print '\n', '-=' * 30 + print "Running %s.test07_EnvRemoveAndRename..." 
% self.__class__.__name__ + + # can't rename or remove an open DB + self.d.close() + + newname = self.filename + '.renamed' + self.env.dbrename(self.filename, None, newname) + self.env.dbremove(newname) + + # dbremove and dbrename are in 4.1 and later + if db.version() < (4,1): + del test07_EnvRemoveAndRename + + #---------------------------------------- + +class BasicBTreeWithEnvTestCase(BasicWithEnvTestCase): + dbtype = db.DB_BTREE + + +class BasicHashWithEnvTestCase(BasicWithEnvTestCase): + dbtype = db.DB_HASH + + +#---------------------------------------------------------------------- + +class BasicTransactionTestCase(BasicTestCase): + dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT + useEnv = 1 + envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | + db.DB_INIT_TXN) + envsetflags = db.DB_AUTO_COMMIT + + + def tearDown(self): + self.txn.commit() + BasicTestCase.tearDown(self) + + + def populateDB(self): + txn = self.env.txn_begin() + BasicTestCase.populateDB(self, _txn=txn) + + self.txn = self.env.txn_begin() + + + def test06_Transactions(self): + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test06_Transactions..." % self.__class__.__name__ + + assert d.get('new rec', txn=self.txn) == None + d.put('new rec', 'this is a new record', self.txn) + assert d.get('new rec', txn=self.txn) == 'this is a new record' + self.txn.abort() + assert d.get('new rec') == None + + self.txn = self.env.txn_begin() + + assert d.get('new rec', txn=self.txn) == None + d.put('new rec', 'this is a new record', self.txn) + assert d.get('new rec', txn=self.txn) == 'this is a new record' + self.txn.commit() + assert d.get('new rec') == 'this is a new record' + + self.txn = self.env.txn_begin() + c = d.cursor(self.txn) + rec = c.first() + count = 0 + while rec is not None: + count = count + 1 + if verbose and count % 100 == 0: + print rec + rec = c.next() + assert count == self._numKeys+1 + + c.close() # Cursors *MUST* be closed before commit! 
+ self.txn.commit() + + # flush pending updates + try: + self.env.txn_checkpoint (0, 0, 0) + except db.DBIncompleteError: + pass + + if db.version() >= (4,0): + statDict = self.env.log_stat(0); + assert statDict.has_key('magic') + assert statDict.has_key('version') + assert statDict.has_key('cur_file') + assert statDict.has_key('region_nowait') + + # must have at least one log file present: + logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG) + assert logs != None + for log in logs: + if verbose: + print 'log file: ' + log + if db.version() >= (4,2): + logs = self.env.log_archive(db.DB_ARCH_REMOVE) + assert not logs + + self.txn = self.env.txn_begin() + + #---------------------------------------- + + def test07_TxnTruncate(self): + if db.version() < (3,3): + # truncate is a feature of BerkeleyDB 3.3 and above + return + + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test07_TxnTruncate..." % self.__class__.__name__ + + d.put("abcde", "ABCDE"); + txn = self.env.txn_begin() + num = d.truncate(txn) + assert num >= 1, "truncate returned <= 0 on non-empty database" + num = d.truncate(txn) + assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,) + txn.commit() + + #---------------------------------------- + + def test08_TxnLateUse(self): + txn = self.env.txn_begin() + txn.abort() + try: + txn.abort() + except db.DBError, e: + pass + else: + raise RuntimeError, "DBTxn.abort() called after DB_TXN no longer valid w/o an exception" + + txn = self.env.txn_begin() + txn.commit() + try: + txn.commit() + except db.DBError, e: + pass + else: + raise RuntimeError, "DBTxn.commit() called after DB_TXN no longer valid w/o an exception" + + +class BTreeTransactionTestCase(BasicTransactionTestCase): + dbtype = db.DB_BTREE + +class HashTransactionTestCase(BasicTransactionTestCase): + dbtype = db.DB_HASH + + + +#---------------------------------------------------------------------- + +class BTreeRecnoTestCase(BasicTestCase): + dbtype = 
db.DB_BTREE + dbsetflags = db.DB_RECNUM + + def test07_RecnoInBTree(self): + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test07_RecnoInBTree..." % self.__class__.__name__ + + rec = d.get(200) + assert type(rec) == type(()) + assert len(rec) == 2 + if verbose: + print "Record #200 is ", rec + + c = d.cursor() + c.set('0200') + num = c.get_recno() + assert type(num) == type(1) + if verbose: + print "recno of d['0200'] is ", num + + rec = c.current() + assert c.set_recno(num) == rec + + c.close() + + + +class BTreeRecnoWithThreadFlagTestCase(BTreeRecnoTestCase): + dbopenflags = db.DB_THREAD + +#---------------------------------------------------------------------- + +class BasicDUPTestCase(BasicTestCase): + dbsetflags = db.DB_DUP + + def test08_DuplicateKeys(self): + d = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test08_DuplicateKeys..." % \ + self.__class__.__name__ + + d.put("dup0", "before") + for x in "The quick brown fox jumped over the lazy dog.".split(): + d.put("dup1", x) + d.put("dup2", "after") + + data = d.get("dup1") + assert data == "The" + if verbose: + print data + + c = d.cursor() + rec = c.set("dup1") + assert rec == ('dup1', 'The') + + next = c.next() + assert next == ('dup1', 'quick') + + rec = c.set("dup1") + count = c.count() + assert count == 9 + + next_dup = c.next_dup() + assert next_dup == ('dup1', 'quick') + + rec = c.set('dup1') + while rec is not None: + if verbose: + print rec + rec = c.next_dup() + + c.set('dup1') + rec = c.next_nodup() + assert rec[0] != 'dup1' + if verbose: + print rec + + c.close() + + + +class BTreeDUPTestCase(BasicDUPTestCase): + dbtype = db.DB_BTREE + +class HashDUPTestCase(BasicDUPTestCase): + dbtype = db.DB_HASH + +class BTreeDUPWithThreadTestCase(BasicDUPTestCase): + dbtype = db.DB_BTREE + dbopenflags = db.DB_THREAD + +class HashDUPWithThreadTestCase(BasicDUPTestCase): + dbtype = db.DB_HASH + dbopenflags = db.DB_THREAD + + 
+#---------------------------------------------------------------------- + +class BasicMultiDBTestCase(BasicTestCase): + dbname = 'first' + + def otherType(self): + if self.dbtype == db.DB_BTREE: + return db.DB_HASH + else: + return db.DB_BTREE + + def test09_MultiDB(self): + d1 = self.d + if verbose: + print '\n', '-=' * 30 + print "Running %s.test09_MultiDB..." % self.__class__.__name__ + + d2 = db.DB(self.env) + d2.open(self.filename, "second", self.dbtype, + self.dbopenflags|db.DB_CREATE) + d3 = db.DB(self.env) + d3.open(self.filename, "third", self.otherType(), + self.dbopenflags|db.DB_CREATE) + + for x in "The quick brown fox jumped over the lazy dog".split(): + d2.put(x, self.makeData(x)) + + for x in string.letters: + d3.put(x, x*70) + + d1.sync() + d2.sync() + d3.sync() + d1.close() + d2.close() + d3.close() + + self.d = d1 = d2 = d3 = None + + self.d = d1 = db.DB(self.env) + d1.open(self.filename, self.dbname, flags = self.dbopenflags) + d2 = db.DB(self.env) + d2.open(self.filename, "second", flags = self.dbopenflags) + d3 = db.DB(self.env) + d3.open(self.filename, "third", flags = self.dbopenflags) + + c1 = d1.cursor() + c2 = d2.cursor() + c3 = d3.cursor() + + count = 0 + rec = c1.first() + while rec is not None: + count = count + 1 + if verbose and (count % 50) == 0: + print rec + rec = c1.next() + assert count == self._numKeys + + count = 0 + rec = c2.first() + while rec is not None: + count = count + 1 + if verbose: + print rec + rec = c2.next() + assert count == 9 + + count = 0 + rec = c3.first() + while rec is not None: + count = count + 1 + if verbose: + print rec + rec = c3.next() + assert count == 52 + + + c1.close() + c2.close() + c3.close() + + d2.close() + d3.close() + + + +# Strange things happen if you try to use Multiple DBs per file without a +# DBEnv with MPOOL and LOCKing... 
+ +class BTreeMultiDBTestCase(BasicMultiDBTestCase): + dbtype = db.DB_BTREE + dbopenflags = db.DB_THREAD + useEnv = 1 + envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK + +class HashMultiDBTestCase(BasicMultiDBTestCase): + dbtype = db.DB_HASH + dbopenflags = db.DB_THREAD + useEnv = 1 + envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK + + +#---------------------------------------------------------------------- +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + + suite.addTest(unittest.makeSuite(VersionTestCase)) + suite.addTest(unittest.makeSuite(BasicBTreeTestCase)) + suite.addTest(unittest.makeSuite(BasicHashTestCase)) + suite.addTest(unittest.makeSuite(BasicBTreeWithThreadFlagTestCase)) + suite.addTest(unittest.makeSuite(BasicHashWithThreadFlagTestCase)) + suite.addTest(unittest.makeSuite(BasicBTreeWithEnvTestCase)) + suite.addTest(unittest.makeSuite(BasicHashWithEnvTestCase)) + suite.addTest(unittest.makeSuite(BTreeTransactionTestCase)) + suite.addTest(unittest.makeSuite(HashTransactionTestCase)) + suite.addTest(unittest.makeSuite(BTreeRecnoTestCase)) + suite.addTest(unittest.makeSuite(BTreeRecnoWithThreadFlagTestCase)) + suite.addTest(unittest.makeSuite(BTreeDUPTestCase)) + suite.addTest(unittest.makeSuite(HashDUPTestCase)) + suite.addTest(unittest.makeSuite(BTreeDUPWithThreadTestCase)) + suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase)) + suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase)) + suite.addTest(unittest.makeSuite(HashMultiDBTestCase)) + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_compare.py b/sys/lib/python/bsddb/test/test_compare.py new file mode 100644 index 000000000..59a45ec50 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_compare.py @@ -0,0 +1,249 @@ +""" +TestCases for python DB Btree key comparison function. 
+""" + +import sys, os, re +import test_all +from cStringIO import StringIO + +import unittest +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve +except ImportError: + # For Python 2.3 + from bsddb import db, dbshelve + +lexical_cmp = cmp + +def lowercase_cmp(left, right): + return cmp (left.lower(), right.lower()) + +def make_reverse_comparator (cmp): + def reverse (left, right, delegate=cmp): + return - delegate (left, right) + return reverse + +_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf'] +_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP'] + +class ComparatorTests (unittest.TestCase): + def comparator_test_helper (self, comparator, expected_data): + data = expected_data[:] + data.sort (comparator) + self.failUnless (data == expected_data, + "comparator `%s' is not right: %s vs. %s" + % (comparator, expected_data, data)) + def test_lexical_comparator (self): + self.comparator_test_helper (lexical_cmp, _expected_lexical_test_data) + def test_reverse_lexical_comparator (self): + rev = _expected_lexical_test_data[:] + rev.reverse () + self.comparator_test_helper (make_reverse_comparator (lexical_cmp), + rev) + def test_lowercase_comparator (self): + self.comparator_test_helper (lowercase_cmp, + _expected_lowercase_test_data) + +class AbstractBtreeKeyCompareTestCase (unittest.TestCase): + env = None + db = None + + def setUp (self): + self.filename = self.__class__.__name__ + '.db' + homeDir = os.path.join (os.path.dirname (sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + os.mkdir (homeDir) + except os.error: + pass + + env = db.DBEnv () + env.open (homeDir, + db.DB_CREATE | db.DB_INIT_MPOOL + | db.DB_INIT_LOCK | db.DB_THREAD) + self.env = env + + def tearDown (self): + self.closeDB () + if self.env is not None: + self.env.close () + self.env = None + import glob + map (os.remove, glob.glob (os.path.join (self.homeDir, '*'))) + + def addDataToDB (self, 
data): + i = 0 + for item in data: + self.db.put (item, str (i)) + i = i + 1 + + def createDB (self, key_comparator): + self.db = db.DB (self.env) + self.setupDB (key_comparator) + self.db.open (self.filename, "test", db.DB_BTREE, db.DB_CREATE) + + def setupDB (self, key_comparator): + self.db.set_bt_compare (key_comparator) + + def closeDB (self): + if self.db is not None: + self.db.close () + self.db = None + + def startTest (self): + pass + + def finishTest (self, expected = None): + if expected is not None: + self.check_results (expected) + self.closeDB () + + def check_results (self, expected): + curs = self.db.cursor () + try: + index = 0 + rec = curs.first () + while rec: + key, ignore = rec + self.failUnless (index < len (expected), + "to many values returned from cursor") + self.failUnless (expected[index] == key, + "expected value `%s' at %d but got `%s'" + % (expected[index], index, key)) + index = index + 1 + rec = curs.next () + self.failUnless (index == len (expected), + "not enough values returned from cursor") + finally: + curs.close () + +class BtreeKeyCompareTestCase (AbstractBtreeKeyCompareTestCase): + def runCompareTest (self, comparator, data): + self.startTest () + self.createDB (comparator) + self.addDataToDB (data) + self.finishTest (data) + + def test_lexical_ordering (self): + self.runCompareTest (lexical_cmp, _expected_lexical_test_data) + + def test_reverse_lexical_ordering (self): + expected_rev_data = _expected_lexical_test_data[:] + expected_rev_data.reverse () + self.runCompareTest (make_reverse_comparator (lexical_cmp), + expected_rev_data) + + def test_compare_function_useless (self): + self.startTest () + def socialist_comparator (l, r): + return 0 + self.createDB (socialist_comparator) + self.addDataToDB (['b', 'a', 'd']) + # all things being equal the first key will be the only key + # in the database... 
(with the last key's value fwiw) + self.finishTest (['b']) + + +class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase): + def test_raises_non_callable (self): + self.startTest () + self.assertRaises (TypeError, self.createDB, 'abc') + self.assertRaises (TypeError, self.createDB, None) + self.finishTest () + + def test_set_bt_compare_with_function (self): + self.startTest () + self.createDB (lexical_cmp) + self.finishTest () + + def check_results (self, results): + pass + + def test_compare_function_incorrect (self): + self.startTest () + def bad_comparator (l, r): + return 1 + # verify that set_bt_compare checks that comparator('', '') == 0 + self.assertRaises (TypeError, self.createDB, bad_comparator) + self.finishTest () + + def verifyStderr(self, method, successRe): + """ + Call method() while capturing sys.stderr output internally and + call self.fail() if successRe.search() does not match the stderr + output. This is used to test for uncatchable exceptions. + """ + stdErr = sys.stderr + sys.stderr = StringIO() + try: + method() + finally: + temp = sys.stderr + sys.stderr = stdErr + errorOut = temp.getvalue() + if not successRe.search(errorOut): + self.fail("unexpected stderr output:\n"+errorOut) + + def _test_compare_function_exception (self): + self.startTest () + def bad_comparator (l, r): + if l == r: + # pass the set_bt_compare test + return 0 + raise RuntimeError, "i'm a naughty comparison function" + self.createDB (bad_comparator) + #print "\n*** test should print 2 uncatchable tracebacks ***" + self.addDataToDB (['a', 'b', 'c']) # this should raise, but... 
+ self.finishTest () + + def test_compare_function_exception(self): + self.verifyStderr( + self._test_compare_function_exception, + re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S) + ) + + def _test_compare_function_bad_return (self): + self.startTest () + def bad_comparator (l, r): + if l == r: + # pass the set_bt_compare test + return 0 + return l + self.createDB (bad_comparator) + #print "\n*** test should print 2 errors about returning an int ***" + self.addDataToDB (['a', 'b', 'c']) # this should raise, but... + self.finishTest () + + def test_compare_function_bad_return(self): + self.verifyStderr( + self._test_compare_function_bad_return, + re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S) + ) + + + def test_cannot_assign_twice (self): + + def my_compare (a, b): + return 0 + + self.startTest () + self.createDB (my_compare) + try: + self.db.set_bt_compare (my_compare) + assert False, "this set should fail" + + except RuntimeError, msg: + pass + +def test_suite (): + res = unittest.TestSuite () + + res.addTest (unittest.makeSuite (ComparatorTests)) + if db.version () >= (3, 3, 11): + res.addTest (unittest.makeSuite (BtreeExceptionsTestCase)) + res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase)) + return res + +if __name__ == '__main__': + unittest.main (defaultTest = 'suite') diff --git a/sys/lib/python/bsddb/test/test_compat.py b/sys/lib/python/bsddb/test/test_compat.py new file mode 100644 index 000000000..b108db4c6 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_compat.py @@ -0,0 +1,191 @@ +""" +Test cases adapted from the test_bsddb.py module in Python's +regression test suite. 
+""" + +import sys, os, string +import unittest +import tempfile + +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, hashopen, btopen, rnopen +except ImportError: + # For Python 2.3 + from bsddb import db, hashopen, btopen, rnopen + + +class CompatibilityTestCase(unittest.TestCase): + def setUp(self): + self.filename = tempfile.mktemp() + + def tearDown(self): + try: + os.remove(self.filename) + except os.error: + pass + + + def test01_btopen(self): + self.do_bthash_test(btopen, 'btopen') + + def test02_hashopen(self): + self.do_bthash_test(hashopen, 'hashopen') + + def test03_rnopen(self): + data = string.split("The quick brown fox jumped over the lazy dog.") + if verbose: + print "\nTesting: rnopen" + + f = rnopen(self.filename, 'c') + for x in range(len(data)): + f[x+1] = data[x] + + getTest = (f[1], f[2], f[3]) + if verbose: + print '%s %s %s' % getTest + + assert getTest[1] == 'quick', 'data mismatch!' + + rv = f.set_location(3) + if rv != (3, 'brown'): + self.fail('recno database set_location failed: '+repr(rv)) + + f[25] = 'twenty-five' + f.close() + del f + + f = rnopen(self.filename, 'w') + f[20] = 'twenty' + + def noRec(f): + rec = f[15] + self.assertRaises(KeyError, noRec, f) + + def badKey(f): + rec = f['a string'] + self.assertRaises(TypeError, badKey, f) + + del f[3] + + rec = f.first() + while rec: + if verbose: + print rec + try: + rec = f.next() + except KeyError: + break + + f.close() + + + def test04_n_flag(self): + f = hashopen(self.filename, 'n') + f.close() + + + def do_bthash_test(self, factory, what): + if verbose: + print '\nTesting: ', what + + f = factory(self.filename, 'c') + if verbose: + print 'creation...' 
+ + # truth test + if f: + if verbose: print "truth test: true" + else: + if verbose: print "truth test: false" + + f['0'] = '' + f['a'] = 'Guido' + f['b'] = 'van' + f['c'] = 'Rossum' + f['d'] = 'invented' + # 'e' intentionally left out + f['f'] = 'Python' + if verbose: + print '%s %s %s' % (f['a'], f['b'], f['c']) + + if verbose: + print 'key ordering...' + start = f.set_location(f.first()[0]) + if start != ('0', ''): + self.fail("incorrect first() result: "+repr(start)) + while 1: + try: + rec = f.next() + except KeyError: + assert rec == f.last(), 'Error, last <> last!' + f.previous() + break + if verbose: + print rec + + assert f.has_key('f'), 'Error, missing key!' + + # test that set_location() returns the next nearest key, value + # on btree databases and raises KeyError on others. + if factory == btopen: + e = f.set_location('e') + if e != ('f', 'Python'): + self.fail('wrong key,value returned: '+repr(e)) + else: + try: + e = f.set_location('e') + except KeyError: + pass + else: + self.fail("set_location on non-existant key did not raise KeyError") + + f.sync() + f.close() + # truth test + try: + if f: + if verbose: print "truth test: true" + else: + if verbose: print "truth test: false" + except db.DBError: + pass + else: + self.fail("Exception expected") + + del f + + if verbose: + print 'modification...' + f = factory(self.filename, 'w') + f['d'] = 'discovered' + + if verbose: + print 'access...' 
+ for key in f.keys(): + word = f[key] + if verbose: + print word + + def noRec(f): + rec = f['no such key'] + self.assertRaises(KeyError, noRec, f) + + def badKey(f): + rec = f[15] + self.assertRaises(TypeError, badKey, f) + + f.close() + + +#---------------------------------------------------------------------- + + +def test_suite(): + return unittest.makeSuite(CompatibilityTestCase) + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_cursor_pget_bug.py b/sys/lib/python/bsddb/test/test_cursor_pget_bug.py new file mode 100644 index 000000000..de47e6d04 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_cursor_pget_bug.py @@ -0,0 +1,65 @@ +import unittest +import sys, os, glob + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + + +#---------------------------------------------------------------------- + +class pget_bugTestCase(unittest.TestCase): + """Verify that cursor.pget works properly""" + db_name = 'test-cursor_pget.db' + + def setUp(self): + self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + try: + os.mkdir(self.homeDir) + except os.error: + pass + self.env = db.DBEnv() + self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) + self.primary_db = db.DB(self.env) + self.primary_db.open(self.db_name, 'primary', db.DB_BTREE, db.DB_CREATE) + self.secondary_db = db.DB(self.env) + self.secondary_db.set_flags(db.DB_DUP) + self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE) + self.primary_db.associate(self.secondary_db, lambda key, data: data) + self.primary_db.put('salad', 'eggs') + self.primary_db.put('spam', 'ham') + self.primary_db.put('omelet', 'eggs') + + + def tearDown(self): + self.secondary_db.close() + self.primary_db.close() + self.env.close() + del self.secondary_db + del self.primary_db + del self.env + for file in glob.glob(os.path.join(self.homeDir, '*')): + 
os.remove(file) + os.removedirs(self.homeDir) + + def test_pget(self): + cursor = self.secondary_db.cursor() + + self.assertEquals(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET)) + self.assertEquals(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP)) + self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP)) + + self.assertEquals(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET)) + self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP)) + + cursor.close() + + +def test_suite(): + return unittest.makeSuite(pget_bugTestCase) + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_dbobj.py b/sys/lib/python/bsddb/test/test_dbobj.py new file mode 100644 index 000000000..1ef382e0f --- /dev/null +++ b/sys/lib/python/bsddb/test/test_dbobj.py @@ -0,0 +1,82 @@ + +import sys, os, string +import unittest +import glob + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbobj +except ImportError: + # For Python 2.3 + from bsddb import db, dbobj + + +#---------------------------------------------------------------------- + +class dbobjTestCase(unittest.TestCase): + """Verify that dbobj.DB and dbobj.DBEnv work properly""" + db_home = 'db_home' + db_name = 'test-dbobj.db' + + def setUp(self): + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: os.mkdir(homeDir) + except os.error: pass + + def tearDown(self): + if hasattr(self, 'db'): + del self.db + if hasattr(self, 'env'): + del self.env + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def test01_both(self): + class TestDBEnv(dbobj.DBEnv): pass + class TestDB(dbobj.DB): + def put(self, key, *args, **kwargs): + key = string.upper(key) + # call our parent classes put method with an upper case key + return apply(dbobj.DB.put, (self, key) + args, kwargs) + self.env = TestDBEnv() + self.env.open(self.homeDir, db.DB_CREATE | 
db.DB_INIT_MPOOL) + self.db = TestDB(self.env) + self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE) + self.db.put('spam', 'eggs') + assert self.db.get('spam') == None, \ + "overridden dbobj.DB.put() method failed [1]" + assert self.db.get('SPAM') == 'eggs', \ + "overridden dbobj.DB.put() method failed [2]" + self.db.close() + self.env.close() + + def test02_dbobj_dict_interface(self): + self.env = dbobj.DBEnv() + self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) + self.db = dbobj.DB(self.env) + self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE) + # __setitem__ + self.db['spam'] = 'eggs' + # __len__ + assert len(self.db) == 1 + # __getitem__ + assert self.db['spam'] == 'eggs' + # __del__ + del self.db['spam'] + assert self.db.get('spam') == None, "dbobj __del__ failed" + self.db.close() + self.env.close() + + def test03_dbobj_type_before_open(self): + # Ensure this doesn't cause a segfault. + self.assertRaises(db.DBInvalidArgError, db.DB().type) + +#---------------------------------------------------------------------- + +def test_suite(): + return unittest.makeSuite(dbobjTestCase) + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_dbshelve.py b/sys/lib/python/bsddb/test/test_dbshelve.py new file mode 100644 index 000000000..722ee5b40 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_dbshelve.py @@ -0,0 +1,306 @@ +""" +TestCases for checking dbShelve objects. +""" + +import sys, os, string +import tempfile, random +from pprint import pprint +from types import * +import unittest + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve +except ImportError: + # For Python 2.3 + from bsddb import db, dbshelve + +from test_all import verbose + + +#---------------------------------------------------------------------- + +# We want the objects to be comparable so we can test dbshelve.values +# later on. 
+class DataClass: + def __init__(self): + self.value = random.random() + + def __cmp__(self, other): + return cmp(self.value, other) + +class DBShelveTestCase(unittest.TestCase): + def setUp(self): + self.filename = tempfile.mktemp() + self.do_open() + + def tearDown(self): + self.do_close() + try: + os.remove(self.filename) + except os.error: + pass + + def populateDB(self, d): + for x in string.letters: + d['S' + x] = 10 * x # add a string + d['I' + x] = ord(x) # add an integer + d['L' + x] = [x] * 10 # add a list + + inst = DataClass() # add an instance + inst.S = 10 * x + inst.I = ord(x) + inst.L = [x] * 10 + d['O' + x] = inst + + + # overridable in derived classes to affect how the shelf is created/opened + def do_open(self): + self.d = dbshelve.open(self.filename) + + # and closed... + def do_close(self): + self.d.close() + + + + def test01_basics(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_basics..." % self.__class__.__name__ + + self.populateDB(self.d) + self.d.sync() + self.do_close() + self.do_open() + d = self.d + + l = len(d) + k = d.keys() + s = d.stat() + f = d.fd() + + if verbose: + print "length:", l + print "keys:", k + print "stats:", s + + assert 0 == d.has_key('bad key') + assert 1 == d.has_key('IA') + assert 1 == d.has_key('OA') + + d.delete('IA') + del d['OA'] + assert 0 == d.has_key('IA') + assert 0 == d.has_key('OA') + assert len(d) == l-2 + + values = [] + for key in d.keys(): + value = d[key] + values.append(value) + if verbose: + print "%s: %s" % (key, value) + self.checkrec(key, value) + + dbvalues = d.values() + assert len(dbvalues) == len(d.keys()) + values.sort() + dbvalues.sort() + assert values == dbvalues + + items = d.items() + assert len(items) == len(values) + + for key, value in items: + self.checkrec(key, value) + + assert d.get('bad key') == None + assert d.get('bad key', None) == None + assert d.get('bad key', 'a string') == 'a string' + assert d.get('bad key', [1, 2, 3]) == [1, 2, 3] + + 
d.set_get_returns_none(0) + self.assertRaises(db.DBNotFoundError, d.get, 'bad key') + d.set_get_returns_none(1) + + d.put('new key', 'new data') + assert d.get('new key') == 'new data' + assert d['new key'] == 'new data' + + + + def test02_cursors(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_cursors..." % self.__class__.__name__ + + self.populateDB(self.d) + d = self.d + + count = 0 + c = d.cursor() + rec = c.first() + while rec is not None: + count = count + 1 + if verbose: + print rec + key, value = rec + self.checkrec(key, value) + rec = c.next() + del c + + assert count == len(d) + + count = 0 + c = d.cursor() + rec = c.last() + while rec is not None: + count = count + 1 + if verbose: + print rec + key, value = rec + self.checkrec(key, value) + rec = c.prev() + + assert count == len(d) + + c.set('SS') + key, value = c.current() + self.checkrec(key, value) + del c + + + + def checkrec(self, key, value): + x = key[1] + if key[0] == 'S': + assert type(value) == StringType + assert value == 10 * x + + elif key[0] == 'I': + assert type(value) == IntType + assert value == ord(x) + + elif key[0] == 'L': + assert type(value) == ListType + assert value == [x] * 10 + + elif key[0] == 'O': + assert type(value) == InstanceType + assert value.S == 10 * x + assert value.I == ord(x) + assert value.L == [x] * 10 + + else: + raise AssertionError, 'Unknown key type, fix the test' + +#---------------------------------------------------------------------- + +class BasicShelveTestCase(DBShelveTestCase): + def do_open(self): + self.d = dbshelve.DBShelf() + self.d.open(self.filename, self.dbtype, self.dbflags) + + def do_close(self): + self.d.close() + + +class BTreeShelveTestCase(BasicShelveTestCase): + dbtype = db.DB_BTREE + dbflags = db.DB_CREATE + + +class HashShelveTestCase(BasicShelveTestCase): + dbtype = db.DB_HASH + dbflags = db.DB_CREATE + + +class ThreadBTreeShelveTestCase(BasicShelveTestCase): + dbtype = db.DB_BTREE + dbflags = db.DB_CREATE | 
db.DB_THREAD + + +class ThreadHashShelveTestCase(BasicShelveTestCase): + dbtype = db.DB_HASH + dbflags = db.DB_CREATE | db.DB_THREAD + + +#---------------------------------------------------------------------- + +class BasicEnvShelveTestCase(DBShelveTestCase): + def do_open(self): + self.homeDir = homeDir = os.path.join( + os.path.dirname(sys.argv[0]), 'db_home') + try: os.mkdir(homeDir) + except os.error: pass + self.env = db.DBEnv() + self.env.open(homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE) + + self.filename = os.path.split(self.filename)[1] + self.d = dbshelve.DBShelf(self.env) + self.d.open(self.filename, self.dbtype, self.dbflags) + + + def do_close(self): + self.d.close() + self.env.close() + + + def tearDown(self): + self.do_close() + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + + +class EnvBTreeShelveTestCase(BasicEnvShelveTestCase): + envflags = 0 + dbtype = db.DB_BTREE + dbflags = db.DB_CREATE + + +class EnvHashShelveTestCase(BasicEnvShelveTestCase): + envflags = 0 + dbtype = db.DB_HASH + dbflags = db.DB_CREATE + + +class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase): + envflags = db.DB_THREAD + dbtype = db.DB_BTREE + dbflags = db.DB_CREATE | db.DB_THREAD + + +class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase): + envflags = db.DB_THREAD + dbtype = db.DB_HASH + dbflags = db.DB_CREATE | db.DB_THREAD + + +#---------------------------------------------------------------------- +# TODO: Add test cases for a DBShelf in a RECNO DB. 
+ + +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + + suite.addTest(unittest.makeSuite(DBShelveTestCase)) + suite.addTest(unittest.makeSuite(BTreeShelveTestCase)) + suite.addTest(unittest.makeSuite(HashShelveTestCase)) + suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase)) + suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase)) + suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase)) + suite.addTest(unittest.makeSuite(EnvHashShelveTestCase)) + suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase)) + suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase)) + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_dbtables.py b/sys/lib/python/bsddb/test/test_dbtables.py new file mode 100644 index 000000000..f6724829a --- /dev/null +++ b/sys/lib/python/bsddb/test/test_dbtables.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python +# +#----------------------------------------------------------------------- +# A test suite for the table interface built on bsddb.db +#----------------------------------------------------------------------- +# +# Copyright (C) 2000, 2001 by Autonomous Zone Industries +# Copyright (C) 2002 Gregory P. Smith +# +# March 20, 2000 +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# -- Gregory P. 
Smith <greg@electricrain.com> +# +# $Id: test_dbtables.py 46737 2006-06-08 05:38:11Z gregory.p.smith $ + +import sys, os, re +try: + import cPickle + pickle = cPickle +except ImportError: + import pickle + +import unittest +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbtables +except ImportError: + # For Python 2.3 + from bsddb import db, dbtables + + + +#---------------------------------------------------------------------- + +class TableDBTestCase(unittest.TestCase): + db_home = 'db_home' + db_name = 'test-table.db' + + def setUp(self): + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: os.mkdir(homeDir) + except os.error: pass + self.tdb = dbtables.bsdTableDB( + filename='tabletest.db', dbhome=homeDir, create=1) + + def tearDown(self): + self.tdb.close() + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def test01(self): + tabname = "test01" + colname = 'cool numbers' + try: + self.tdb.Drop(tabname) + except dbtables.TableDBError: + pass + self.tdb.CreateTable(tabname, [colname]) + self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)}) + + if verbose: + self.tdb._db_print() + + values = self.tdb.Select( + tabname, [colname], conditions={colname: None}) + + colval = pickle.loads(values[0][colname]) + assert(colval > 3.141 and colval < 3.142) + + + def test02(self): + tabname = "test02" + col0 = 'coolness factor' + col1 = 'but can it fly?' 
+ col2 = 'Species' + testinfo = [ + {col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'}, + {col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'}, + {col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'} + ] + + try: + self.tdb.Drop(tabname) + except dbtables.TableDBError: + pass + self.tdb.CreateTable(tabname, [col0, col1, col2]) + for row in testinfo : + self.tdb.Insert(tabname, row) + + values = self.tdb.Select(tabname, [col2], + conditions={col0: lambda x: pickle.loads(x) >= 8}) + + assert len(values) == 2 + if values[0]['Species'] == 'Penguin' : + assert values[1]['Species'] == 'SR-71A Blackbird' + elif values[0]['Species'] == 'SR-71A Blackbird' : + assert values[1]['Species'] == 'Penguin' + else : + if verbose: + print "values= %r" % (values,) + raise "Wrong values returned!" + + def test03(self): + tabname = "test03" + try: + self.tdb.Drop(tabname) + except dbtables.TableDBError: + pass + if verbose: + print '...before CreateTable...' + self.tdb._db_print() + self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e']) + if verbose: + print '...after CreateTable...' + self.tdb._db_print() + self.tdb.Drop(tabname) + if verbose: + print '...after Drop...' 
+ self.tdb._db_print() + self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e']) + + try: + self.tdb.Insert(tabname, + {'a': "", + 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1), + 'f': "Zero"}) + assert 0 + except dbtables.TableDBError: + pass + + try: + self.tdb.Select(tabname, [], conditions={'foo': '123'}) + assert 0 + except dbtables.TableDBError: + pass + + self.tdb.Insert(tabname, + {'a': '42', + 'b': "bad", + 'c': "meep", + 'e': 'Fuzzy wuzzy was a bear'}) + self.tdb.Insert(tabname, + {'a': '581750', + 'b': "good", + 'd': "bla", + 'c': "black", + 'e': 'fuzzy was here'}) + self.tdb.Insert(tabname, + {'a': '800000', + 'b': "good", + 'd': "bla", + 'c': "black", + 'e': 'Fuzzy wuzzy is a bear'}) + + if verbose: + self.tdb._db_print() + + # this should return two rows + values = self.tdb.Select(tabname, ['b', 'a', 'd'], + conditions={'e': re.compile('wuzzy').search, + 'a': re.compile('^[0-9]+$').match}) + assert len(values) == 2 + + # now lets delete one of them and try again + self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')}) + values = self.tdb.Select( + tabname, ['a', 'd', 'b'], + conditions={'e': dbtables.PrefixCond('Fuzzy')}) + assert len(values) == 1 + assert values[0]['d'] == None + + values = self.tdb.Select(tabname, ['b'], + conditions={'c': lambda c: c == 'meep'}) + assert len(values) == 1 + assert values[0]['b'] == "bad" + + + def test04_MultiCondSelect(self): + tabname = "test04_MultiCondSelect" + try: + self.tdb.Drop(tabname) + except dbtables.TableDBError: + pass + self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e']) + + try: + self.tdb.Insert(tabname, + {'a': "", + 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1), + 'f': "Zero"}) + assert 0 + except dbtables.TableDBError: + pass + + self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D", + 'e': "E"}) + self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D", + 'e': "-E"}) + self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-", + 'e': "E-"}) + + if 
verbose: + self.tdb._db_print() + + # This select should return 0 rows. it is designed to test + # the bug identified and fixed in sourceforge bug # 590449 + # (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down + # and supplying a fix!! This one caused many headaches to say + # the least...) + values = self.tdb.Select(tabname, ['b', 'a', 'd'], + conditions={'e': dbtables.ExactCond('E'), + 'a': dbtables.ExactCond('A'), + 'd': dbtables.PrefixCond('-') + } ) + assert len(values) == 0, values + + + def test_CreateOrExtend(self): + tabname = "test_CreateOrExtend" + + self.tdb.CreateOrExtendTable( + tabname, ['name', 'taste', 'filling', 'alcohol content', 'price']) + try: + self.tdb.Insert(tabname, + {'taste': 'crap', + 'filling': 'no', + 'is it Guinness?': 'no'}) + assert 0, "Insert should've failed due to bad column name" + except: + pass + self.tdb.CreateOrExtendTable(tabname, + ['name', 'taste', 'is it Guinness?']) + + # these should both succeed as the table should contain the union of both sets of columns. 
+ self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no', + 'is it Guinness?': 'no'}) + self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes', + 'is it Guinness?': 'yes', + 'name': 'Guinness'}) + + + def test_CondObjs(self): + tabname = "test_CondObjs" + + self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p']) + + self.tdb.Insert(tabname, {'a': "the letter A", + 'b': "the letter B", + 'c': "is for cookie"}) + self.tdb.Insert(tabname, {'a': "is for aardvark", + 'e': "the letter E", + 'c': "is for cookie", + 'd': "is for dog"}) + self.tdb.Insert(tabname, {'a': "the letter A", + 'e': "the letter E", + 'c': "is for cookie", + 'p': "is for Python"}) + + values = self.tdb.Select( + tabname, ['p', 'e'], + conditions={'e': dbtables.PrefixCond('the l')}) + assert len(values) == 2, values + assert values[0]['e'] == values[1]['e'], values + assert values[0]['p'] != values[1]['p'], values + + values = self.tdb.Select( + tabname, ['d', 'a'], + conditions={'a': dbtables.LikeCond('%aardvark%')}) + assert len(values) == 1, values + assert values[0]['d'] == "is for dog", values + assert values[0]['a'] == "is for aardvark", values + + values = self.tdb.Select(tabname, None, + {'b': dbtables.Cond(), + 'e':dbtables.LikeCond('%letter%'), + 'a':dbtables.PrefixCond('is'), + 'd':dbtables.ExactCond('is for dog'), + 'c':dbtables.PrefixCond('is for'), + 'p':lambda s: not s}) + assert len(values) == 1, values + assert values[0]['d'] == "is for dog", values + assert values[0]['a'] == "is for aardvark", values + + def test_Delete(self): + tabname = "test_Delete" + self.tdb.CreateTable(tabname, ['x', 'y', 'z']) + + # prior to 2001-05-09 there was a bug where Delete() would + # fail if it encountered any rows that did not have values in + # every column. 
+ # Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff@nic.fi) + self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'}) + self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'}) + + self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')}) + values = self.tdb.Select(tabname, ['y'], + conditions={'x': dbtables.PrefixCond('X')}) + assert len(values) == 0 + + def test_Modify(self): + tabname = "test_Modify" + self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access']) + + self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc', + 'Type': 'Word', 'Access': '8'}) + self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'}) + self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'}) + + def set_type(type): + if type == None: + return 'MP3' + return type + + def increment_access(count): + return str(int(count)+1) + + def remove_value(value): + return None + + self.tdb.Modify(tabname, + conditions={'Access': dbtables.ExactCond('0')}, + mappings={'Access': remove_value}) + self.tdb.Modify(tabname, + conditions={'Name': dbtables.LikeCond('%MP3%')}, + mappings={'Type': set_type}) + self.tdb.Modify(tabname, + conditions={'Name': dbtables.LikeCond('%')}, + mappings={'Access': increment_access}) + + try: + self.tdb.Modify(tabname, + conditions={'Name': dbtables.LikeCond('%')}, + mappings={'Access': 'What is your quest?'}) + except TypeError: + # success, the string value in mappings isn't callable + pass + else: + raise RuntimeError, "why was TypeError not raised for bad callable?" 
+ + # Delete key in select conditions + values = self.tdb.Select( + tabname, None, + conditions={'Type': dbtables.ExactCond('Unknown')}) + assert len(values) == 1, values + assert values[0]['Name'] == None, values + assert values[0]['Access'] == None, values + + # Modify value by select conditions + values = self.tdb.Select( + tabname, None, + conditions={'Name': dbtables.ExactCond('Nifty.MP3')}) + assert len(values) == 1, values + assert values[0]['Type'] == "MP3", values + assert values[0]['Access'] == "2", values + + # Make sure change applied only to select conditions + values = self.tdb.Select( + tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')}) + assert len(values) == 1, values + assert values[0]['Type'] == "Word", values + assert values[0]['Access'] == "9", values + + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(TableDBTestCase)) + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_env_close.py b/sys/lib/python/bsddb/test/test_env_close.py new file mode 100644 index 000000000..c1129417d --- /dev/null +++ b/sys/lib/python/bsddb/test/test_env_close.py @@ -0,0 +1,107 @@ +"""TestCases for checking that it does not segfault when a DBEnv object +is closed before its DB objects. +""" + +import os +import sys +import tempfile +import glob +import unittest + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +from test_all import verbose + +# We're going to get warnings in this module about trying to close the db when +# its env is already closed. Let's just ignore those. 
+try: + import warnings +except ImportError: + pass +else: + warnings.filterwarnings('ignore', + message='DB could not be closed in', + category=RuntimeWarning) + + +#---------------------------------------------------------------------- + +class DBEnvClosedEarlyCrash(unittest.TestCase): + def setUp(self): + self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + try: os.mkdir(self.homeDir) + except os.error: pass + tempfile.tempdir = self.homeDir + self.filename = os.path.split(tempfile.mktemp())[1] + tempfile.tempdir = None + + def tearDown(self): + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + + def test01_close_dbenv_before_db(self): + dbenv = db.DBEnv() + dbenv.open(self.homeDir, + db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL, + 0666) + + d = db.DB(dbenv) + d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666) + + try: + dbenv.close() + except db.DBError: + try: + d.close() + except db.DBError: + return + assert 0, \ + "DB close did not raise an exception about its "\ + "DBEnv being trashed" + + # XXX This may fail when using older versions of BerkeleyDB. + # E.g. 3.2.9 never raised the exception. 
+ assert 0, "dbenv did not raise an exception about its DB being open" + + + def test02_close_dbenv_delete_db_success(self): + dbenv = db.DBEnv() + dbenv.open(self.homeDir, + db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL, + 0666) + + d = db.DB(dbenv) + d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666) + + try: + dbenv.close() + except db.DBError: + pass # good, it should raise an exception + + del d + try: + import gc + except ImportError: + gc = None + if gc: + # force d.__del__ [DB_dealloc] to be called + gc.collect() + + +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash)) + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_get_none.py b/sys/lib/python/bsddb/test/test_get_none.py new file mode 100644 index 000000000..5f09cecc6 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_get_none.py @@ -0,0 +1,101 @@ +""" +TestCases for checking set_get_returns_none. 
+""" + +import sys, os, string +import tempfile +from pprint import pprint +import unittest + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +from test_all import verbose + + +#---------------------------------------------------------------------- + +class GetReturnsNoneTestCase(unittest.TestCase): + def setUp(self): + self.filename = tempfile.mktemp() + + def tearDown(self): + try: + os.remove(self.filename) + except os.error: + pass + + + def test01_get_returns_none(self): + d = db.DB() + d.open(self.filename, db.DB_BTREE, db.DB_CREATE) + d.set_get_returns_none(1) + + for x in string.letters: + d.put(x, x * 40) + + data = d.get('bad key') + assert data == None + + data = d.get('a') + assert data == 'a'*40 + + count = 0 + c = d.cursor() + rec = c.first() + while rec: + count = count + 1 + rec = c.next() + + assert rec == None + assert count == 52 + + c.close() + d.close() + + + def test02_get_raises_exception(self): + d = db.DB() + d.open(self.filename, db.DB_BTREE, db.DB_CREATE) + d.set_get_returns_none(0) + + for x in string.letters: + d.put(x, x * 40) + + self.assertRaises(db.DBNotFoundError, d.get, 'bad key') + self.assertRaises(KeyError, d.get, 'bad key') + + data = d.get('a') + assert data == 'a'*40 + + count = 0 + exceptionHappened = 0 + c = d.cursor() + rec = c.first() + while rec: + count = count + 1 + try: + rec = c.next() + except db.DBNotFoundError: # end of the records + exceptionHappened = 1 + break + + assert rec != None + assert exceptionHappened + assert count == 52 + + c.close() + d.close() + +#---------------------------------------------------------------------- + +def test_suite(): + return unittest.makeSuite(GetReturnsNoneTestCase) + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_join.py b/sys/lib/python/bsddb/test/test_join.py new file mode 100644 index 000000000..73edd114e --- /dev/null +++ 
b/sys/lib/python/bsddb/test/test_join.py @@ -0,0 +1,120 @@ +"""TestCases for using the DB.join and DBCursor.join_item methods. +""" + +import sys, os, string +import tempfile +import time +from pprint import pprint + +try: + from threading import Thread, currentThread + have_threads = 1 +except ImportError: + have_threads = 0 + +import unittest +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve +except ImportError: + # For Python 2.3 + from bsddb import db, dbshelve + + +#---------------------------------------------------------------------- + +ProductIndex = [ + ('apple', "Convenience Store"), + ('blueberry', "Farmer's Market"), + ('shotgun', "S-Mart"), # Aisle 12 + ('pear', "Farmer's Market"), + ('chainsaw', "S-Mart"), # "Shop smart. Shop S-Mart!" + ('strawberry', "Farmer's Market"), +] + +ColorIndex = [ + ('blue', "blueberry"), + ('red', "apple"), + ('red', "chainsaw"), + ('red', "strawberry"), + ('yellow', "peach"), + ('yellow', "pear"), + ('black', "shotgun"), +] + +class JoinTestCase(unittest.TestCase): + keytype = '' + + def setUp(self): + self.filename = self.__class__.__name__ + '.db' + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: os.mkdir(homeDir) + except os.error: pass + self.env = db.DBEnv() + self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK ) + + def tearDown(self): + self.env.close() + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def test01_join(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_join..." 
% \ + self.__class__.__name__ + + # create and populate primary index + priDB = db.DB(self.env) + priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE) + map(lambda t, priDB=priDB: apply(priDB.put, t), ProductIndex) + + # create and populate secondary index + secDB = db.DB(self.env) + secDB.set_flags(db.DB_DUP | db.DB_DUPSORT) + secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE) + map(lambda t, secDB=secDB: apply(secDB.put, t), ColorIndex) + + sCursor = None + jCursor = None + try: + # lets look up all of the red Products + sCursor = secDB.cursor() + # Don't do the .set() in an assert, or you can get a bogus failure + # when running python -O + tmp = sCursor.set('red') + assert tmp + + # FIXME: jCursor doesn't properly hold a reference to its + # cursors, if they are closed before jcursor is used it + # can cause a crash. + jCursor = priDB.join([sCursor]) + + if jCursor.get(0) != ('apple', "Convenience Store"): + self.fail("join cursor positioned wrong") + if jCursor.join_item() != 'chainsaw': + self.fail("DBCursor.join_item returned wrong item") + if jCursor.get(0)[0] != 'strawberry': + self.fail("join cursor returned wrong thing") + if jCursor.get(0): # there were only three red items to return + self.fail("join cursor returned too many items") + finally: + if jCursor: + jCursor.close() + if sCursor: + sCursor.close() + priDB.close() + secDB.close() + + +def test_suite(): + suite = unittest.TestSuite() + + suite.addTest(unittest.makeSuite(JoinTestCase)) + + return suite diff --git a/sys/lib/python/bsddb/test/test_lock.py b/sys/lib/python/bsddb/test/test_lock.py new file mode 100644 index 000000000..7d7779805 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_lock.py @@ -0,0 +1,143 @@ +""" +TestCases for testing the locking sub-system. 
+""" + +import sys, os, string +import tempfile +import time +from pprint import pprint + +try: + from threading import Thread, currentThread + have_threads = 1 +except ImportError: + have_threads = 0 + + +import unittest +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + + +#---------------------------------------------------------------------- + +class LockingTestCase(unittest.TestCase): + + def setUp(self): + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: os.mkdir(homeDir) + except os.error: pass + self.env = db.DBEnv() + self.env.open(homeDir, db.DB_THREAD | db.DB_INIT_MPOOL | + db.DB_INIT_LOCK | db.DB_CREATE) + + + def tearDown(self): + self.env.close() + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + + def test01_simple(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_simple..." % self.__class__.__name__ + + anID = self.env.lock_id() + if verbose: + print "locker ID: %s" % anID + lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE) + if verbose: + print "Aquired lock: %s" % lock + time.sleep(1) + self.env.lock_put(lock) + if verbose: + print "Released lock: %s" % lock + + + + + def test02_threaded(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_threaded..." 
% self.__class__.__name__ + + threads = [] + threads.append(Thread(target = self.theThread, + args=(5, db.DB_LOCK_WRITE))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_READ))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_READ))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_WRITE))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_READ))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_READ))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_WRITE))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_WRITE))) + threads.append(Thread(target = self.theThread, + args=(1, db.DB_LOCK_WRITE))) + + for t in threads: + t.start() + for t in threads: + t.join() + + def test03_set_timeout(self): + # test that the set_timeout call works + if hasattr(self.env, 'set_timeout'): + self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT) + self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT) + self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT) + self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT) + + def theThread(self, sleepTime, lockType): + name = currentThread().getName() + if lockType == db.DB_LOCK_WRITE: + lt = "write" + else: + lt = "read" + + anID = self.env.lock_id() + if verbose: + print "%s: locker ID: %s" % (name, anID) + + lock = self.env.lock_get(anID, "some locked thing", lockType) + if verbose: + print "%s: Aquired %s lock: %s" % (name, lt, lock) + + time.sleep(sleepTime) + + self.env.lock_put(lock) + if verbose: + print "%s: Released %s lock: %s" % (name, lt, lock) + + +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + + if have_threads: + suite.addTest(unittest.makeSuite(LockingTestCase)) + else: + suite.addTest(unittest.makeSuite(LockingTestCase, 'test01')) + + return suite + + +if __name__ == '__main__': + 
unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_misc.py b/sys/lib/python/bsddb/test/test_misc.py new file mode 100644 index 000000000..88f700b46 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_misc.py @@ -0,0 +1,64 @@ +"""Miscellaneous bsddb module test cases +""" + +import os +import sys +import unittest + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbshelve, hashopen +except ImportError: + # For Python 2.3 + from bsddb import db, dbshelve, hashopen + +#---------------------------------------------------------------------- + +class MiscTestCase(unittest.TestCase): + def setUp(self): + self.filename = self.__class__.__name__ + '.db' + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + os.mkdir(homeDir) + except OSError: + pass + + def tearDown(self): + try: + os.remove(self.filename) + except OSError: + pass + import glob + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def test01_badpointer(self): + dbs = dbshelve.open(self.filename) + dbs.close() + self.assertRaises(db.DBError, dbs.get, "foo") + + def test02_db_home(self): + env = db.DBEnv() + # check for crash fixed when db_home is used before open() + assert env.db_home is None + env.open(self.homeDir, db.DB_CREATE) + assert self.homeDir == env.db_home + + def test03_repr_closed_db(self): + db = hashopen(self.filename) + db.close() + rp = repr(db) + self.assertEquals(rp, "{}") + + +#---------------------------------------------------------------------- + + +def test_suite(): + return unittest.makeSuite(MiscTestCase) + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_pickle.py b/sys/lib/python/bsddb/test/test_pickle.py new file mode 100644 index 000000000..3916e5cb9 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_pickle.py @@ -0,0 +1,75 @@ + +import sys, os, string +import pickle +try: + 
import cPickle +except ImportError: + cPickle = None +import unittest +import glob + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError, e: + # For Python 2.3 + from bsddb import db + + +#---------------------------------------------------------------------- + +class pickleTestCase(unittest.TestCase): + """Verify that DBError can be pickled and unpickled""" + db_home = 'db_home' + db_name = 'test-dbobj.db' + + def setUp(self): + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: os.mkdir(homeDir) + except os.error: pass + + def tearDown(self): + if hasattr(self, 'db'): + del self.db + if hasattr(self, 'env'): + del self.env + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + os.remove(file) + + def _base_test_pickle_DBError(self, pickle): + self.env = db.DBEnv() + self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) + self.db = db.DB(self.env) + self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE) + self.db.put('spam', 'eggs') + assert self.db['spam'] == 'eggs' + try: + self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE) + except db.DBError, egg: + pickledEgg = pickle.dumps(egg) + #print repr(pickledEgg) + rottenEgg = pickle.loads(pickledEgg) + if rottenEgg.args != egg.args or type(rottenEgg) != type(egg): + raise Exception, (rottenEgg, '!=', egg) + else: + raise Exception, "where's my DBError exception?!?" 
+ + self.db.close() + self.env.close() + + def test01_pickle_DBError(self): + self._base_test_pickle_DBError(pickle=pickle) + + if cPickle: + def test02_cPickle_DBError(self): + self._base_test_pickle_DBError(pickle=cPickle) + +#---------------------------------------------------------------------- + +def test_suite(): + return unittest.makeSuite(pickleTestCase) + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_queue.py b/sys/lib/python/bsddb/test/test_queue.py new file mode 100644 index 000000000..95cf20d0d --- /dev/null +++ b/sys/lib/python/bsddb/test/test_queue.py @@ -0,0 +1,173 @@ +""" +TestCases for exercising a Queue DB. +""" + +import sys, os, string +import tempfile +from pprint import pprint +import unittest + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +from test_all import verbose + + +#---------------------------------------------------------------------- + +class SimpleQueueTestCase(unittest.TestCase): + def setUp(self): + self.filename = tempfile.mktemp() + + def tearDown(self): + try: + os.remove(self.filename) + except os.error: + pass + + + def test01_basic(self): + # Basic Queue tests using the deprecated DBCursor.consume method. + + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_basic..." 
% self.__class__.__name__ + + d = db.DB() + d.set_re_len(40) # Queues must be fixed length + d.open(self.filename, db.DB_QUEUE, db.DB_CREATE) + + if verbose: + print "before appends" + '-' * 30 + pprint(d.stat()) + + for x in string.letters: + d.append(x * 40) + + assert len(d) == 52 + + d.put(100, "some more data") + d.put(101, "and some more ") + d.put(75, "out of order") + d.put(1, "replacement data") + + assert len(d) == 55 + + if verbose: + print "before close" + '-' * 30 + pprint(d.stat()) + + d.close() + del d + d = db.DB() + d.open(self.filename) + + if verbose: + print "after open" + '-' * 30 + pprint(d.stat()) + + d.append("one more") + c = d.cursor() + + if verbose: + print "after append" + '-' * 30 + pprint(d.stat()) + + rec = c.consume() + while rec: + if verbose: + print rec + rec = c.consume() + c.close() + + if verbose: + print "after consume loop" + '-' * 30 + pprint(d.stat()) + + assert len(d) == 0, \ + "if you see this message then you need to rebuild " \ + "BerkeleyDB 3.1.17 with the patch in patches/qam_stat.diff" + + d.close() + + + + def test02_basicPost32(self): + # Basic Queue tests using the new DB.consume method in DB 3.2+ + # (No cursor needed) + + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_basicPost32..." % self.__class__.__name__ + + if db.version() < (3, 2, 0): + if verbose: + print "Test not run, DB not new enough..." 
+ return + + d = db.DB() + d.set_re_len(40) # Queues must be fixed length + d.open(self.filename, db.DB_QUEUE, db.DB_CREATE) + + if verbose: + print "before appends" + '-' * 30 + pprint(d.stat()) + + for x in string.letters: + d.append(x * 40) + + assert len(d) == 52 + + d.put(100, "some more data") + d.put(101, "and some more ") + d.put(75, "out of order") + d.put(1, "replacement data") + + assert len(d) == 55 + + if verbose: + print "before close" + '-' * 30 + pprint(d.stat()) + + d.close() + del d + d = db.DB() + d.open(self.filename) + #d.set_get_returns_none(true) + + if verbose: + print "after open" + '-' * 30 + pprint(d.stat()) + + d.append("one more") + + if verbose: + print "after append" + '-' * 30 + pprint(d.stat()) + + rec = d.consume() + while rec: + if verbose: + print rec + rec = d.consume() + + if verbose: + print "after consume loop" + '-' * 30 + pprint(d.stat()) + + d.close() + + + +#---------------------------------------------------------------------- + +def test_suite(): + return unittest.makeSuite(SimpleQueueTestCase) + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_recno.py b/sys/lib/python/bsddb/test/test_recno.py new file mode 100644 index 000000000..f1ea56afd --- /dev/null +++ b/sys/lib/python/bsddb/test/test_recno.py @@ -0,0 +1,295 @@ +"""TestCases for exercising a Recno DB. 
+""" + +import os +import sys +import errno +import tempfile +from pprint import pprint +import unittest + +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + # For Python 2.3 + from bsddb import db + +letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + + +#---------------------------------------------------------------------- + +class SimpleRecnoTestCase(unittest.TestCase): + def setUp(self): + self.filename = tempfile.mktemp() + + def tearDown(self): + try: + os.remove(self.filename) + except OSError, e: + if e.errno <> errno.EEXIST: raise + + def test01_basic(self): + d = db.DB() + + get_returns_none = d.set_get_returns_none(2) + d.set_get_returns_none(get_returns_none) + + d.open(self.filename, db.DB_RECNO, db.DB_CREATE) + + for x in letters: + recno = d.append(x * 60) + assert type(recno) == type(0) + assert recno >= 1 + if verbose: + print recno, + + if verbose: print + + stat = d.stat() + if verbose: + pprint(stat) + + for recno in range(1, len(d)+1): + data = d[recno] + if verbose: + print data + + assert type(data) == type("") + assert data == d.get(recno) + + try: + data = d[0] # This should raise a KeyError!?!?! 
+ except db.DBInvalidArgError, val: + assert val[0] == db.EINVAL + if verbose: print val + else: + self.fail("expected exception") + + # test that has_key raises DB exceptions (fixed in pybsddb 4.3.2) + try: + d.has_key(0) + except db.DBError, val: + pass + else: + self.fail("has_key did not raise a proper exception") + + try: + data = d[100] + except KeyError: + pass + else: + self.fail("expected exception") + + try: + data = d.get(100) + except db.DBNotFoundError, val: + if get_returns_none: + self.fail("unexpected exception") + else: + assert data == None + + keys = d.keys() + if verbose: + print keys + assert type(keys) == type([]) + assert type(keys[0]) == type(123) + assert len(keys) == len(d) + + items = d.items() + if verbose: + pprint(items) + assert type(items) == type([]) + assert type(items[0]) == type(()) + assert len(items[0]) == 2 + assert type(items[0][0]) == type(123) + assert type(items[0][1]) == type("") + assert len(items) == len(d) + + assert d.has_key(25) + + del d[25] + assert not d.has_key(25) + + d.delete(13) + assert not d.has_key(13) + + data = d.get_both(26, "z" * 60) + assert data == "z" * 60 + if verbose: + print data + + fd = d.fd() + if verbose: + print fd + + c = d.cursor() + rec = c.first() + while rec: + if verbose: + print rec + rec = c.next() + + c.set(50) + rec = c.current() + if verbose: + print rec + + c.put(-1, "a replacement record", db.DB_CURRENT) + + c.set(50) + rec = c.current() + assert rec == (50, "a replacement record") + if verbose: + print rec + + rec = c.set_range(30) + if verbose: + print rec + + # test that non-existant key lookups work (and that + # DBC_set_range doesn't have a memleak under valgrind) + rec = c.set_range(999999) + assert rec == None + if verbose: + print rec + + c.close() + d.close() + + d = db.DB() + d.open(self.filename) + c = d.cursor() + + # put a record beyond the consecutive end of the recno's + d[100] = "way out there" + assert d[100] == "way out there" + + try: + data = d[99] + except 
KeyError: + pass + else: + self.fail("expected exception") + + try: + d.get(99) + except db.DBKeyEmptyError, val: + if get_returns_none: + self.fail("unexpected DBKeyEmptyError exception") + else: + assert val[0] == db.DB_KEYEMPTY + if verbose: print val + else: + if not get_returns_none: + self.fail("expected exception") + + rec = c.set(40) + while rec: + if verbose: + print rec + rec = c.next() + + c.close() + d.close() + + def test02_WithSource(self): + """ + A Recno file that is given a "backing source file" is essentially a + simple ASCII file. Normally each record is delimited by \n and so is + just a line in the file, but you can set a different record delimiter + if needed. + """ + source = os.path.join(os.path.dirname(sys.argv[0]), + 'db_home/test_recno.txt') + if not os.path.isdir('db_home'): + os.mkdir('db_home') + f = open(source, 'w') # create the file + f.close() + + d = db.DB() + # This is the default value, just checking if both int + d.set_re_delim(0x0A) + d.set_re_delim('\n') # and char can be used... + d.set_re_source(source) + d.open(self.filename, db.DB_RECNO, db.DB_CREATE) + + data = "The quick brown fox jumped over the lazy dog".split() + for datum in data: + d.append(datum) + d.sync() + d.close() + + # get the text from the backing source + text = open(source, 'r').read() + text = text.strip() + if verbose: + print text + print data + print text.split('\n') + + assert text.split('\n') == data + + # open as a DB again + d = db.DB() + d.set_re_source(source) + d.open(self.filename, db.DB_RECNO) + + d[3] = 'reddish-brown' + d[8] = 'comatose' + + d.sync() + d.close() + + text = open(source, 'r').read() + text = text.strip() + if verbose: + print text + print text.split('\n') + + assert text.split('\n') == \ + "The quick reddish-brown fox jumped over the comatose dog".split() + + def test03_FixedLength(self): + d = db.DB() + d.set_re_len(40) # fixed length records, 40 bytes long + d.set_re_pad('-') # sets the pad character... 
+ d.set_re_pad(45) # ...test both int and char + d.open(self.filename, db.DB_RECNO, db.DB_CREATE) + + for x in letters: + d.append(x * 35) # These will be padded + + d.append('.' * 40) # this one will be exact + + try: # this one will fail + d.append('bad' * 20) + except db.DBInvalidArgError, val: + assert val[0] == db.EINVAL + if verbose: print val + else: + self.fail("expected exception") + + c = d.cursor() + rec = c.first() + while rec: + if verbose: + print rec + rec = c.next() + + c.close() + d.close() + + +#---------------------------------------------------------------------- + + +def test_suite(): + return unittest.makeSuite(SimpleRecnoTestCase) + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_sequence.py b/sys/lib/python/bsddb/test/test_sequence.py new file mode 100644 index 000000000..979f858c4 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_sequence.py @@ -0,0 +1,112 @@ +import unittest +import os +import sys +import tempfile +import glob + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db +except ImportError: + from bsddb import db + +from test_all import verbose + + +class DBSequenceTest(unittest.TestCase): + def setUp(self): + self.int_32_max = 0x100000000 + self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + try: + os.mkdir(self.homeDir) + except os.error: + pass + tempfile.tempdir = self.homeDir + self.filename = os.path.split(tempfile.mktemp())[1] + tempfile.tempdir = None + + self.dbenv = db.DBEnv() + self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0666) + self.d = db.DB(self.dbenv) + self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0666) + + def tearDown(self): + if hasattr(self, 'seq'): + self.seq.close() + del self.seq + if hasattr(self, 'd'): + self.d.close() + del self.d + if hasattr(self, 'dbenv'): + self.dbenv.close() + del self.dbenv + + files = glob.glob(os.path.join(self.homeDir, '*')) + for file in files: + 
os.remove(file) + + def test_get(self): + self.seq = db.DBSequence(self.d, flags=0) + start_value = 10 * self.int_32_max + self.assertEqual(0xA00000000, start_value) + self.assertEquals(None, self.seq.init_value(start_value)) + self.assertEquals(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE)) + self.assertEquals(start_value, self.seq.get(5)) + self.assertEquals(start_value + 5, self.seq.get()) + + def test_remove(self): + self.seq = db.DBSequence(self.d, flags=0) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEquals(None, self.seq.remove(txn=None, flags=0)) + del self.seq + + def test_get_key(self): + self.seq = db.DBSequence(self.d, flags=0) + key = 'foo' + self.assertEquals(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE)) + self.assertEquals(key, self.seq.get_key()) + + def test_get_dbp(self): + self.seq = db.DBSequence(self.d, flags=0) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEquals(self.d, self.seq.get_dbp()) + + def test_cachesize(self): + self.seq = db.DBSequence(self.d, flags=0) + cashe_size = 10 + self.assertEquals(None, self.seq.set_cachesize(cashe_size)) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEquals(cashe_size, self.seq.get_cachesize()) + + def test_flags(self): + self.seq = db.DBSequence(self.d, flags=0) + flag = db.DB_SEQ_WRAP; + self.assertEquals(None, self.seq.set_flags(flag)) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEquals(flag, self.seq.get_flags() & flag) + + def test_range(self): + self.seq = db.DBSequence(self.d, flags=0) + seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1) + self.assertEquals(None, self.seq.set_range(seq_range)) + self.seq.init_value(seq_range[0]) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + self.assertEquals(seq_range, self.seq.get_range()) + + def 
test_stat(self): + self.seq = db.DBSequence(self.d, flags=0) + self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE)) + stat = self.seq.stat() + for param in ('nowait', 'min', 'max', 'value', 'current', + 'flags', 'cache_size', 'last_value', 'wait'): + self.assertTrue(param in stat, "parameter %s isn't in stat info" % param) + +def test_suite(): + suite = unittest.TestSuite() + if db.version() >= (4,3): + suite.addTest(unittest.makeSuite(DBSequenceTest)) + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') diff --git a/sys/lib/python/bsddb/test/test_thread.py b/sys/lib/python/bsddb/test/test_thread.py new file mode 100644 index 000000000..31964f0b2 --- /dev/null +++ b/sys/lib/python/bsddb/test/test_thread.py @@ -0,0 +1,506 @@ +"""TestCases for multi-threaded access to a DB. +""" + +import os +import sys +import time +import errno +import shutil +import tempfile +from pprint import pprint +from random import random + +try: + True, False +except NameError: + True = 1 + False = 0 + +DASH = '-' + +try: + from threading import Thread, currentThread + have_threads = True +except ImportError: + have_threads = False + +try: + WindowsError +except NameError: + class WindowsError(Exception): + pass + +import unittest +from test_all import verbose + +try: + # For Pythons w/distutils pybsddb + from bsddb3 import db, dbutils +except ImportError: + # For Python 2.3 + from bsddb import db, dbutils + + +#---------------------------------------------------------------------- + +class BaseThreadedTestCase(unittest.TestCase): + dbtype = db.DB_UNKNOWN # must be set in derived class + dbopenflags = 0 + dbsetflags = 0 + envflags = 0 + + def setUp(self): + if verbose: + dbutils._deadlock_VerboseFile = sys.stdout + + homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') + self.homeDir = homeDir + try: + os.mkdir(homeDir) + except OSError, e: + if e.errno <> errno.EEXIST: raise + self.env = db.DBEnv() + 
self.setEnvOpts() + self.env.open(homeDir, self.envflags | db.DB_CREATE) + + self.filename = self.__class__.__name__ + '.db' + self.d = db.DB(self.env) + if self.dbsetflags: + self.d.set_flags(self.dbsetflags) + self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE) + + def tearDown(self): + self.d.close() + self.env.close() + shutil.rmtree(self.homeDir) + + def setEnvOpts(self): + pass + + def makeData(self, key): + return DASH.join([key] * 5) + + +#---------------------------------------------------------------------- + + +class ConcurrentDataStoreBase(BaseThreadedTestCase): + dbopenflags = db.DB_THREAD + envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL + readers = 0 # derived class should set + writers = 0 + records = 1000 + + def test01_1WriterMultiReaders(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test01_1WriterMultiReaders..." % \ + self.__class__.__name__ + + threads = [] + for x in range(self.writers): + wt = Thread(target = self.writerThread, + args = (self.d, self.records, x), + name = 'writer %d' % x, + )#verbose = verbose) + threads.append(wt) + + for x in range(self.readers): + rt = Thread(target = self.readerThread, + args = (self.d, x), + name = 'reader %d' % x, + )#verbose = verbose) + threads.append(rt) + + for t in threads: + t.start() + for t in threads: + t.join() + + def writerThread(self, d, howMany, writerNum): + #time.sleep(0.01 * writerNum + 0.01) + name = currentThread().getName() + start = howMany * writerNum + stop = howMany * (writerNum + 1) - 1 + if verbose: + print "%s: creating records %d - %d" % (name, start, stop) + + for x in range(start, stop): + key = '%04d' % x + dbutils.DeadlockWrap(d.put, key, self.makeData(key), + max_retries=12) + if verbose and x % 100 == 0: + print "%s: records %d - %d finished" % (name, start, x) + + if verbose: + print "%s: finished creating records" % name + +## # Each write-cursor will be exclusive, the only one that can update the DB... 
+## if verbose: print "%s: deleting a few records" % name +## c = d.cursor(flags = db.DB_WRITECURSOR) +## for x in range(10): +## key = int(random() * howMany) + start +## key = '%04d' % key +## if d.has_key(key): +## c.set(key) +## c.delete() + +## c.close() + if verbose: + print "%s: thread finished" % name + + def readerThread(self, d, readerNum): + time.sleep(0.01 * readerNum) + name = currentThread().getName() + + for loop in range(5): + c = d.cursor() + count = 0 + rec = c.first() + while rec: + count += 1 + key, data = rec + self.assertEqual(self.makeData(key), data) + rec = c.next() + if verbose: + print "%s: found %d records" % (name, count) + c.close() + time.sleep(0.05) + + if verbose: + print "%s: thread finished" % name + + +class BTreeConcurrentDataStore(ConcurrentDataStoreBase): + dbtype = db.DB_BTREE + writers = 2 + readers = 10 + records = 1000 + + +class HashConcurrentDataStore(ConcurrentDataStoreBase): + dbtype = db.DB_HASH + writers = 2 + readers = 10 + records = 1000 + + +#---------------------------------------------------------------------- + +class SimpleThreadedBase(BaseThreadedTestCase): + dbopenflags = db.DB_THREAD + envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK + readers = 5 + writers = 3 + records = 1000 + + def setEnvOpts(self): + self.env.set_lk_detect(db.DB_LOCK_DEFAULT) + + def test02_SimpleLocks(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test02_SimpleLocks..." 
% self.__class__.__name__ + + threads = [] + for x in range(self.writers): + wt = Thread(target = self.writerThread, + args = (self.d, self.records, x), + name = 'writer %d' % x, + )#verbose = verbose) + threads.append(wt) + for x in range(self.readers): + rt = Thread(target = self.readerThread, + args = (self.d, x), + name = 'reader %d' % x, + )#verbose = verbose) + threads.append(rt) + + for t in threads: + t.start() + for t in threads: + t.join() + + def writerThread(self, d, howMany, writerNum): + name = currentThread().getName() + start = howMany * writerNum + stop = howMany * (writerNum + 1) - 1 + if verbose: + print "%s: creating records %d - %d" % (name, start, stop) + + # create a bunch of records + for x in xrange(start, stop): + key = '%04d' % x + dbutils.DeadlockWrap(d.put, key, self.makeData(key), + max_retries=12) + + if verbose and x % 100 == 0: + print "%s: records %d - %d finished" % (name, start, x) + + # do a bit or reading too + if random() <= 0.05: + for y in xrange(start, x): + key = '%04d' % x + data = dbutils.DeadlockWrap(d.get, key, max_retries=12) + self.assertEqual(data, self.makeData(key)) + + # flush them + try: + dbutils.DeadlockWrap(d.sync, max_retries=12) + except db.DBIncompleteError, val: + if verbose: + print "could not complete sync()..." 
+ + # read them back, deleting a few + for x in xrange(start, stop): + key = '%04d' % x + data = dbutils.DeadlockWrap(d.get, key, max_retries=12) + if verbose and x % 100 == 0: + print "%s: fetched record (%s, %s)" % (name, key, data) + self.assertEqual(data, self.makeData(key)) + if random() <= 0.10: + dbutils.DeadlockWrap(d.delete, key, max_retries=12) + if verbose: + print "%s: deleted record %s" % (name, key) + + if verbose: + print "%s: thread finished" % name + + def readerThread(self, d, readerNum): + time.sleep(0.01 * readerNum) + name = currentThread().getName() + + for loop in range(5): + c = d.cursor() + count = 0 + rec = dbutils.DeadlockWrap(c.first, max_retries=10) + while rec: + count += 1 + key, data = rec + self.assertEqual(self.makeData(key), data) + rec = dbutils.DeadlockWrap(c.next, max_retries=10) + if verbose: + print "%s: found %d records" % (name, count) + c.close() + time.sleep(0.05) + + if verbose: + print "%s: thread finished" % name + + +class BTreeSimpleThreaded(SimpleThreadedBase): + dbtype = db.DB_BTREE + + +class HashSimpleThreaded(SimpleThreadedBase): + dbtype = db.DB_HASH + + +#---------------------------------------------------------------------- + + +class ThreadedTransactionsBase(BaseThreadedTestCase): + dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT + envflags = (db.DB_THREAD | + db.DB_INIT_MPOOL | + db.DB_INIT_LOCK | + db.DB_INIT_LOG | + db.DB_INIT_TXN + ) + readers = 0 + writers = 0 + records = 2000 + txnFlag = 0 + + def setEnvOpts(self): + #self.env.set_lk_detect(db.DB_LOCK_DEFAULT) + pass + + def test03_ThreadedTransactions(self): + if verbose: + print '\n', '-=' * 30 + print "Running %s.test03_ThreadedTransactions..." 
% \ + self.__class__.__name__ + + threads = [] + for x in range(self.writers): + wt = Thread(target = self.writerThread, + args = (self.d, self.records, x), + name = 'writer %d' % x, + )#verbose = verbose) + threads.append(wt) + + for x in range(self.readers): + rt = Thread(target = self.readerThread, + args = (self.d, x), + name = 'reader %d' % x, + )#verbose = verbose) + threads.append(rt) + + dt = Thread(target = self.deadlockThread) + dt.start() + + for t in threads: + t.start() + for t in threads: + t.join() + + self.doLockDetect = False + dt.join() + + def doWrite(self, d, name, start, stop): + finished = False + while not finished: + try: + txn = self.env.txn_begin(None, self.txnFlag) + for x in range(start, stop): + key = '%04d' % x + d.put(key, self.makeData(key), txn) + if verbose and x % 100 == 0: + print "%s: records %d - %d finished" % (name, start, x) + txn.commit() + finished = True + except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val: + if verbose: + print "%s: Aborting transaction (%s)" % (name, val[1]) + txn.abort() + time.sleep(0.05) + + def writerThread(self, d, howMany, writerNum): + name = currentThread().getName() + start = howMany * writerNum + stop = howMany * (writerNum + 1) - 1 + if verbose: + print "%s: creating records %d - %d" % (name, start, stop) + + step = 100 + for x in range(start, stop, step): + self.doWrite(d, name, x, min(stop, x+step)) + + if verbose: + print "%s: finished creating records" % name + if verbose: + print "%s: deleting a few records" % name + + finished = False + while not finished: + try: + recs = [] + txn = self.env.txn_begin(None, self.txnFlag) + for x in range(10): + key = int(random() * howMany) + start + key = '%04d' % key + data = d.get(key, None, txn, db.DB_RMW) + if data is not None: + d.delete(key, txn) + recs.append(key) + txn.commit() + finished = True + if verbose: + print "%s: deleted records %s" % (name, recs) + except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val: + if 
verbose: + print "%s: Aborting transaction (%s)" % (name, val[1]) + txn.abort() + time.sleep(0.05) + + if verbose: + print "%s: thread finished" % name + + def readerThread(self, d, readerNum): + time.sleep(0.01 * readerNum + 0.05) + name = currentThread().getName() + + for loop in range(5): + finished = False + while not finished: + try: + txn = self.env.txn_begin(None, self.txnFlag) + c = d.cursor(txn) + count = 0 + rec = c.first() + while rec: + count += 1 + key, data = rec + self.assertEqual(self.makeData(key), data) + rec = c.next() + if verbose: print "%s: found %d records" % (name, count) + c.close() + txn.commit() + finished = True + except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val: + if verbose: + print "%s: Aborting transaction (%s)" % (name, val[1]) + c.close() + txn.abort() + time.sleep(0.05) + + time.sleep(0.05) + + if verbose: + print "%s: thread finished" % name + + def deadlockThread(self): + self.doLockDetect = True + while self.doLockDetect: + time.sleep(0.5) + try: + aborted = self.env.lock_detect( + db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT) + if verbose and aborted: + print "deadlock: Aborted %d deadlocked transaction(s)" \ + % aborted + except db.DBError: + pass + + +class BTreeThreadedTransactions(ThreadedTransactionsBase): + dbtype = db.DB_BTREE + writers = 3 + readers = 5 + records = 2000 + +class HashThreadedTransactions(ThreadedTransactionsBase): + dbtype = db.DB_HASH + writers = 1 + readers = 5 + records = 2000 + +class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase): + dbtype = db.DB_BTREE + writers = 3 + readers = 5 + records = 2000 + txnFlag = db.DB_TXN_NOWAIT + +class HashThreadedNoWaitTransactions(ThreadedTransactionsBase): + dbtype = db.DB_HASH + writers = 1 + readers = 5 + records = 2000 + txnFlag = db.DB_TXN_NOWAIT + + +#---------------------------------------------------------------------- + +def test_suite(): + suite = unittest.TestSuite() + + if have_threads: + 
suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore)) + suite.addTest(unittest.makeSuite(HashConcurrentDataStore)) + suite.addTest(unittest.makeSuite(BTreeSimpleThreaded)) + suite.addTest(unittest.makeSuite(HashSimpleThreaded)) + suite.addTest(unittest.makeSuite(BTreeThreadedTransactions)) + suite.addTest(unittest.makeSuite(HashThreadedTransactions)) + suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions)) + suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions)) + + else: + print "Threads not available, skipping thread tests." + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='test_suite') |