author | Ori Bernstein <ori@eigenstate.org> | 2021-06-14 00:00:37 +0000
---|---|---
committer | Ori Bernstein <ori@eigenstate.org> | 2021-06-14 00:00:37 +0000
commit | a73a964e51247ed169d322c725a3a18859f109a3 (patch) |
tree | 3f752d117274d444bda44e85609aeac1acf313f3 /sys/lib/python/logging |
parent | e64efe273fcb921a61bf27d33b230c4e64fcd425 (diff) |
python, hg: tow outside the environment.
they've served us well, and can ride off into the sunset.
Diffstat (limited to 'sys/lib/python/logging')
-rw-r--r-- | sys/lib/python/logging/__init__.py | 1372
-rw-r--r-- | sys/lib/python/logging/config.py | 348
-rw-r--r-- | sys/lib/python/logging/handlers.py | 1019
3 files changed, 0 insertions, 2739 deletions
diff --git a/sys/lib/python/logging/__init__.py b/sys/lib/python/logging/__init__.py deleted file mode 100644 index b1887dfe0..000000000 --- a/sys/lib/python/logging/__init__.py +++ /dev/null @@ -1,1372 +0,0 @@ -# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Logging package for Python. Based on PEP 282 and comments thereto in -comp.lang.python, and influenced by Apache's log4j system. - -Should work under Python versions >= 1.5.2, except that source line -information is not available unless 'sys._getframe()' is. - -Copyright (C) 2001-2007 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging' and log away! -""" - -import sys, os, types, time, string, cStringIO, traceback - -try: - import codecs -except ImportError: - codecs = None - -try: - import thread - import threading -except ImportError: - thread = None - -__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" -__status__ = "production" -__version__ = "0.5.0.2" -__date__ = "16 February 2007" - -#--------------------------------------------------------------------------- -# Miscellaneous module data -#--------------------------------------------------------------------------- - -# -# _srcfile is used when walking the stack to check when we've got the first -# caller stack frame. -# -if hasattr(sys, 'frozen'): #support for py2exe - _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:]) -elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']: - _srcfile = __file__[:-4] + '.py' -else: - _srcfile = __file__ -_srcfile = os.path.normcase(_srcfile) - -# next bit filched from 1.5.2's inspect.py -def currentframe(): - """Return the frame object for the caller's stack frame.""" - try: - raise Exception - except: - return sys.exc_traceback.tb_frame.f_back - -if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3) -# done filching - -# _srcfile is only used in conjunction with sys._getframe(). -# To provide compatibility with older versions of Python, set _srcfile -# to None if _getframe() is not available; this value will prevent -# findCaller() from being called. 
-#if not hasattr(sys, "_getframe"): -# _srcfile = None - -# -#_startTime is used as the base when calculating the relative time of events -# -_startTime = time.time() - -# -#raiseExceptions is used to see if exceptions during handling should be -#propagated -# -raiseExceptions = 1 - -# -# If you don't want threading information in the log, set this to zero -# -logThreads = 1 - -# -# If you don't want process information in the log, set this to zero -# -logProcesses = 1 - -#--------------------------------------------------------------------------- -# Level related stuff -#--------------------------------------------------------------------------- -# -# Default levels and level names, these can be replaced with any positive set -# of values having corresponding names. There is a pseudo-level, NOTSET, which -# is only really there as a lower limit for user-defined levels. Handlers and -# loggers are initialized with NOTSET so that they will log all messages, even -# at user-defined levels. -# - -CRITICAL = 50 -FATAL = CRITICAL -ERROR = 40 -WARNING = 30 -WARN = WARNING -INFO = 20 -DEBUG = 10 -NOTSET = 0 - -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, -} - -def getLevelName(level): - """ - Return the textual representation of logging level 'level'. - - If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, - INFO, DEBUG) then you get the corresponding string. If you have - associated levels with names using addLevelName then the name you have - associated with 'level' is returned. - - If a numeric value corresponding to one of the defined levels is passed - in, the corresponding string representation is returned. - - Otherwise, the string "Level %s" % level is returned. - """ - return _levelNames.get(level, ("Level %s" % level)) - -def addLevelName(level, levelName): - """ - Associate 'levelName' with 'level'. - - This is used when converting levels to text during message formatting. - """ - _acquireLock() - try: #unlikely to cause an exception, but you never know... - _levelNames[level] = levelName - _levelNames[levelName] = level - finally: - _releaseLock() - -#--------------------------------------------------------------------------- -# Thread-related stuff -#--------------------------------------------------------------------------- - -# -#_lock is used to serialize access to shared data structures in this module. -#This needs to be an RLock because fileConfig() creates Handlers and so -#might arbitrary user threads. Since Handler.__init__() updates the shared -#dictionary _handlers, it needs to acquire the lock. But if configuring, -#the lock would already have been acquired - so we need an RLock. -#The same argument applies to Loggers and Manager.loggerDict. -# -_lock = None - -def _acquireLock(): - """ - Acquire the module-level lock for serializing access to shared data. - - This should be released with _releaseLock(). - """ - global _lock - if (not _lock) and thread: - _lock = threading.RLock() - if _lock: - _lock.acquire() - -def _releaseLock(): - """ - Release the module-level lock acquired by calling _acquireLock(). 
- """ - if _lock: - _lock.release() - -#--------------------------------------------------------------------------- -# The logging record -#--------------------------------------------------------------------------- - -class LogRecord: - """ - A LogRecord instance represents an event being logged. - - LogRecord instances are created every time something is logged. They - contain all the information pertinent to the event being logged. The - main information passed in is in msg and args, which are combined - using str(msg) % args to create the message field of the record. The - record also includes information such as when the record was created, - the source line where the logging call was made, and any exception - information to be logged. - """ - def __init__(self, name, level, pathname, lineno, - msg, args, exc_info, func=None): - """ - Initialize a logging record with interesting information. - """ - ct = time.time() - self.name = name - self.msg = msg - # - # The following statement allows passing of a dictionary as a sole - # argument, so that you can do something like - # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) - # Suggested by Stefan Behnel. - # Note that without the test for args[0], we get a problem because - # during formatting, we test to see if the arg is present using - # 'if self.args:'. If the event being logged is e.g. 'Value is %d' - # and if the passed arg fails 'if self.args:' then no formatting - # is done. For example, logger.warn('Value is %d', 0) would log - # 'Value is %d' instead of 'Value is 0'. - # For the use case of passing a dictionary, this should not be a - # problem. - if args and (len(args) == 1) and args[0] and (type(args[0]) == types.DictType): - args = args[0] - self.args = args - self.levelname = getLevelName(level) - self.levelno = level - self.pathname = pathname - try: - self.filename = os.path.basename(pathname) - self.module = os.path.splitext(self.filename)[0] - except: - self.filename = pathname - self.module = "Unknown module" - self.exc_info = exc_info - self.exc_text = None # used to cache the traceback text - self.lineno = lineno - self.funcName = func - self.created = ct - self.msecs = (ct - long(ct)) * 1000 - self.relativeCreated = (self.created - _startTime) * 1000 - if logThreads and thread: - self.thread = thread.get_ident() - self.threadName = threading.currentThread().getName() - else: - self.thread = None - self.threadName = None - if logProcesses and hasattr(os, 'getpid'): - self.process = os.getpid() - else: - self.process = None - - def __str__(self): - return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, - self.pathname, self.lineno, self.msg) - - def getMessage(self): - """ - Return the message for this LogRecord. - - Return the message for this LogRecord after merging any user-supplied - arguments with the message. - """ - if not hasattr(types, "UnicodeType"): #if no unicode support... - msg = str(self.msg) - else: - msg = self.msg - if type(msg) not in (types.UnicodeType, types.StringType): - try: - msg = str(self.msg) - except UnicodeError: - msg = self.msg #Defer encoding till later - if self.args: - msg = msg % self.args - return msg - -def makeLogRecord(dict): - """ - Make a LogRecord whose attributes are defined by the specified dictionary, - This function is useful for converting a logging event received over - a socket connection (which is sent as a dictionary) into a LogRecord - instance. 
- """ - rv = LogRecord(None, None, "", 0, "", (), None, None) - rv.__dict__.update(dict) - return rv - -#--------------------------------------------------------------------------- -# Formatter classes and functions -#--------------------------------------------------------------------------- - -class Formatter: - """ - Formatter instances are used to convert a LogRecord to text. - - Formatters need to know how a LogRecord is constructed. They are - responsible for converting a LogRecord to (usually) a string which can - be interpreted by either a human or an external system. The base Formatter - allows a formatting string to be specified. If none is supplied, the - default value of "%s(message)\\n" is used. - - The Formatter can be initialized with a format string which makes use of - knowledge of the LogRecord attributes - e.g. the default value mentioned - above makes use of the fact that the user's message and arguments are pre- - formatted into a LogRecord's message attribute. Currently, the useful - attributes in a LogRecord are described by: - - %(name)s Name of the logger (logging channel) - %(levelno)s Numeric logging level for the message (DEBUG, INFO, - WARNING, ERROR, CRITICAL) - %(levelname)s Text logging level for the message ("DEBUG", "INFO", - "WARNING", "ERROR", "CRITICAL") - %(pathname)s Full pathname of the source file where the logging - call was issued (if available) - %(filename)s Filename portion of pathname - %(module)s Module (name portion of filename) - %(lineno)d Source line number where the logging call was issued - (if available) - %(funcName)s Function name - %(created)f Time when the LogRecord was created (time.time() - return value) - %(asctime)s Textual time when the LogRecord was created - %(msecs)d Millisecond portion of the creation time - %(relativeCreated)d Time in milliseconds when the LogRecord was created, - relative to the time the logging module was loaded - (typically at application startup time) - %(thread)d Thread ID (if available) - %(threadName)s Thread name (if available) - %(process)d Process ID (if available) - %(message)s The result of record.getMessage(), computed just as - the record is emitted - """ - - converter = time.localtime - - def __init__(self, fmt=None, datefmt=None): - """ - Initialize the formatter with specified format strings. - - Initialize the formatter either with the specified format string, or a - default as described above. Allow for specialized date formatting with - the optional datefmt argument (if omitted, you get the ISO8601 format). - """ - if fmt: - self._fmt = fmt - else: - self._fmt = "%(message)s" - self.datefmt = datefmt - - def formatTime(self, record, datefmt=None): - """ - Return the creation time of the specified LogRecord as formatted text. - - This method should be called from format() by a formatter which - wants to make use of a formatted time. This method can be overridden - in formatters to provide for any specific requirement, but the - basic behaviour is as follows: if datefmt (a string) is specified, - it is used with time.strftime() to format the creation time of the - record. Otherwise, the ISO8601 format is used. The resulting - string is returned. This function uses a user-configurable function - to convert the creation time to a tuple. By default, time.localtime() - is used; to change this for a particular formatter instance, set the - 'converter' attribute to a function with the same signature as - time.localtime() or time.gmtime(). 
To change it for all formatters, - for example if you want all logging times to be shown in GMT, - set the 'converter' attribute in the Formatter class. - """ - ct = self.converter(record.created) - if datefmt: - s = time.strftime(datefmt, ct) - else: - t = time.strftime("%Y-%m-%d %H:%M:%S", ct) - s = "%s,%03d" % (t, record.msecs) - return s - - def formatException(self, ei): - """ - Format and return the specified exception information as a string. - - This default implementation just uses - traceback.print_exception() - """ - sio = cStringIO.StringIO() - traceback.print_exception(ei[0], ei[1], ei[2], None, sio) - s = sio.getvalue() - sio.close() - if s[-1] == "\n": - s = s[:-1] - return s - - def format(self, record): - """ - Format the specified record as text. - - The record's attribute dictionary is used as the operand to a - string formatting operation which yields the returned string. - Before formatting the dictionary, a couple of preparatory steps - are carried out. The message attribute of the record is computed - using LogRecord.getMessage(). If the formatting string contains - "%(asctime)", formatTime() is called to format the event time. - If there is exception information, it is formatted using - formatException() and appended to the message. - """ - record.message = record.getMessage() - if string.find(self._fmt,"%(asctime)") >= 0: - record.asctime = self.formatTime(record, self.datefmt) - s = self._fmt % record.__dict__ - if record.exc_info: - # Cache the traceback text to avoid converting it multiple times - # (it's constant anyway) - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - if record.exc_text: - if s[-1] != "\n": - s = s + "\n" - s = s + record.exc_text - return s - -# -# The default formatter to use when no other is specified -# -_defaultFormatter = Formatter() - -class BufferingFormatter: - """ - A formatter suitable for formatting a number of records. - """ - def __init__(self, linefmt=None): - """ - Optionally specify a formatter which will be used to format each - individual record. - """ - if linefmt: - self.linefmt = linefmt - else: - self.linefmt = _defaultFormatter - - def formatHeader(self, records): - """ - Return the header string for the specified records. - """ - return "" - - def formatFooter(self, records): - """ - Return the footer string for the specified records. - """ - return "" - - def format(self, records): - """ - Format the specified records and return the result as a string. - """ - rv = "" - if len(records) > 0: - rv = rv + self.formatHeader(records) - for record in records: - rv = rv + self.linefmt.format(record) - rv = rv + self.formatFooter(records) - return rv - -#--------------------------------------------------------------------------- -# Filter classes and functions -#--------------------------------------------------------------------------- - -class Filter: - """ - Filter instances are used to perform arbitrary filtering of LogRecords. - - Loggers and Handlers can optionally use Filter instances to filter - records as desired. The base filter class only allows events which are - below a certain point in the logger hierarchy. For example, a filter - initialized with "A.B" will allow events logged by loggers "A.B", - "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If - initialized with the empty string, all events are passed. - """ - def __init__(self, name=''): - """ - Initialize a filter. 
- - Initialize with the name of the logger which, together with its - children, will have its events allowed through the filter. If no - name is specified, allow every event. - """ - self.name = name - self.nlen = len(name) - - def filter(self, record): - """ - Determine if the specified record is to be logged. - - Is the specified record to be logged? Returns 0 for no, nonzero for - yes. If deemed appropriate, the record may be modified in-place. - """ - if self.nlen == 0: - return 1 - elif self.name == record.name: - return 1 - elif string.find(record.name, self.name, 0, self.nlen) != 0: - return 0 - return (record.name[self.nlen] == ".") - -class Filterer: - """ - A base class for loggers and handlers which allows them to share - common code. - """ - def __init__(self): - """ - Initialize the list of filters to be an empty list. - """ - self.filters = [] - - def addFilter(self, filter): - """ - Add the specified filter to this handler. - """ - if not (filter in self.filters): - self.filters.append(filter) - - def removeFilter(self, filter): - """ - Remove the specified filter from this handler. - """ - if filter in self.filters: - self.filters.remove(filter) - - def filter(self, record): - """ - Determine if a record is loggable by consulting all the filters. - - The default is to allow the record to be logged; any filter can veto - this and the record is then dropped. Returns a zero value if a record - is to be dropped, else non-zero. - """ - rv = 1 - for f in self.filters: - if not f.filter(record): - rv = 0 - break - return rv - -#--------------------------------------------------------------------------- -# Handler classes and functions -#--------------------------------------------------------------------------- - -_handlers = {} #repository of handlers (for flushing when shutdown called) -_handlerList = [] # added to allow handlers to be removed in reverse of order initialized - -class Handler(Filterer): - """ - Handler instances dispatch logging events to specific destinations. - - The base handler class. Acts as a placeholder which defines the Handler - interface. Handlers can optionally use Formatter instances to format - records as desired. By default, no formatter is specified; in this case, - the 'raw' message as determined by record.message is logged. - """ - def __init__(self, level=NOTSET): - """ - Initializes the instance - basically setting the formatter to None - and the filter list to empty. - """ - Filterer.__init__(self) - self.level = level - self.formatter = None - #get the module data lock, as we're updating a shared structure. - _acquireLock() - try: #unlikely to raise an exception, but you never know... - _handlers[self] = 1 - _handlerList.insert(0, self) - finally: - _releaseLock() - self.createLock() - - def createLock(self): - """ - Acquire a thread lock for serializing access to the underlying I/O. - """ - if thread: - self.lock = threading.RLock() - else: - self.lock = None - - def acquire(self): - """ - Acquire the I/O thread lock. - """ - if self.lock: - self.lock.acquire() - - def release(self): - """ - Release the I/O thread lock. - """ - if self.lock: - self.lock.release() - - def setLevel(self, level): - """ - Set the logging level of this handler. - """ - self.level = level - - def format(self, record): - """ - Format the specified record. - - If a formatter is set, use it. Otherwise, use the default formatter - for the module. 
- """ - if self.formatter: - fmt = self.formatter - else: - fmt = _defaultFormatter - return fmt.format(record) - - def emit(self, record): - """ - Do whatever it takes to actually log the specified logging record. - - This version is intended to be implemented by subclasses and so - raises a NotImplementedError. - """ - raise NotImplementedError, 'emit must be implemented '\ - 'by Handler subclasses' - - def handle(self, record): - """ - Conditionally emit the specified logging record. - - Emission depends on filters which may have been added to the handler. - Wrap the actual emission of the record with acquisition/release of - the I/O thread lock. Returns whether the filter passed the record for - emission. - """ - rv = self.filter(record) - if rv: - self.acquire() - try: - self.emit(record) - finally: - self.release() - return rv - - def setFormatter(self, fmt): - """ - Set the formatter for this handler. - """ - self.formatter = fmt - - def flush(self): - """ - Ensure all logging output has been flushed. - - This version does nothing and is intended to be implemented by - subclasses. - """ - pass - - def close(self): - """ - Tidy up any resources used by the handler. - - This version does removes the handler from an internal list - of handlers which is closed when shutdown() is called. Subclasses - should ensure that this gets called from overridden close() - methods. - """ - #get the module data lock, as we're updating a shared structure. - _acquireLock() - try: #unlikely to raise an exception, but you never know... - del _handlers[self] - _handlerList.remove(self) - finally: - _releaseLock() - - def handleError(self, record): - """ - Handle errors which occur during an emit() call. - - This method should be called from handlers when an exception is - encountered during an emit() call. If raiseExceptions is false, - exceptions get silently ignored. This is what is mostly wanted - for a logging system - most users will not care about errors in - the logging system, they are more interested in application errors. - You could, however, replace this with a custom handler if you wish. - The record which was being processed is passed in to this method. - """ - if raiseExceptions: - ei = sys.exc_info() - traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) - del ei - -class StreamHandler(Handler): - """ - A handler class which writes logging records, appropriately formatted, - to a stream. Note that this class does not close the stream, as - sys.stdout or sys.stderr may be used. - """ - def __init__(self, strm=None): - """ - Initialize the handler. - - If strm is not specified, sys.stderr is used. - """ - Handler.__init__(self) - if strm is None: - strm = sys.stderr - self.stream = strm - self.formatter = None - - def flush(self): - """ - Flushes the stream. - """ - self.stream.flush() - - def emit(self, record): - """ - Emit a record. - - If a formatter is specified, it is used to format the record. - The record is then written to the stream with a trailing newline - [N.B. this may be removed depending on feedback]. If exception - information is present, it is formatted using - traceback.print_exception and appended to the stream. - """ - try: - msg = self.format(record) - fs = "%s\n" - if not hasattr(types, "UnicodeType"): #if no unicode support... 
- self.stream.write(fs % msg) - else: - try: - self.stream.write(fs % msg) - except UnicodeError: - self.stream.write(fs % msg.encode("UTF-8")) - self.flush() - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - -class FileHandler(StreamHandler): - """ - A handler class which writes formatted logging records to disk files. - """ - def __init__(self, filename, mode='a', encoding=None): - """ - Open the specified file and use it as the stream for logging. - """ - if codecs is None: - encoding = None - if encoding is None: - stream = open(filename, mode) - else: - stream = codecs.open(filename, mode, encoding) - StreamHandler.__init__(self, stream) - #keep the absolute path, otherwise derived classes which use this - #may come a cropper when the current directory changes - self.baseFilename = os.path.abspath(filename) - self.mode = mode - - def close(self): - """ - Closes the stream. - """ - self.flush() - self.stream.close() - StreamHandler.close(self) - -#--------------------------------------------------------------------------- -# Manager classes and functions -#--------------------------------------------------------------------------- - -class PlaceHolder: - """ - PlaceHolder instances are used in the Manager logger hierarchy to take - the place of nodes for which no loggers have been defined. This class is - intended for internal use only and not as part of the public API. - """ - def __init__(self, alogger): - """ - Initialize with the specified logger being a child of this placeholder. - """ - #self.loggers = [alogger] - self.loggerMap = { alogger : None } - - def append(self, alogger): - """ - Add the specified logger as a child of this placeholder. - """ - #if alogger not in self.loggers: - if not self.loggerMap.has_key(alogger): - #self.loggers.append(alogger) - self.loggerMap[alogger] = None - -# -# Determine which class to use when instantiating loggers. -# -_loggerClass = None - -def setLoggerClass(klass): - """ - Set the class to be used when instantiating a logger. The class should - define __init__() such that only a name argument is required, and the - __init__() should call Logger.__init__() - """ - if klass != Logger: - if not issubclass(klass, Logger): - raise TypeError, "logger not derived from logging.Logger: " + \ - klass.__name__ - global _loggerClass - _loggerClass = klass - -def getLoggerClass(): - """ - Return the class to be used when instantiating a logger. - """ - - return _loggerClass - -class Manager: - """ - There is [under normal circumstances] just one Manager instance, which - holds the hierarchy of loggers. - """ - def __init__(self, rootnode): - """ - Initialize the manager with the root node of the logger hierarchy. - """ - self.root = rootnode - self.disable = 0 - self.emittedNoHandlerWarning = 0 - self.loggerDict = {} - - def getLogger(self, name): - """ - Get a logger with the specified name (channel name), creating it - if it doesn't yet exist. This name is a dot-separated hierarchical - name, such as "a", "a.b", "a.b.c" or similar. - - If a PlaceHolder existed for the specified name [i.e. the logger - didn't exist but a child of it did], replace it with the created - logger and fix up the parent/child references which pointed to the - placeholder to now point to the logger. 
- """ - rv = None - _acquireLock() - try: - if self.loggerDict.has_key(name): - rv = self.loggerDict[name] - if isinstance(rv, PlaceHolder): - ph = rv - rv = _loggerClass(name) - rv.manager = self - self.loggerDict[name] = rv - self._fixupChildren(ph, rv) - self._fixupParents(rv) - else: - rv = _loggerClass(name) - rv.manager = self - self.loggerDict[name] = rv - self._fixupParents(rv) - finally: - _releaseLock() - return rv - - def _fixupParents(self, alogger): - """ - Ensure that there are either loggers or placeholders all the way - from the specified logger to the root of the logger hierarchy. - """ - name = alogger.name - i = string.rfind(name, ".") - rv = None - while (i > 0) and not rv: - substr = name[:i] - if not self.loggerDict.has_key(substr): - self.loggerDict[substr] = PlaceHolder(alogger) - else: - obj = self.loggerDict[substr] - if isinstance(obj, Logger): - rv = obj - else: - assert isinstance(obj, PlaceHolder) - obj.append(alogger) - i = string.rfind(name, ".", 0, i - 1) - if not rv: - rv = self.root - alogger.parent = rv - - def _fixupChildren(self, ph, alogger): - """ - Ensure that children of the placeholder ph are connected to the - specified logger. - """ - name = alogger.name - namelen = len(name) - for c in ph.loggerMap.keys(): - #The if means ... if not c.parent.name.startswith(nm) - #if string.find(c.parent.name, nm) <> 0: - if c.parent.name[:namelen] != name: - alogger.parent = c.parent - c.parent = alogger - -#--------------------------------------------------------------------------- -# Logger classes and functions -#--------------------------------------------------------------------------- - -class Logger(Filterer): - """ - Instances of the Logger class represent a single logging channel. A - "logging channel" indicates an area of an application. Exactly how an - "area" is defined is up to the application developer. Since an - application can have any number of areas, logging channels are identified - by a unique string. Application areas can be nested (e.g. an area - of "input processing" might include sub-areas "read CSV files", "read - XLS files" and "read Gnumeric files"). To cater for this natural nesting, - channel names are organized into a namespace hierarchy where levels are - separated by periods, much like the Java or Python package namespace. So - in the instance given above, channel names might be "input" for the upper - level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. - There is no arbitrary limit to the depth of nesting. - """ - def __init__(self, name, level=NOTSET): - """ - Initialize the logger with a name and an optional level. - """ - Filterer.__init__(self) - self.name = name - self.level = level - self.parent = None - self.propagate = 1 - self.handlers = [] - self.disabled = 0 - - def setLevel(self, level): - """ - Set the logging level of this logger. - """ - self.level = level - - def debug(self, msg, *args, **kwargs): - """ - Log 'msg % args' with severity 'DEBUG'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. - - logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) - """ - if self.manager.disable >= DEBUG: - return - if DEBUG >= self.getEffectiveLevel(): - apply(self._log, (DEBUG, msg, args), kwargs) - - def info(self, msg, *args, **kwargs): - """ - Log 'msg % args' with severity 'INFO'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. 
- - logger.info("Houston, we have a %s", "interesting problem", exc_info=1) - """ - if self.manager.disable >= INFO: - return - if INFO >= self.getEffectiveLevel(): - apply(self._log, (INFO, msg, args), kwargs) - - def warning(self, msg, *args, **kwargs): - """ - Log 'msg % args' with severity 'WARNING'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. - - logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) - """ - if self.manager.disable >= WARNING: - return - if self.isEnabledFor(WARNING): - apply(self._log, (WARNING, msg, args), kwargs) - - warn = warning - - def error(self, msg, *args, **kwargs): - """ - Log 'msg % args' with severity 'ERROR'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. - - logger.error("Houston, we have a %s", "major problem", exc_info=1) - """ - if self.manager.disable >= ERROR: - return - if self.isEnabledFor(ERROR): - apply(self._log, (ERROR, msg, args), kwargs) - - def exception(self, msg, *args): - """ - Convenience method for logging an ERROR with exception information. - """ - apply(self.error, (msg,) + args, {'exc_info': 1}) - - def critical(self, msg, *args, **kwargs): - """ - Log 'msg % args' with severity 'CRITICAL'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. - - logger.critical("Houston, we have a %s", "major disaster", exc_info=1) - """ - if self.manager.disable >= CRITICAL: - return - if CRITICAL >= self.getEffectiveLevel(): - apply(self._log, (CRITICAL, msg, args), kwargs) - - fatal = critical - - def log(self, level, msg, *args, **kwargs): - """ - Log 'msg % args' with the integer severity 'level'. - - To pass exception information, use the keyword argument exc_info with - a true value, e.g. - - logger.log(level, "We have a %s", "mysterious problem", exc_info=1) - """ - if type(level) != types.IntType: - if raiseExceptions: - raise TypeError, "level must be an integer" - else: - return - if self.manager.disable >= level: - return - if self.isEnabledFor(level): - apply(self._log, (level, msg, args), kwargs) - - def findCaller(self): - """ - Find the stack frame of the caller so that we can note the source - file name, line number and function name. - """ - f = currentframe().f_back - rv = "(unknown file)", 0, "(unknown function)" - while hasattr(f, "f_code"): - co = f.f_code - filename = os.path.normcase(co.co_filename) - if filename == _srcfile: - f = f.f_back - continue - rv = (filename, f.f_lineno, co.co_name) - break - return rv - - def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None): - """ - A factory method which can be overridden in subclasses to create - specialized LogRecords. - """ - rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func) - if extra: - for key in extra: - if (key in ["message", "asctime"]) or (key in rv.__dict__): - raise KeyError("Attempt to overwrite %r in LogRecord" % key) - rv.__dict__[key] = extra[key] - return rv - - def _log(self, level, msg, args, exc_info=None, extra=None): - """ - Low-level logging routine which creates a LogRecord and then calls - all the handlers of this logger to handle the record. 
- """ - if _srcfile: - fn, lno, func = self.findCaller() - else: - fn, lno, func = "(unknown file)", 0, "(unknown function)" - if exc_info: - if type(exc_info) != types.TupleType: - exc_info = sys.exc_info() - record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra) - self.handle(record) - - def handle(self, record): - """ - Call the handlers for the specified record. - - This method is used for unpickled records received from a socket, as - well as those created locally. Logger-level filtering is applied. - """ - if (not self.disabled) and self.filter(record): - self.callHandlers(record) - - def addHandler(self, hdlr): - """ - Add the specified handler to this logger. - """ - if not (hdlr in self.handlers): - self.handlers.append(hdlr) - - def removeHandler(self, hdlr): - """ - Remove the specified handler from this logger. - """ - if hdlr in self.handlers: - #hdlr.close() - hdlr.acquire() - try: - self.handlers.remove(hdlr) - finally: - hdlr.release() - - def callHandlers(self, record): - """ - Pass a record to all relevant handlers. - - Loop through all handlers for this logger and its parents in the - logger hierarchy. If no handler was found, output a one-off error - message to sys.stderr. Stop searching up the hierarchy whenever a - logger with the "propagate" attribute set to zero is found - that - will be the last logger whose handlers are called. - """ - c = self - found = 0 - while c: - for hdlr in c.handlers: - found = found + 1 - if record.levelno >= hdlr.level: - hdlr.handle(record) - if not c.propagate: - c = None #break out - else: - c = c.parent - if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning: - sys.stderr.write("No handlers could be found for logger" - " \"%s\"\n" % self.name) - self.manager.emittedNoHandlerWarning = 1 - - def getEffectiveLevel(self): - """ - Get the effective level for this logger. - - Loop through this logger and its parents in the logger hierarchy, - looking for a non-zero logging level. Return the first one found. - """ - logger = self - while logger: - if logger.level: - return logger.level - logger = logger.parent - return NOTSET - - def isEnabledFor(self, level): - """ - Is this logger enabled for level 'level'? - """ - if self.manager.disable >= level: - return 0 - return level >= self.getEffectiveLevel() - -class RootLogger(Logger): - """ - A root logger is not that different to any other logger, except that - it must have a logging level and there is only one instance of it in - the hierarchy. - """ - def __init__(self, level): - """ - Initialize the logger with the name "root". - """ - Logger.__init__(self, "root", level) - -_loggerClass = Logger - -root = RootLogger(WARNING) -Logger.root = root -Logger.manager = Manager(Logger.root) - -#--------------------------------------------------------------------------- -# Configuration classes and functions -#--------------------------------------------------------------------------- - -BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" - -def basicConfig(**kwargs): - """ - Do basic configuration for the logging system. - - This function does nothing if the root logger already has handlers - configured. It is a convenience method intended for use by simple scripts - to do one-shot configuration of the logging package. - - The default behaviour is to create a StreamHandler which writes to - sys.stderr, set a formatter using the BASIC_FORMAT format string, and - add the handler to the root logger. 
- - A number of optional keyword arguments may be specified, which can alter - the default behaviour. - - filename Specifies that a FileHandler be created, using the specified - filename, rather than a StreamHandler. - filemode Specifies the mode to open the file, if filename is specified - (if filemode is unspecified, it defaults to 'a'). - format Use the specified format string for the handler. - datefmt Use the specified date/time format. - level Set the root logger level to the specified level. - stream Use the specified stream to initialize the StreamHandler. Note - that this argument is incompatible with 'filename' - if both - are present, 'stream' is ignored. - - Note that you could specify a stream created using open(filename, mode) - rather than passing the filename and mode in. However, it should be - remembered that StreamHandler does not close its stream (since it may be - using sys.stdout or sys.stderr), whereas FileHandler closes its stream - when the handler is closed. - """ - if len(root.handlers) == 0: - filename = kwargs.get("filename") - if filename: - mode = kwargs.get("filemode", 'a') - hdlr = FileHandler(filename, mode) - else: - stream = kwargs.get("stream") - hdlr = StreamHandler(stream) - fs = kwargs.get("format", BASIC_FORMAT) - dfs = kwargs.get("datefmt", None) - fmt = Formatter(fs, dfs) - hdlr.setFormatter(fmt) - root.addHandler(hdlr) - level = kwargs.get("level") - if level: - root.setLevel(level) - -#--------------------------------------------------------------------------- -# Utility functions at module level. -# Basically delegate everything to the root logger. -#--------------------------------------------------------------------------- - -def getLogger(name=None): - """ - Return a logger with the specified name, creating it if necessary. - - If no name is specified, return the root logger. - """ - if name: - return Logger.manager.getLogger(name) - else: - return root - -#def getRootLogger(): -# """ -# Return the root logger. -# -# Note that getLogger('') now does the same thing, so this function is -# deprecated and may disappear in the future. -# """ -# return root - -def critical(msg, *args, **kwargs): - """ - Log a message with severity 'CRITICAL' on the root logger. - """ - if len(root.handlers) == 0: - basicConfig() - apply(root.critical, (msg,)+args, kwargs) - -fatal = critical - -def error(msg, *args, **kwargs): - """ - Log a message with severity 'ERROR' on the root logger. - """ - if len(root.handlers) == 0: - basicConfig() - apply(root.error, (msg,)+args, kwargs) - -def exception(msg, *args): - """ - Log a message with severity 'ERROR' on the root logger, - with exception information. - """ - apply(error, (msg,)+args, {'exc_info': 1}) - -def warning(msg, *args, **kwargs): - """ - Log a message with severity 'WARNING' on the root logger. - """ - if len(root.handlers) == 0: - basicConfig() - apply(root.warning, (msg,)+args, kwargs) - -warn = warning - -def info(msg, *args, **kwargs): - """ - Log a message with severity 'INFO' on the root logger. - """ - if len(root.handlers) == 0: - basicConfig() - apply(root.info, (msg,)+args, kwargs) - -def debug(msg, *args, **kwargs): - """ - Log a message with severity 'DEBUG' on the root logger. - """ - if len(root.handlers) == 0: - basicConfig() - apply(root.debug, (msg,)+args, kwargs) - -def log(level, msg, *args, **kwargs): - """ - Log 'msg % args' with the integer severity 'level' on the root logger. 
- """ - if len(root.handlers) == 0: - basicConfig() - apply(root.log, (level, msg)+args, kwargs) - -def disable(level): - """ - Disable all logging calls less severe than 'level'. - """ - root.manager.disable = level - -def shutdown(handlerList=_handlerList): - """ - Perform any cleanup actions in the logging system (e.g. flushing - buffers). - - Should be called at application exit. - """ - for h in handlerList[:]: - #errors might occur, for example, if files are locked - #we just ignore them if raiseExceptions is not set - try: - h.flush() - h.close() - except: - if raiseExceptions: - raise - #else, swallow - -#Let's try and shutdown automatically on application exit... -try: - import atexit - atexit.register(shutdown) -except ImportError: # for Python versions < 2.0 - def exithook(status, old_exit=sys.exit): - try: - shutdown() - finally: - old_exit(status) - - sys.exit = exithook diff --git a/sys/lib/python/logging/config.py b/sys/lib/python/logging/config.py deleted file mode 100644 index 11d2b7a79..000000000 --- a/sys/lib/python/logging/config.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright 2001-2005 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Configuration functions for the logging package for Python. The core package -is based on PEP 282 and comments thereto in comp.lang.python, and influenced -by Apache's log4j system. - -Should work under Python versions >= 1.5.2, except that source line -information is not available unless 'sys._getframe()' is. - -Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging' and log away! -""" - -import sys, logging, logging.handlers, string, socket, struct, os, traceback, types - -try: - import thread - import threading -except ImportError: - thread = None - -from SocketServer import ThreadingTCPServer, StreamRequestHandler - - -DEFAULT_LOGGING_CONFIG_PORT = 9030 - -if sys.platform == "win32": - RESET_ERROR = 10054 #WSAECONNRESET -else: - RESET_ERROR = 104 #ECONNRESET - -# -# The following code implements a socket listener for on-the-fly -# reconfiguration of logging. -# -# _listener holds the server object doing the listening -_listener = None - -def fileConfig(fname, defaults=None): - """ - Read the logging configuration from a ConfigParser-format file. - - This can be called several times from an application, allowing an end user - the ability to select from various pre-canned configurations (if the - developer provides a mechanism to present the choices and load the chosen - configuration). 
- In versions of ConfigParser which have the readfp method [typically - shipped in 2.x versions of Python], you can pass in a file-like object - rather than a filename, in which case the file-like object will be read - using readfp. - """ - import ConfigParser - - cp = ConfigParser.ConfigParser(defaults) - if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): - cp.readfp(fname) - else: - cp.read(fname) - - formatters = _create_formatters(cp) - - # critical section - logging._acquireLock() - try: - logging._handlers.clear() - del logging._handlerList[:] - # Handlers add themselves to logging._handlers - handlers = _install_handlers(cp, formatters) - _install_loggers(cp, handlers) - finally: - logging._releaseLock() - - -def _resolve(name): - """Resolve a dotted name to a global object.""" - name = string.split(name, '.') - used = name.pop(0) - found = __import__(used) - for n in name: - used = used + '.' + n - try: - found = getattr(found, n) - except AttributeError: - __import__(used) - found = getattr(found, n) - return found - - -def _create_formatters(cp): - """Create and return formatters""" - flist = cp.get("formatters", "keys") - if not len(flist): - return {} - flist = string.split(flist, ",") - formatters = {} - for form in flist: - sectname = "formatter_%s" % string.strip(form) - opts = cp.options(sectname) - if "format" in opts: - fs = cp.get(sectname, "format", 1) - else: - fs = None - if "datefmt" in opts: - dfs = cp.get(sectname, "datefmt", 1) - else: - dfs = None - c = logging.Formatter - if "class" in opts: - class_name = cp.get(sectname, "class") - if class_name: - c = _resolve(class_name) - f = c(fs, dfs) - formatters[form] = f - return formatters - - -def _install_handlers(cp, formatters): - """Install and return handlers""" - hlist = cp.get("handlers", "keys") - if not len(hlist): - return {} - hlist = string.split(hlist, ",") - handlers = {} - fixups = [] #for inter-handler references - for hand in hlist: - sectname = "handler_%s" % string.strip(hand) - klass = cp.get(sectname, "class") - opts = cp.options(sectname) - if "formatter" in opts: - fmt = cp.get(sectname, "formatter") - else: - fmt = "" - klass = eval(klass, vars(logging)) - args = cp.get(sectname, "args") - args = eval(args, vars(logging)) - h = apply(klass, args) - if "level" in opts: - level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) - if len(fmt): - h.setFormatter(formatters[fmt]) - #temporary hack for FileHandler and MemoryHandler. - if klass == logging.handlers.MemoryHandler: - if "target" in opts: - target = cp.get(sectname,"target") - else: - target = "" - if len(target): #the target handler may not be loaded yet, so keep for later... - fixups.append((h, target)) - handlers[hand] = h - #now all handlers are loaded, fixup inter-handler references... 
- for h, t in fixups: - h.setTarget(handlers[t]) - return handlers - - -def _install_loggers(cp, handlers): - """Create and install loggers""" - - # configure the root first - llist = cp.get("loggers", "keys") - llist = string.split(llist, ",") - llist = map(lambda x: string.strip(x), llist) - llist.remove("root") - sectname = "logger_root" - root = logging.root - log = root - opts = cp.options(sectname) - if "level" in opts: - level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) - for h in root.handlers[:]: - root.removeHandler(h) - hlist = cp.get(sectname, "handlers") - if len(hlist): - hlist = string.split(hlist, ",") - for hand in hlist: - log.addHandler(handlers[string.strip(hand)]) - - #and now the others... - #we don't want to lose the existing loggers, - #since other threads may have pointers to them. - #existing is set to contain all existing loggers, - #and as we go through the new configuration we - #remove any which are configured. At the end, - #what's left in existing is the set of loggers - #which were in the previous configuration but - #which are not in the new configuration. - existing = root.manager.loggerDict.keys() - #now set up the new ones... - for log in llist: - sectname = "logger_%s" % log - qn = cp.get(sectname, "qualname") - opts = cp.options(sectname) - if "propagate" in opts: - propagate = cp.getint(sectname, "propagate") - else: - propagate = 1 - logger = logging.getLogger(qn) - if qn in existing: - existing.remove(qn) - if "level" in opts: - level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) - for h in logger.handlers[:]: - logger.removeHandler(h) - logger.propagate = propagate - logger.disabled = 0 - hlist = cp.get(sectname, "handlers") - if len(hlist): - hlist = string.split(hlist, ",") - for hand in hlist: - logger.addHandler(handlers[string.strip(hand)]) - - #Disable any old loggers. There's no point deleting - #them as other threads may continue to hold references - #and by disabling them, you stop them doing any logging. - for log in existing: - root.manager.loggerDict[log].disabled = 1 - - -def listen(port=DEFAULT_LOGGING_CONFIG_PORT): - """ - Start up a socket server on the specified port, and listen for new - configurations. - - These will be sent as a file suitable for processing by fileConfig(). - Returns a Thread object on which you can call start() to start the server, - and which you can join() when appropriate. To stop the server, call - stopListening(). - """ - if not thread: - raise NotImplementedError, "listen() needs threading to work" - - class ConfigStreamHandler(StreamRequestHandler): - """ - Handler for a logging configuration request. - - It expects a completely new logging configuration and uses fileConfig - to install it. - """ - def handle(self): - """ - Handle a request. - - Each request is expected to be a 4-byte length, packed using - struct.pack(">L", n), followed by the config file. - Uses fileConfig() to do the grunt work. - """ - import tempfile - try: - conn = self.connection - chunk = conn.recv(4) - if len(chunk) == 4: - slen = struct.unpack(">L", chunk)[0] - chunk = self.connection.recv(slen) - while len(chunk) < slen: - chunk = chunk + conn.recv(slen - len(chunk)) - #Apply new configuration. We'd like to be able to - #create a StringIO and pass that in, but unfortunately - #1.5.2 ConfigParser does not support reading file - #objects, only actual files. So we create a temporary - #file and remove it later. 
- file = tempfile.mktemp(".ini") - f = open(file, "w") - f.write(chunk) - f.close() - try: - fileConfig(file) - except (KeyboardInterrupt, SystemExit): - raise - except: - traceback.print_exc() - os.remove(file) - except socket.error, e: - if type(e.args) != types.TupleType: - raise - else: - errcode = e.args[0] - if errcode != RESET_ERROR: - raise - - class ConfigSocketReceiver(ThreadingTCPServer): - """ - A simple TCP socket-based logging config receiver. - """ - - allow_reuse_address = 1 - - def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, - handler=None): - ThreadingTCPServer.__init__(self, (host, port), handler) - logging._acquireLock() - self.abort = 0 - logging._releaseLock() - self.timeout = 1 - - def serve_until_stopped(self): - import select - abort = 0 - while not abort: - rd, wr, ex = select.select([self.socket.fileno()], - [], [], - self.timeout) - if rd: - self.handle_request() - logging._acquireLock() - abort = self.abort - logging._releaseLock() - - def serve(rcvr, hdlr, port): - server = rcvr(port=port, handler=hdlr) - global _listener - logging._acquireLock() - _listener = server - logging._releaseLock() - server.serve_until_stopped() - - return threading.Thread(target=serve, - args=(ConfigSocketReceiver, - ConfigStreamHandler, port)) - -def stopListening(): - """ - Stop the listening server which was created with a call to listen(). - """ - global _listener - if _listener: - logging._acquireLock() - _listener.abort = 1 - _listener = None - logging._releaseLock() diff --git a/sys/lib/python/logging/handlers.py b/sys/lib/python/logging/handlers.py deleted file mode 100644 index 4ef896ed5..000000000 --- a/sys/lib/python/logging/handlers.py +++ /dev/null @@ -1,1019 +0,0 @@ -# Copyright 2001-2005 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -""" -Additional handlers for the logging package for Python. The core package is -based on PEP 282 and comments thereto in comp.lang.python, and influenced by -Apache's log4j system. - -Should work under Python versions >= 1.5.2, except that source line -information is not available unless 'sys._getframe()' is. - -Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved. - -To use, simply 'import logging' and log away! -""" - -import sys, logging, socket, types, os, string, cPickle, struct, time, glob - -try: - import codecs -except ImportError: - codecs = None - -# -# Some constants... 
-# - -DEFAULT_TCP_LOGGING_PORT = 9020 -DEFAULT_UDP_LOGGING_PORT = 9021 -DEFAULT_HTTP_LOGGING_PORT = 9022 -DEFAULT_SOAP_LOGGING_PORT = 9023 -SYSLOG_UDP_PORT = 514 - -_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day - -class BaseRotatingHandler(logging.FileHandler): - """ - Base class for handlers that rotate log files at a certain point. - Not meant to be instantiated directly. Instead, use RotatingFileHandler - or TimedRotatingFileHandler. - """ - def __init__(self, filename, mode, encoding=None): - """ - Use the specified filename for streamed logging - """ - if codecs is None: - encoding = None - logging.FileHandler.__init__(self, filename, mode, encoding) - self.mode = mode - self.encoding = encoding - - def emit(self, record): - """ - Emit a record. - - Output the record to the file, catering for rollover as described - in doRollover(). - """ - try: - if self.shouldRollover(record): - self.doRollover() - logging.FileHandler.emit(self, record) - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - -class RotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a set of files, which switches from one file - to the next when the current file reaches a certain size. - """ - def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None): - """ - Open the specified file and use it as the stream for logging. - - By default, the file grows indefinitely. You can specify particular - values of maxBytes and backupCount to allow the file to rollover at - a predetermined size. - - Rollover occurs whenever the current log file is nearly maxBytes in - length. If backupCount is >= 1, the system will successively create - new files with the same pathname as the base file, but with extensions - ".1", ".2" etc. appended to it. For example, with a backupCount of 5 - and a base file name of "app.log", you would get "app.log", - "app.log.1", "app.log.2", ... through to "app.log.5". The file being - written to is always "app.log" - when it gets filled up, it is closed - and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. - exist, then they are renamed to "app.log.2", "app.log.3" etc. - respectively. - - If maxBytes is zero, rollover never occurs. - """ - if maxBytes > 0: - mode = 'a' # doesn't make sense otherwise! - BaseRotatingHandler.__init__(self, filename, mode, encoding) - self.maxBytes = maxBytes - self.backupCount = backupCount - - def doRollover(self): - """ - Do a rollover, as described in __init__(). - """ - - self.stream.close() - if self.backupCount > 0: - for i in range(self.backupCount - 1, 0, -1): - sfn = "%s.%d" % (self.baseFilename, i) - dfn = "%s.%d" % (self.baseFilename, i + 1) - if os.path.exists(sfn): - #print "%s -> %s" % (sfn, dfn) - if os.path.exists(dfn): - os.remove(dfn) - os.rename(sfn, dfn) - dfn = self.baseFilename + ".1" - if os.path.exists(dfn): - os.remove(dfn) - os.rename(self.baseFilename, dfn) - #print "%s -> %s" % (self.baseFilename, dfn) - if self.encoding: - self.stream = codecs.open(self.baseFilename, 'w', self.encoding) - else: - self.stream = open(self.baseFilename, 'w') - - def shouldRollover(self, record): - """ - Determine if rollover should occur. - - Basically, see if the supplied record would cause the file to exceed - the size limit we have. - """ - if self.maxBytes > 0: # are we rolling over? 
- msg = "%s\n" % self.format(record) - self.stream.seek(0, 2) #due to non-posix-compliant Windows feature - if self.stream.tell() + len(msg) >= self.maxBytes: - return 1 - return 0 - -class TimedRotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a file, rotating the log file at certain timed - intervals. - - If backupCount is > 0, when rollover is done, no more than backupCount - files are kept - the oldest ones are deleted. - """ - def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None): - BaseRotatingHandler.__init__(self, filename, 'a', encoding) - self.when = string.upper(when) - self.backupCount = backupCount - # Calculate the real rollover interval, which is just the number of - # seconds between rollovers. Also set the filename suffix used when - # a rollover occurs. Current 'when' events supported: - # S - Seconds - # M - Minutes - # H - Hours - # D - Days - # midnight - roll over at midnight - # W{0-6} - roll over on a certain day; 0 - Monday - # - # Case of the 'when' specifier is not important; lower or upper case - # will work. - currentTime = int(time.time()) - if self.when == 'S': - self.interval = 1 # one second - self.suffix = "%Y-%m-%d_%H-%M-%S" - elif self.when == 'M': - self.interval = 60 # one minute - self.suffix = "%Y-%m-%d_%H-%M" - elif self.when == 'H': - self.interval = 60 * 60 # one hour - self.suffix = "%Y-%m-%d_%H" - elif self.when == 'D' or self.when == 'MIDNIGHT': - self.interval = 60 * 60 * 24 # one day - self.suffix = "%Y-%m-%d" - elif self.when.startswith('W'): - self.interval = 60 * 60 * 24 * 7 # one week - if len(self.when) != 2: - raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) - if self.when[1] < '0' or self.when[1] > '6': - raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) - self.dayOfWeek = int(self.when[1]) - self.suffix = "%Y-%m-%d" - else: - raise ValueError("Invalid rollover interval specified: %s" % self.when) - - self.interval = self.interval * interval # multiply by units requested - self.rolloverAt = currentTime + self.interval - - # If we are rolling over at midnight or weekly, then the interval is already known. - # What we need to figure out is WHEN the next interval is. In other words, - # if you are rolling over at midnight, then your base interval is 1 day, - # but you want to start that one day clock at midnight, not now. So, we - # have to fudge the rolloverAt value in order to trigger the first rollover - # at the right time. After that, the regular interval will take care of - # the rest. Note that this code doesn't care about leap seconds. :) - if self.when == 'MIDNIGHT' or self.when.startswith('W'): - # This could be done with less code, but I wanted it to be clear - t = time.localtime(currentTime) - currentHour = t[3] - currentMinute = t[4] - currentSecond = t[5] - # r is the number of seconds left between now and midnight - r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + - currentSecond) - self.rolloverAt = currentTime + r - # If we are rolling over on a certain day, add in the number of days until - # the next rollover, but offset by 1 since we just calculated the time - # until the next day starts. There are three cases: - # Case 1) The day to rollover is today; in this case, do nothing - # Case 2) The day to rollover is further in the interval (i.e., today is - # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to - # next rollover is simply 6 - 2 - 1, or 3. 
- # Case 3) The day to rollover is behind us in the interval (i.e., today - # is day 5 (Saturday) and rollover is on day 3 (Thursday). - # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the - # number of days left in the current week (1) plus the number - # of days in the next week until the rollover day (3). - if when.startswith('W'): - day = t[6] # 0 is Monday - if day > self.dayOfWeek: - daysToWait = (day - self.dayOfWeek) - 1 - self.rolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24)) - if day < self.dayOfWeek: - daysToWait = (6 - self.dayOfWeek) + day - self.rolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24)) - - #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime) - - def shouldRollover(self, record): - """ - Determine if rollover should occur - - record is not used, as we are just comparing times, but it is needed so - the method siguratures are the same - """ - t = int(time.time()) - if t >= self.rolloverAt: - return 1 - #print "No need to rollover: %d, %d" % (t, self.rolloverAt) - return 0 - - def doRollover(self): - """ - do a rollover; in this case, a date/time stamp is appended to the filename - when the rollover happens. However, you want the file to be named for the - start of the interval, not the current time. If there is a backup count, - then we have to get a list of matching filenames, sort them and remove - the one with the oldest suffix. - """ - self.stream.close() - # get the time that this sequence started at and make it a TimeTuple - t = self.rolloverAt - self.interval - timeTuple = time.localtime(t) - dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple) - if os.path.exists(dfn): - os.remove(dfn) - os.rename(self.baseFilename, dfn) - if self.backupCount > 0: - # find the oldest log file and delete it - s = glob.glob(self.baseFilename + ".20*") - if len(s) > self.backupCount: - s.sort() - os.remove(s[0]) - #print "%s -> %s" % (self.baseFilename, dfn) - if self.encoding: - self.stream = codecs.open(self.baseFilename, 'w', self.encoding) - else: - self.stream = open(self.baseFilename, 'w') - self.rolloverAt = self.rolloverAt + self.interval - -class SocketHandler(logging.Handler): - """ - A handler class which writes logging records, in pickle format, to - a streaming socket. The socket is kept open across logging calls. - If the peer resets it, an attempt is made to reconnect on the next call. - The pickle which is sent is that of the LogRecord's attribute dictionary - (__dict__), so that the receiver does not need to have the logging module - installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. - """ - - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. - - The attribute 'closeOnError' is set to 1 - which means that if - a socket error occurs, the socket is silently closed and then - reopened on the next logging call. - """ - logging.Handler.__init__(self) - self.host = host - self.port = port - self.sock = None - self.closeOnError = 0 - self.retryTime = None - # - # Exponential backoff parameters. - # - self.retryStart = 1.0 - self.retryMax = 30.0 - self.retryFactor = 2.0 - - def makeSocket(self): - """ - A factory method which allows subclasses to define the precise - type of socket they want. 
- """ - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect((self.host, self.port)) - return s - - def createSocket(self): - """ - Try to create a socket, using an exponential backoff with - a max retry time. Thanks to Robert Olson for the original patch - (SF #815911) which has been slightly refactored. - """ - now = time.time() - # Either retryTime is None, in which case this - # is the first time back after a disconnect, or - # we've waited long enough. - if self.retryTime is None: - attempt = 1 - else: - attempt = (now >= self.retryTime) - if attempt: - try: - self.sock = self.makeSocket() - self.retryTime = None # next time, no delay before trying - except: - #Creation failed, so set the retry time and return. - if self.retryTime is None: - self.retryPeriod = self.retryStart - else: - self.retryPeriod = self.retryPeriod * self.retryFactor - if self.retryPeriod > self.retryMax: - self.retryPeriod = self.retryMax - self.retryTime = now + self.retryPeriod - - def send(self, s): - """ - Send a pickled string to the socket. - - This function allows for partial sends which can happen when the - network is busy. - """ - if self.sock is None: - self.createSocket() - #self.sock can be None either because we haven't reached the retry - #time yet, or because we have reached the retry time and retried, - #but are still unable to connect. - if self.sock: - try: - if hasattr(self.sock, "sendall"): - self.sock.sendall(s) - else: - sentsofar = 0 - left = len(s) - while left > 0: - sent = self.sock.send(s[sentsofar:]) - sentsofar = sentsofar + sent - left = left - sent - except socket.error: - self.sock.close() - self.sock = None # so we can call createSocket next time - - def makePickle(self, record): - """ - Pickles the record in binary format with a length prefix, and - returns it ready for transmission across the socket. - """ - ei = record.exc_info - if ei: - dummy = self.format(record) # just to get traceback text into record.exc_text - record.exc_info = None # to avoid Unpickleable error - s = cPickle.dumps(record.__dict__, 1) - if ei: - record.exc_info = ei # for next handler - slen = struct.pack(">L", len(s)) - return slen + s - - def handleError(self, record): - """ - Handle an error during logging. - - An error has occurred during logging. Most likely cause - - connection lost. Close the socket so that we can retry on the - next event. - """ - if self.closeOnError and self.sock: - self.sock.close() - self.sock = None #try to reconnect next time - else: - logging.Handler.handleError(self, record) - - def emit(self, record): - """ - Emit a record. - - Pickles the record and writes it to the socket in binary format. - If there is an error with the socket, silently drop the packet. - If there was a problem with the socket, re-establishes the - socket. - """ - try: - s = self.makePickle(record) - self.send(s) - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - - def close(self): - """ - Closes the socket. - """ - if self.sock: - self.sock.close() - self.sock = None - logging.Handler.close(self) - -class DatagramHandler(SocketHandler): - """ - A handler class which writes logging records, in pickle format, to - a datagram socket. The pickle which is sent is that of the LogRecord's - attribute dictionary (__dict__), so that the receiver does not need to - have the logging module installed in order to process the logging event. - - To unpickle the record at the receiving end into a LogRecord, use the - makeLogRecord function. 
- - """ - def __init__(self, host, port): - """ - Initializes the handler with a specific host address and port. - """ - SocketHandler.__init__(self, host, port) - self.closeOnError = 0 - - def makeSocket(self): - """ - The factory method of SocketHandler is here overridden to create - a UDP socket (SOCK_DGRAM). - """ - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - return s - - def send(self, s): - """ - Send a pickled string to a socket. - - This function no longer allows for partial sends which can happen - when the network is busy - UDP does not guarantee delivery and - can deliver packets out of sequence. - """ - if self.sock is None: - self.createSocket() - self.sock.sendto(s, (self.host, self.port)) - -class SysLogHandler(logging.Handler): - """ - A handler class which sends formatted logging records to a syslog - server. Based on Sam Rushing's syslog module: - http://www.nightmare.com/squirl/python-ext/misc/syslog.py - Contributed by Nicolas Untz (after which minor refactoring changes - have been made). - """ - - # from <linux/sys/syslog.h>: - # ====================================================================== - # priorities/facilities are encoded into a single 32-bit quantity, where - # the bottom 3 bits are the priority (0-7) and the top 28 bits are the - # facility (0-big number). Both the priorities and the facilities map - # roughly one-to-one to strings in the syslogd(8) source code. This - # mapping is included in this file. - # - # priorities (these are ordered) - - LOG_EMERG = 0 # system is unusable - LOG_ALERT = 1 # action must be taken immediately - LOG_CRIT = 2 # critical conditions - LOG_ERR = 3 # error conditions - LOG_WARNING = 4 # warning conditions - LOG_NOTICE = 5 # normal but significant condition - LOG_INFO = 6 # informational - LOG_DEBUG = 7 # debug-level messages - - # facility codes - LOG_KERN = 0 # kernel messages - LOG_USER = 1 # random user-level messages - LOG_MAIL = 2 # mail system - LOG_DAEMON = 3 # system daemons - LOG_AUTH = 4 # security/authorization messages - LOG_SYSLOG = 5 # messages generated internally by syslogd - LOG_LPR = 6 # line printer subsystem - LOG_NEWS = 7 # network news subsystem - LOG_UUCP = 8 # UUCP subsystem - LOG_CRON = 9 # clock daemon - LOG_AUTHPRIV = 10 # security/authorization messages (private) - - # other codes through 15 reserved for system use - LOG_LOCAL0 = 16 # reserved for local use - LOG_LOCAL1 = 17 # reserved for local use - LOG_LOCAL2 = 18 # reserved for local use - LOG_LOCAL3 = 19 # reserved for local use - LOG_LOCAL4 = 20 # reserved for local use - LOG_LOCAL5 = 21 # reserved for local use - LOG_LOCAL6 = 22 # reserved for local use - LOG_LOCAL7 = 23 # reserved for local use - - priority_names = { - "alert": LOG_ALERT, - "crit": LOG_CRIT, - "critical": LOG_CRIT, - "debug": LOG_DEBUG, - "emerg": LOG_EMERG, - "err": LOG_ERR, - "error": LOG_ERR, # DEPRECATED - "info": LOG_INFO, - "notice": LOG_NOTICE, - "panic": LOG_EMERG, # DEPRECATED - "warn": LOG_WARNING, # DEPRECATED - "warning": LOG_WARNING, - } - - facility_names = { - "auth": LOG_AUTH, - "authpriv": LOG_AUTHPRIV, - "cron": LOG_CRON, - "daemon": LOG_DAEMON, - "kern": LOG_KERN, - "lpr": LOG_LPR, - "mail": LOG_MAIL, - "news": LOG_NEWS, - "security": LOG_AUTH, # DEPRECATED - "syslog": LOG_SYSLOG, - "user": LOG_USER, - "uucp": LOG_UUCP, - "local0": LOG_LOCAL0, - "local1": LOG_LOCAL1, - "local2": LOG_LOCAL2, - "local3": LOG_LOCAL3, - "local4": LOG_LOCAL4, - "local5": LOG_LOCAL5, - "local6": LOG_LOCAL6, - "local7": LOG_LOCAL7, - } - - #The map below appears 
to be trivially lowercasing the key. However, - #there's more to it than meets the eye - in some locales, lowercasing - #gives unexpected results. See SF #1524081: in the Turkish locale, - #"INFO".lower() != "info" - priority_map = { - "DEBUG" : "debug", - "INFO" : "info", - "WARNING" : "warning", - "ERROR" : "error", - "CRITICAL" : "critical" - } - - def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): - """ - Initialize a handler. - - If address is specified as a string, UNIX socket is used. - If facility is not specified, LOG_USER is used. - """ - logging.Handler.__init__(self) - - self.address = address - self.facility = facility - if type(address) == types.StringType: - self.unixsocket = 1 - self._connect_unixsocket(address) - else: - self.unixsocket = 0 - self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - - self.formatter = None - - def _connect_unixsocket(self, address): - self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - # syslog may require either DGRAM or STREAM sockets - try: - self.socket.connect(address) - except socket.error: - self.socket.close() - self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.socket.connect(address) - - # curious: when talking to the unix-domain '/dev/log' socket, a - # zero-terminator seems to be required. this string is placed - # into a class variable so that it can be overridden if - # necessary. - log_format_string = '<%d>%s\000' - - def encodePriority(self, facility, priority): - """ - Encode the facility and priority. You can pass in strings or - integers - if strings are passed, the facility_names and - priority_names mapping dictionaries are used to convert them to - integers. - """ - if type(facility) == types.StringType: - facility = self.facility_names[facility] - if type(priority) == types.StringType: - priority = self.priority_names[priority] - return (facility << 3) | priority - - def close (self): - """ - Closes the socket. - """ - if self.unixsocket: - self.socket.close() - logging.Handler.close(self) - - def mapPriority(self, levelName): - """ - Map a logging level name to a key in the priority_names map. - This is useful in two scenarios: when custom levels are being - used, and in the case where you can't do a straightforward - mapping by lowercasing the logging level name because of locale- - specific issues (see SF #1524081). - """ - return self.priority_map.get(levelName, "warning") - - def emit(self, record): - """ - Emit a record. - - The record is formatted, and then sent to the syslog server. If - exception information is present, it is NOT sent to the server. - """ - msg = self.format(record) - """ - We need to convert record level to lowercase, maybe this will - change in the future. - """ - msg = self.log_format_string % ( - self.encodePriority(self.facility, - self.mapPriority(record.levelname)), - msg) - try: - if self.unixsocket: - try: - self.socket.send(msg) - except socket.error: - self._connect_unixsocket(self.address) - self.socket.send(msg) - else: - self.socket.sendto(msg, self.address) - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - -class SMTPHandler(logging.Handler): - """ - A handler class which sends an SMTP email for each logging event. - """ - def __init__(self, mailhost, fromaddr, toaddrs, subject): - """ - Initialize the handler. - - Initialize the instance with the from and to addresses and subject - line of the email. 
To specify a non-standard SMTP port, use the - (host, port) tuple format for the mailhost argument. - """ - logging.Handler.__init__(self) - if type(mailhost) == types.TupleType: - host, port = mailhost - self.mailhost = host - self.mailport = port - else: - self.mailhost = mailhost - self.mailport = None - self.fromaddr = fromaddr - if type(toaddrs) == types.StringType: - toaddrs = [toaddrs] - self.toaddrs = toaddrs - self.subject = subject - - def getSubject(self, record): - """ - Determine the subject for the email. - - If you want to specify a subject line which is record-dependent, - override this method. - """ - return self.subject - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def date_time(self): - """ - Return the current date and time formatted for a MIME header. - Needed for Python 1.5.2 (no email package available) - """ - year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) - s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( - self.weekdayname[wd], - day, self.monthname[month], year, - hh, mm, ss) - return s - - def emit(self, record): - """ - Emit a record. - - Format the record and send it to the specified addressees. - """ - try: - import smtplib - try: - from email.Utils import formatdate - except: - formatdate = self.date_time - port = self.mailport - if not port: - port = smtplib.SMTP_PORT - smtp = smtplib.SMTP(self.mailhost, port) - msg = self.format(record) - msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( - self.fromaddr, - string.join(self.toaddrs, ","), - self.getSubject(record), - formatdate(), msg) - smtp.sendmail(self.fromaddr, self.toaddrs, msg) - smtp.quit() - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - -class NTEventLogHandler(logging.Handler): - """ - A handler class which sends events to the NT Event Log. Adds a - registry entry for the specified application name. If no dllname is - provided, win32service.pyd (which contains some basic message - placeholders) is used. Note that use of these placeholders will make - your event logs big, as the entire message source is held in the log. - If you want slimmer logs, you have to pass in the name of your own DLL - which contains the message definitions you want to use in the event log. - """ - def __init__(self, appname, dllname=None, logtype="Application"): - logging.Handler.__init__(self) - try: - import win32evtlogutil, win32evtlog - self.appname = appname - self._welu = win32evtlogutil - if not dllname: - dllname = os.path.split(self._welu.__file__) - dllname = os.path.split(dllname[0]) - dllname = os.path.join(dllname[0], r'win32service.pyd') - self.dllname = dllname - self.logtype = logtype - self._welu.AddSourceToRegistry(appname, dllname, logtype) - self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE - self.typemap = { - logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, - logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, - logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, - logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, - } - except ImportError: - print "The Python Win32 extensions for NT (service, event "\ - "logging) appear not to be available." - self._welu = None - - def getMessageID(self, record): - """ - Return the message ID for the event record. 
If you are using your - own messages, you could do this by having the msg passed to the - logger being an ID rather than a formatting string. Then, in here, - you could use a dictionary lookup to get the message ID. This - version returns 1, which is the base message ID in win32service.pyd. - """ - return 1 - - def getEventCategory(self, record): - """ - Return the event category for the record. - - Override this if you want to specify your own categories. This version - returns 0. - """ - return 0 - - def getEventType(self, record): - """ - Return the event type for the record. - - Override this if you want to specify your own types. This version does - a mapping using the handler's typemap attribute, which is set up in - __init__() to a dictionary which contains mappings for DEBUG, INFO, - WARNING, ERROR and CRITICAL. If you are using your own levels you will - either need to override this method or place a suitable dictionary in - the handler's typemap attribute. - """ - return self.typemap.get(record.levelno, self.deftype) - - def emit(self, record): - """ - Emit a record. - - Determine the message ID, event category and event type. Then - log the message in the NT event log. - """ - if self._welu: - try: - id = self.getMessageID(record) - cat = self.getEventCategory(record) - type = self.getEventType(record) - msg = self.format(record) - self._welu.ReportEvent(self.appname, id, cat, type, [msg]) - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - - def close(self): - """ - Clean up this handler. - - You can remove the application name from the registry as a - source of event log entries. However, if you do this, you will - not be able to see the events as you intended in the Event Log - Viewer - it needs to be able to access the registry to get the - DLL name. - """ - #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) - logging.Handler.close(self) - -class HTTPHandler(logging.Handler): - """ - A class which sends records to a Web server, using either GET or - POST semantics. - """ - def __init__(self, host, url, method="GET"): - """ - Initialize the instance with the host, the request URL, and the method - ("GET" or "POST") - """ - logging.Handler.__init__(self) - method = string.upper(method) - if method not in ["GET", "POST"]: - raise ValueError, "method must be GET or POST" - self.host = host - self.url = url - self.method = method - - def mapLogRecord(self, record): - """ - Default implementation of mapping the log record into a dict - that is sent as the CGI data. Overwrite in your class. - Contributed by Franz Glasner. - """ - return record.__dict__ - - def emit(self, record): - """ - Emit a record. - - Send the record to the Web server as an URL-encoded dictionary - """ - try: - import httplib, urllib - host = self.host - h = httplib.HTTP(host) - url = self.url - data = urllib.urlencode(self.mapLogRecord(record)) - if self.method == "GET": - if (string.find(url, '?') >= 0): - sep = '&' - else: - sep = '?' - url = url + "%c%s" % (sep, data) - h.putrequest(self.method, url) - # support multiple hosts on one IP address... 
- # need to strip optional :port from host, if present - i = string.find(host, ":") - if i >= 0: - host = host[:i] - h.putheader("Host", host) - if self.method == "POST": - h.putheader("Content-type", - "application/x-www-form-urlencoded") - h.putheader("Content-length", str(len(data))) - h.endheaders() - if self.method == "POST": - h.send(data) - h.getreply() #can't do anything with the result - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - -class BufferingHandler(logging.Handler): - """ - A handler class which buffers logging records in memory. Whenever each - record is added to the buffer, a check is made to see if the buffer should - be flushed. If it should, then flush() is expected to do what's needed. - """ - def __init__(self, capacity): - """ - Initialize the handler with the buffer size. - """ - logging.Handler.__init__(self) - self.capacity = capacity - self.buffer = [] - - def shouldFlush(self, record): - """ - Should the handler flush its buffer? - - Returns true if the buffer is up to capacity. This method can be - overridden to implement custom flushing strategies. - """ - return (len(self.buffer) >= self.capacity) - - def emit(self, record): - """ - Emit a record. - - Append the record. If shouldFlush() tells us to, call flush() to process - the buffer. - """ - self.buffer.append(record) - if self.shouldFlush(record): - self.flush() - - def flush(self): - """ - Override to implement custom flushing behaviour. - - This version just zaps the buffer to empty. - """ - self.buffer = [] - - def close(self): - """ - Close the handler. - - This version just flushes and chains to the parent class' close(). - """ - self.flush() - logging.Handler.close(self) - -class MemoryHandler(BufferingHandler): - """ - A handler class which buffers logging records in memory, periodically - flushing them to a target handler. Flushing occurs whenever the buffer - is full, or when an event of a certain severity or greater is seen. - """ - def __init__(self, capacity, flushLevel=logging.ERROR, target=None): - """ - Initialize the handler with the buffer size, the level at which - flushing should occur and an optional target. - - Note that without a target being set either here or via setTarget(), - a MemoryHandler is no use to anyone! - """ - BufferingHandler.__init__(self, capacity) - self.flushLevel = flushLevel - self.target = target - - def shouldFlush(self, record): - """ - Check for buffer full or a record at the flushLevel or higher. - """ - return (len(self.buffer) >= self.capacity) or \ - (record.levelno >= self.flushLevel) - - def setTarget(self, target): - """ - Set the target handler for this handler. - """ - self.target = target - - def flush(self): - """ - For a MemoryHandler, flushing means just sending the buffered - records to the target, if there is one. Override if you want - different behaviour. - """ - if self.target: - for record in self.buffer: - self.target.handle(record) - self.buffer = [] - - def close(self): - """ - Flush, set the target to None and lose the buffer. - """ - self.flush() - self.target = None - BufferingHandler.close(self) |
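
What follows are brief, hedged usage sketches for the APIs documented in these deleted files, assuming the stock Python 2 interpreter this commit removes; hostnames, ports, file names and limits are illustrative, not taken from the tree. First, the listen()/stopListening() socket interface at the end of config.py: listen() returns an unstarted threading.Thread (the return statement above), and the 4-byte length prefix used by the client below is an assumption about how ConfigStreamHandler frames requests, since that part of config.py is outside this hunk.

    import logging, logging.config
    import socket, struct

    # Start the background listener; listen() returns the threading.Thread
    # shown above, so it still has to be started explicitly.
    t = logging.config.listen(9030)   # 9030 assumed for DEFAULT_LOGGING_CONFIG_PORT
    t.start()

    # Push a new fileConfig()-style configuration into the running process.
    # The big-endian length prefix is an assumption about the stream handler
    # defined earlier in config.py, not shown in this hunk.
    conf = open("logging.ini").read()          # hypothetical config file
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("localhost", 9030))
    s.sendall(struct.pack(">L", len(conf)) + conf)
    s.close()

    # Shut the listener back down; stopListening() flips the abort flag that
    # serve_until_stopped() polls once per timeout interval.
    logging.config.stopListening()
    t.join()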
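A minimal sketch of the two rotating handlers from handlers.py: maxBytes/backupCount and the 'midnight' schedule behave as the RotatingFileHandler and TimedRotatingFileHandler docstrings above describe; the file names and limits here are made up.

    import logging
    import logging.handlers

    log = logging.getLogger("app")
    log.setLevel(logging.DEBUG)

    # Size-based rollover: app.log is renamed to app.log.1 (and so on, up to
    # app.log.5) once it approaches 100 KB.
    by_size = logging.handlers.RotatingFileHandler(
        "app.log", maxBytes=100 * 1024, backupCount=5)

    # Time-based rollover: timed.log gets a date-stamped suffix at midnight,
    # and only the newest seven dated backups are kept.
    by_time = logging.handlers.TimedRotatingFileHandler(
        "timed.log", when="midnight", backupCount=7)

    fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
    for h in (by_size, by_time):
        h.setFormatter(fmt)
        log.addHandler(h)

    log.info("rollover is checked on every emit() via shouldRollover()")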
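SocketHandler ships each record as a length-prefixed pickle of record.__dict__. The receiving side below is a hypothetical single-shot server that rebuilds the record with makeLogRecord(), as the class docstring suggests; it glosses over partial reads, and the two halves would normally run in separate processes.

    import logging, logging.handlers

    # Sending side: makePickle() emits struct.pack(">L", len) + pickled __dict__.
    netlog = logging.getLogger("net")
    netlog.addHandler(logging.handlers.SocketHandler(
        "localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT))
    netlog.warning("shipped by makePickle()/send()")

    # Hypothetical receiving side (run in another process): undo makePickle().
    import socket, struct, cPickle

    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("", logging.handlers.DEFAULT_TCP_LOGGING_PORT))
    srv.listen(1)
    conn, _ = srv.accept()
    slen = struct.unpack(">L", conn.recv(4))[0]   # a real server would loop on recv()
    record = logging.makeLogRecord(cPickle.loads(conn.recv(slen)))
    logging.getLogger(record.name).handle(record)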
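SysLogHandler defaults to UDP on port 514; the final assertion just spells out the facility/priority packing that encodePriority() performs.

    import logging
    from logging.handlers import SysLogHandler, SYSLOG_UDP_PORT

    daemon_log = logging.getLogger("daemon")
    # A (host, port) tuple means UDP; a string such as "/dev/log" would select
    # the UNIX-domain socket path handled by _connect_unixsocket() instead.
    h = SysLogHandler(address=("localhost", SYSLOG_UDP_PORT),
                      facility=SysLogHandler.LOG_LOCAL0)
    daemon_log.addHandler(h)
    daemon_log.error("tagged <131> on the wire for local0/err")

    # (facility << 3) | priority: local0 is 16, err is 3, hence 131.
    assert h.encodePriority(SysLogHandler.LOG_LOCAL0, "error") == 131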
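HTTPHandler sends the record's __dict__ URL-encoded to a web server; the endpoint below is hypothetical.

    import logging
    from logging.handlers import HTTPHandler

    weblog = logging.getLogger("web")
    # mapLogRecord() turns the record into a dict, which emit() sends either as
    # a query string (GET) or as an x-www-form-urlencoded body (POST); any
    # ":port" in the host is stripped when building the Host header.
    weblog.addHandler(HTTPHandler("logs.example.com:8080", "/collect", method="POST"))
    weblog.info("delivered via httplib")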
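Finally, MemoryHandler buffering in front of an SMTPHandler, a common pairing: routine records are held in memory and only mailed, together with the triggering record, once something at flushLevel or above arrives. The addresses are placeholders, and the 4-argument constructor matches the SMTPHandler shown above (this version has no authentication parameters).

    import logging
    from logging.handlers import MemoryHandler, SMTPHandler

    job_log = logging.getLogger("batch")
    job_log.setLevel(logging.DEBUG)

    mailer = SMTPHandler(mailhost=("mail.example.com", 25),
                         fromaddr="batch@example.com",
                         toaddrs=["ops@example.com"],
                         subject="batch job errors")

    # Hold up to 200 records; the first ERROR makes shouldFlush() true and the
    # whole buffer, earlier DEBUG/INFO context included, goes to the target.
    buffered = MemoryHandler(200, flushLevel=logging.ERROR, target=mailer)
    job_log.addHandler(buffered)

    job_log.debug("context that is only mailed if something goes wrong")
    job_log.error("this flushes the buffer through the SMTPHandler")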