path: root/sys/lib/python/email
author     Ori Bernstein <ori@eigenstate.org>  2021-06-14 00:00:37 +0000
committer  Ori Bernstein <ori@eigenstate.org>  2021-06-14 00:00:37 +0000
commit     a73a964e51247ed169d322c725a3a18859f109a3 (patch)
tree       3f752d117274d444bda44e85609aeac1acf313f3 /sys/lib/python/email
parent     e64efe273fcb921a61bf27d33b230c4e64fcd425 (diff)
python, hg: tow outside the environment.
they've served us well, and can ride off into the sunset.
Diffstat (limited to 'sys/lib/python/email')
-rw-r--r--  sys/lib/python/email/__init__.py            123
-rw-r--r--  sys/lib/python/email/_parseaddr.py          480
-rw-r--r--  sys/lib/python/email/base64mime.py          184
-rw-r--r--  sys/lib/python/email/charset.py             388
-rw-r--r--  sys/lib/python/email/encoders.py             88
-rw-r--r--  sys/lib/python/email/errors.py               57
-rw-r--r--  sys/lib/python/email/feedparser.py          480
-rw-r--r--  sys/lib/python/email/generator.py           348
-rw-r--r--  sys/lib/python/email/header.py              503
-rw-r--r--  sys/lib/python/email/iterators.py            73
-rw-r--r--  sys/lib/python/email/message.py             786
-rw-r--r--  sys/lib/python/email/mime/__init__.py         0
-rw-r--r--  sys/lib/python/email/mime/application.py     36
-rw-r--r--  sys/lib/python/email/mime/audio.py           73
-rw-r--r--  sys/lib/python/email/mime/base.py            26
-rw-r--r--  sys/lib/python/email/mime/image.py           46
-rw-r--r--  sys/lib/python/email/mime/message.py         34
-rw-r--r--  sys/lib/python/email/mime/multipart.py       41
-rw-r--r--  sys/lib/python/email/mime/nonmultipart.py    26
-rw-r--r--  sys/lib/python/email/mime/text.py            30
-rw-r--r--  sys/lib/python/email/parser.py               91
-rw-r--r--  sys/lib/python/email/quoprimime.py          336
-rw-r--r--  sys/lib/python/email/utils.py               323
23 files changed, 0 insertions, 4572 deletions
diff --git a/sys/lib/python/email/__init__.py b/sys/lib/python/email/__init__.py
deleted file mode 100644
index 8d230fdeb..000000000
--- a/sys/lib/python/email/__init__.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""A package for parsing, handling, and generating email messages."""
-
-__version__ = '4.0.1'
-
-__all__ = [
- # Old names
- 'base64MIME',
- 'Charset',
- 'Encoders',
- 'Errors',
- 'Generator',
- 'Header',
- 'Iterators',
- 'Message',
- 'MIMEAudio',
- 'MIMEBase',
- 'MIMEImage',
- 'MIMEMessage',
- 'MIMEMultipart',
- 'MIMENonMultipart',
- 'MIMEText',
- 'Parser',
- 'quopriMIME',
- 'Utils',
- 'message_from_string',
- 'message_from_file',
- # new names
- 'base64mime',
- 'charset',
- 'encoders',
- 'errors',
- 'generator',
- 'header',
- 'iterators',
- 'message',
- 'mime',
- 'parser',
- 'quoprimime',
- 'utils',
- ]
-
-
-
-# Some convenience routines. Don't import Parser and Message as side-effects
-# of importing email since those cascadingly import most of the rest of the
-# email package.
-def message_from_string(s, *args, **kws):
- """Parse a string into a Message object model.
-
- Optional _class and strict are passed to the Parser constructor.
- """
- from email.parser import Parser
- return Parser(*args, **kws).parsestr(s)
-
-
-def message_from_file(fp, *args, **kws):
- """Read a file and parse its contents into a Message object model.
-
- Optional _class and strict are passed to the Parser constructor.
- """
- from email.parser import Parser
- return Parser(*args, **kws).parse(fp)
-
-
-
-# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
-# email 4.0 module names), to old-style names (email 3.0 module names).
-import sys
-
-class LazyImporter(object):
- def __init__(self, module_name):
- self.__name__ = 'email.' + module_name
-
- def __getattr__(self, name):
- __import__(self.__name__)
- mod = sys.modules[self.__name__]
- self.__dict__.update(mod.__dict__)
- return getattr(mod, name)
-
-
-_LOWERNAMES = [
- # email.<old name> -> email.<new name is lowercased old name>
- 'Charset',
- 'Encoders',
- 'Errors',
- 'FeedParser',
- 'Generator',
- 'Header',
- 'Iterators',
- 'Message',
- 'Parser',
- 'Utils',
- 'base64MIME',
- 'quopriMIME',
- ]
-
-_MIMENAMES = [
- # email.MIME<old name> -> email.mime.<new name is lowercased old name>
- 'Audio',
- 'Base',
- 'Image',
- 'Message',
- 'Multipart',
- 'NonMultipart',
- 'Text',
- ]
-
-for _name in _LOWERNAMES:
- importer = LazyImporter(_name.lower())
- sys.modules['email.' + _name] = importer
- setattr(sys.modules['email'], _name, importer)
-
-
-import email.mime
-for _name in _MIMENAMES:
- importer = LazyImporter('mime.' + _name.lower())
- sys.modules['email.MIME' + _name] = importer
- setattr(sys.modules['email'], 'MIME' + _name, importer)
- setattr(sys.modules['email.mime'], _name, importer)
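The deleted __init__.py above exposed the email 4.0 lowercase modules and, through LazyImporter, kept the old email 3.0 capitalized names importable. A minimal Python 2 usage sketch; the message text and address are illustrative assumptions, not taken from the repository:

    import email

    # New-style convenience function defined in __init__.py above.
    msg = email.message_from_string('Subject: hello\n\nbody text\n')
    print msg['Subject']                         # hello

    # Old-style capitalized names still resolve through LazyImporter.
    from email.Utils import parseaddr            # forwards to email.utils
    from email.MIMEText import MIMEText          # forwards to email.mime.text
    print parseaddr('Ori <ori@eigenstate.org>')  # ('Ori', 'ori@eigenstate.org')
    print MIMEText('body text').as_string()
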
diff --git a/sys/lib/python/email/_parseaddr.py b/sys/lib/python/email/_parseaddr.py
deleted file mode 100644
index 791d8928e..000000000
--- a/sys/lib/python/email/_parseaddr.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (C) 2002-2007 Python Software Foundation
-# Contact: email-sig@python.org
-
-"""Email address parsing code.
-
-Lifted directly from rfc822.py. This should eventually be rewritten.
-"""
-
-__all__ = [
- 'mktime_tz',
- 'parsedate',
- 'parsedate_tz',
- 'quote',
- ]
-
-import time
-
-SPACE = ' '
-EMPTYSTRING = ''
-COMMASPACE = ', '
-
-# Parse a date field
-_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
- 'aug', 'sep', 'oct', 'nov', 'dec',
- 'january', 'february', 'march', 'april', 'may', 'june', 'july',
- 'august', 'september', 'october', 'november', 'december']
-
-_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
-
-# The timezone table does not include the military time zones defined
-# in RFC822, other than Z. According to RFC1123, the description in
-# RFC822 gets the signs wrong, so we can't rely on any such time
-# zones. RFC1123 recommends that numeric timezone indicators be used
-# instead of timezone names.
-
-_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
- 'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
- 'EST': -500, 'EDT': -400, # Eastern
- 'CST': -600, 'CDT': -500, # Central
- 'MST': -700, 'MDT': -600, # Mountain
- 'PST': -800, 'PDT': -700 # Pacific
- }
-
-
-def parsedate_tz(data):
- """Convert a date string to a time tuple.
-
- Accounts for military timezones.
- """
- data = data.split()
- # The FWS after the comma after the day-of-week is optional, so search and
- # adjust for this.
- if data[0].endswith(',') or data[0].lower() in _daynames:
- # There's a dayname here. Skip it
- del data[0]
- else:
- i = data[0].rfind(',')
- if i >= 0:
- data[0] = data[0][i+1:]
- if len(data) == 3: # RFC 850 date, deprecated
- stuff = data[0].split('-')
- if len(stuff) == 3:
- data = stuff + data[1:]
- if len(data) == 4:
- s = data[3]
- i = s.find('+')
- if i > 0:
- data[3:] = [s[:i], s[i+1:]]
- else:
- data.append('') # Dummy tz
- if len(data) < 5:
- return None
- data = data[:5]
- [dd, mm, yy, tm, tz] = data
- mm = mm.lower()
- if mm not in _monthnames:
- dd, mm = mm, dd.lower()
- if mm not in _monthnames:
- return None
- mm = _monthnames.index(mm) + 1
- if mm > 12:
- mm -= 12
- if dd[-1] == ',':
- dd = dd[:-1]
- i = yy.find(':')
- if i > 0:
- yy, tm = tm, yy
- if yy[-1] == ',':
- yy = yy[:-1]
- if not yy[0].isdigit():
- yy, tz = tz, yy
- if tm[-1] == ',':
- tm = tm[:-1]
- tm = tm.split(':')
- if len(tm) == 2:
- [thh, tmm] = tm
- tss = '0'
- elif len(tm) == 3:
- [thh, tmm, tss] = tm
- else:
- return None
- try:
- yy = int(yy)
- dd = int(dd)
- thh = int(thh)
- tmm = int(tmm)
- tss = int(tss)
- except ValueError:
- return None
- tzoffset = None
- tz = tz.upper()
- if _timezones.has_key(tz):
- tzoffset = _timezones[tz]
- else:
- try:
- tzoffset = int(tz)
- except ValueError:
- pass
- # Convert a timezone offset into seconds ; -0500 -> -18000
- if tzoffset:
- if tzoffset < 0:
- tzsign = -1
- tzoffset = -tzoffset
- else:
- tzsign = 1
- tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
- # Daylight Saving Time flag is set to -1, since DST is unknown.
- return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
-
-
-def parsedate(data):
- """Convert a time string to a time tuple."""
- t = parsedate_tz(data)
- if isinstance(t, tuple):
- return t[:9]
- else:
- return t
-
-
-def mktime_tz(data):
- """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
- if data[9] is None:
- # No zone info, so localtime is better assumption than GMT
- return time.mktime(data[:8] + (-1,))
- else:
- t = time.mktime(data[:8] + (0,))
- return t - data[9] - time.timezone
-
-
-def quote(str):
- """Add quotes around a string."""
- return str.replace('\\', '\\\\').replace('"', '\\"')
-
-
-class AddrlistClass:
- """Address parser class by Ben Escoto.
-
- To understand what this class does, it helps to have a copy of RFC 2822 in
- front of you.
-
- Note: this class interface is deprecated and may be removed in the future.
- Use rfc822.AddressList instead.
- """
-
- def __init__(self, field):
- """Initialize a new instance.
-
- `field' is an unparsed address header field, containing
- one or more addresses.
- """
- self.specials = '()<>@,:;.\"[]'
- self.pos = 0
- self.LWS = ' \t'
- self.CR = '\r\n'
- self.FWS = self.LWS + self.CR
- self.atomends = self.specials + self.LWS + self.CR
- # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
- # is obsolete syntax. RFC 2822 requires that we recognize obsolete
- # syntax, so allow dots in phrases.
- self.phraseends = self.atomends.replace('.', '')
- self.field = field
- self.commentlist = []
-
- def gotonext(self):
- """Parse up to the start of the next address."""
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS + '\n\r':
- self.pos += 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- else:
- break
-
- def getaddrlist(self):
- """Parse all addresses.
-
- Returns a list containing all of the addresses.
- """
- result = []
- while self.pos < len(self.field):
- ad = self.getaddress()
- if ad:
- result += ad
- else:
- result.append(('', ''))
- return result
-
- def getaddress(self):
- """Parse the next address."""
- self.commentlist = []
- self.gotonext()
-
- oldpos = self.pos
- oldcl = self.commentlist
- plist = self.getphraselist()
-
- self.gotonext()
- returnlist = []
-
- if self.pos >= len(self.field):
- # Bad email address technically, no domain.
- if plist:
- returnlist = [(SPACE.join(self.commentlist), plist[0])]
-
- elif self.field[self.pos] in '.@':
- # email address is just an addrspec
- # this isn't very efficient since we start over
- self.pos = oldpos
- self.commentlist = oldcl
- addrspec = self.getaddrspec()
- returnlist = [(SPACE.join(self.commentlist), addrspec)]
-
- elif self.field[self.pos] == ':':
- # address is a group
- returnlist = []
-
- fieldlen = len(self.field)
- self.pos += 1
- while self.pos < len(self.field):
- self.gotonext()
- if self.pos < fieldlen and self.field[self.pos] == ';':
- self.pos += 1
- break
- returnlist = returnlist + self.getaddress()
-
- elif self.field[self.pos] == '<':
- # Address is a phrase then a route addr
- routeaddr = self.getrouteaddr()
-
- if self.commentlist:
- returnlist = [(SPACE.join(plist) + ' (' +
- ' '.join(self.commentlist) + ')', routeaddr)]
- else:
- returnlist = [(SPACE.join(plist), routeaddr)]
-
- else:
- if plist:
- returnlist = [(SPACE.join(self.commentlist), plist[0])]
- elif self.field[self.pos] in self.specials:
- self.pos += 1
-
- self.gotonext()
- if self.pos < len(self.field) and self.field[self.pos] == ',':
- self.pos += 1
- return returnlist
-
- def getrouteaddr(self):
- """Parse a route address (Return-path value).
-
- This method just skips all the route stuff and returns the addrspec.
- """
- if self.field[self.pos] != '<':
- return
-
- expectroute = False
- self.pos += 1
- self.gotonext()
- adlist = ''
- while self.pos < len(self.field):
- if expectroute:
- self.getdomain()
- expectroute = False
- elif self.field[self.pos] == '>':
- self.pos += 1
- break
- elif self.field[self.pos] == '@':
- self.pos += 1
- expectroute = True
- elif self.field[self.pos] == ':':
- self.pos += 1
- else:
- adlist = self.getaddrspec()
- self.pos += 1
- break
- self.gotonext()
-
- return adlist
-
- def getaddrspec(self):
- """Parse an RFC 2822 addr-spec."""
- aslist = []
-
- self.gotonext()
- while self.pos < len(self.field):
- if self.field[self.pos] == '.':
- aslist.append('.')
- self.pos += 1
- elif self.field[self.pos] == '"':
- aslist.append('"%s"' % self.getquote())
- elif self.field[self.pos] in self.atomends:
- break
- else:
- aslist.append(self.getatom())
- self.gotonext()
-
- if self.pos >= len(self.field) or self.field[self.pos] != '@':
- return EMPTYSTRING.join(aslist)
-
- aslist.append('@')
- self.pos += 1
- self.gotonext()
- return EMPTYSTRING.join(aslist) + self.getdomain()
-
- def getdomain(self):
- """Get the complete domain name from an address."""
- sdlist = []
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS:
- self.pos += 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] == '[':
- sdlist.append(self.getdomainliteral())
- elif self.field[self.pos] == '.':
- self.pos += 1
- sdlist.append('.')
- elif self.field[self.pos] in self.atomends:
- break
- else:
- sdlist.append(self.getatom())
- return EMPTYSTRING.join(sdlist)
-
- def getdelimited(self, beginchar, endchars, allowcomments=True):
- """Parse a header fragment delimited by special characters.
-
- `beginchar' is the start character for the fragment.
- If self is not looking at an instance of `beginchar' then
- getdelimited returns the empty string.
-
- `endchars' is a sequence of allowable end-delimiting characters.
- Parsing stops when one of these is encountered.
-
- If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
- within the parsed fragment.
- """
- if self.field[self.pos] != beginchar:
- return ''
-
- slist = ['']
- quote = False
- self.pos += 1
- while self.pos < len(self.field):
- if quote:
- slist.append(self.field[self.pos])
- quote = False
- elif self.field[self.pos] in endchars:
- self.pos += 1
- break
- elif allowcomments and self.field[self.pos] == '(':
- slist.append(self.getcomment())
- continue # have already advanced pos from getcomment
- elif self.field[self.pos] == '\\':
- quote = True
- else:
- slist.append(self.field[self.pos])
- self.pos += 1
-
- return EMPTYSTRING.join(slist)
-
- def getquote(self):
- """Get a quote-delimited fragment from self's field."""
- return self.getdelimited('"', '"\r', False)
-
- def getcomment(self):
- """Get a parenthesis-delimited fragment from self's field."""
- return self.getdelimited('(', ')\r', True)
-
- def getdomainliteral(self):
- """Parse an RFC 2822 domain-literal."""
- return '[%s]' % self.getdelimited('[', ']\r', False)
-
- def getatom(self, atomends=None):
- """Parse an RFC 2822 atom.
-
- Optional atomends specifies a different set of end token delimiters
- (the default is to use self.atomends). This is used e.g. in
- getphraselist() since phrase endings must not include the `.' (which
- is legal in phrases)."""
- atomlist = ['']
- if atomends is None:
- atomends = self.atomends
-
- while self.pos < len(self.field):
- if self.field[self.pos] in atomends:
- break
- else:
- atomlist.append(self.field[self.pos])
- self.pos += 1
-
- return EMPTYSTRING.join(atomlist)
-
- def getphraselist(self):
- """Parse a sequence of RFC 2822 phrases.
-
- A phrase is a sequence of words, which are in turn either RFC 2822
- atoms or quoted-strings. Phrases are canonicalized by squeezing all
- runs of continuous whitespace into one space.
- """
- plist = []
-
- while self.pos < len(self.field):
- if self.field[self.pos] in self.FWS:
- self.pos += 1
- elif self.field[self.pos] == '"':
- plist.append(self.getquote())
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] in self.phraseends:
- break
- else:
- plist.append(self.getatom(self.phraseends))
-
- return plist
-
-class AddressList(AddrlistClass):
- """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
- def __init__(self, field):
- AddrlistClass.__init__(self, field)
- if field:
- self.addresslist = self.getaddrlist()
- else:
- self.addresslist = []
-
- def __len__(self):
- return len(self.addresslist)
-
- def __add__(self, other):
- # Set union
- newaddr = AddressList(None)
- newaddr.addresslist = self.addresslist[:]
- for x in other.addresslist:
- if not x in self.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __iadd__(self, other):
- # Set union, in-place
- for x in other.addresslist:
- if not x in self.addresslist:
- self.addresslist.append(x)
- return self
-
- def __sub__(self, other):
- # Set difference
- newaddr = AddressList(None)
- for x in self.addresslist:
- if not x in other.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __isub__(self, other):
- # Set difference, in-place
- for x in other.addresslist:
- if x in self.addresslist:
- self.addresslist.remove(x)
- return self
-
- def __getitem__(self, index):
- # Make indexing, slices, and 'in' work
- return self.addresslist[index]
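The _parseaddr.py module removed above backed the public date and address helpers in email.utils. A short Python 2 sketch of those helpers; the date string and address are illustrative assumptions:

    from email.utils import parsedate_tz, mktime_tz, parseaddr

    date = 'Mon, 14 Jun 2021 00:00:37 +0000'
    parts = parsedate_tz(date)       # 10-tuple; item 9 is the UTC offset in seconds
    print parts[:6], parts[9]        # (2021, 6, 14, 0, 0, 37) 0
    print mktime_tz(parts)           # POSIX timestamp for the same instant

    # AddrlistClass/AddressList above drive parseaddr() and getaddresses().
    print parseaddr('Ori Bernstein <ori@eigenstate.org>')
    # ('Ori Bernstein', 'ori@eigenstate.org')
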
diff --git a/sys/lib/python/email/base64mime.py b/sys/lib/python/email/base64mime.py
deleted file mode 100644
index 0129d9d4e..000000000
--- a/sys/lib/python/email/base64mime.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Ben Gertzfield
-# Contact: email-sig@python.org
-
-"""Base64 content transfer encoding per RFCs 2045-2047.
-
-This module handles the content transfer encoding method defined in RFC 2045
-to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
-characters encoding known as Base64.
-
-It is used in the MIME standards for email to attach images, audio, and text
-using some 8-bit character sets to messages.
-
-This module provides an interface to encode and decode both headers and bodies
-with Base64 encoding.
-
-RFC 2045 defines a method for including character set information in an
-`encoded-word' in a header. This method is commonly used for 8-bit real names
-in To:, From:, Cc:, etc. fields, as well as Subject: lines.
-
-This module does not do the line wrapping or end-of-line character conversion
-necessary for proper internationalized headers; it only does dumb encoding and
-decoding. To deal with the various line wrapping issues, use the email.Header
-module.
-"""
-
-__all__ = [
- 'base64_len',
- 'body_decode',
- 'body_encode',
- 'decode',
- 'decodestring',
- 'encode',
- 'encodestring',
- 'header_encode',
- ]
-
-import re
-
-from binascii import b2a_base64, a2b_base64
-from email.utils import fix_eols
-
-CRLF = '\r\n'
-NL = '\n'
-EMPTYSTRING = ''
-
-# See also Charset.py
-MISC_LEN = 7
-
-
-
-# Helpers
-def base64_len(s):
- """Return the length of s when it is encoded with base64."""
- groups_of_3, leftover = divmod(len(s), 3)
- # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
- # Thanks, Tim!
- n = groups_of_3 * 4
- if leftover:
- n += 4
- return n
-
-
-
-def header_encode(header, charset='iso-8859-1', keep_eols=False,
- maxlinelen=76, eol=NL):
- """Encode a single header line with Base64 encoding in a given charset.
-
- Defined in RFC 2045, this Base64 encoding is identical to normal Base64
- encoding, except that each line must be intelligently wrapped (respecting
- the Base64 encoding), and subsequent lines must start with a space.
-
- charset names the character set to use to encode the header. It defaults
- to iso-8859-1.
-
- End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
- to the canonical email line separator \\r\\n unless the keep_eols
- parameter is True (the default is False).
-
- Each line of the header will be terminated in the value of eol, which
- defaults to "\\n". Set this to "\\r\\n" if you are using the result of
- this function directly in email.
-
- The resulting string will be in the form:
-
- "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
- =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
-
- with each line wrapped at, at most, maxlinelen characters (defaults to 76
- characters).
- """
- # Return empty headers unchanged
- if not header:
- return header
-
- if not keep_eols:
- header = fix_eols(header)
-
- # Base64 encode each line, in encoded chunks no greater than maxlinelen in
- # length, after the RFC chrome is added in.
- base64ed = []
- max_encoded = maxlinelen - len(charset) - MISC_LEN
- max_unencoded = max_encoded * 3 // 4
-
- for i in range(0, len(header), max_unencoded):
- base64ed.append(b2a_base64(header[i:i+max_unencoded]))
-
- # Now add the RFC chrome to each encoded chunk
- lines = []
- for line in base64ed:
- # Ignore the last character of each line if it is a newline
- if line.endswith(NL):
- line = line[:-1]
- # Add the chrome
- lines.append('=?%s?b?%s?=' % (charset, line))
- # Glue the lines together and return it. BAW: should we be able to
- # specify the leading whitespace in the joiner?
- joiner = eol + ' '
- return joiner.join(lines)
-
-
-
-def encode(s, binary=True, maxlinelen=76, eol=NL):
- """Encode a string with base64.
-
- Each line will be wrapped at, at most, maxlinelen characters (defaults to
- 76 characters).
-
- If binary is False, end-of-line characters will be converted to the
- canonical email end-of-line sequence \\r\\n. Otherwise they will be left
- verbatim (this is the default).
-
- Each line of encoded text will end with eol, which defaults to "\\n". Set
- this to "\r\n" if you will be using the result of this function directly
- in an email.
- """
- if not s:
- return s
-
- if not binary:
- s = fix_eols(s)
-
- encvec = []
- max_unencoded = maxlinelen * 3 // 4
- for i in range(0, len(s), max_unencoded):
- # BAW: should encode() inherit b2a_base64()'s dubious behavior in
- # adding a newline to the encoded string?
- enc = b2a_base64(s[i:i + max_unencoded])
- if enc.endswith(NL) and eol <> NL:
- enc = enc[:-1] + eol
- encvec.append(enc)
- return EMPTYSTRING.join(encvec)
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_encode = encode
-encodestring = encode
-
-
-
-def decode(s, convert_eols=None):
- """Decode a raw base64 string.
-
- If convert_eols is set to a string value, all canonical email linefeeds,
- e.g. "\\r\\n", in the decoded text will be converted to the value of
- convert_eols. os.linesep is a good choice for convert_eols if you are
- decoding a text attachment.
-
- This function does not parse a full MIME header value encoded with
- base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
- level email.Header class for that functionality.
- """
- if not s:
- return s
-
- dec = a2b_base64(s)
- if convert_eols:
- return dec.replace(CRLF, convert_eols)
- return dec
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_decode = decode
-decodestring = decode
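A brief Python 2 sketch of the base64mime helpers removed above; the sample strings are made up for illustration:

    from email import base64mime

    print base64mime.base64_len('hello world')   # 16: 4 output bytes per 3 input bytes
    print base64mime.header_encode('Bj\xf6rk', charset='iso-8859-1')
    # =?iso-8859-1?b?Qmr2cms=?=   (an RFC 2047 encoded word)
    print base64mime.decode(base64mime.encode('hello world'))   # hello world
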
diff --git a/sys/lib/python/email/charset.py b/sys/lib/python/email/charset.py
deleted file mode 100644
index 8f218b209..000000000
--- a/sys/lib/python/email/charset.py
+++ /dev/null
@@ -1,388 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Ben Gertzfield, Barry Warsaw
-# Contact: email-sig@python.org
-
-__all__ = [
- 'Charset',
- 'add_alias',
- 'add_charset',
- 'add_codec',
- ]
-
-import email.base64mime
-import email.quoprimime
-
-from email import errors
-from email.encoders import encode_7or8bit
-
-
-
-# Flags for types of header encodings
-QP = 1 # Quoted-Printable
-BASE64 = 2 # Base64
-SHORTEST = 3 # the shorter of QP and base64, but only for headers
-
-# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
-MISC_LEN = 7
-
-DEFAULT_CHARSET = 'us-ascii'
-
-
-
-# Defaults
-CHARSETS = {
- # input header enc body enc output conv
- 'iso-8859-1': (QP, QP, None),
- 'iso-8859-2': (QP, QP, None),
- 'iso-8859-3': (QP, QP, None),
- 'iso-8859-4': (QP, QP, None),
- # iso-8859-5 is Cyrillic, and not especially used
- # iso-8859-6 is Arabic, also not particularly used
- # iso-8859-7 is Greek, QP will not make it readable
- # iso-8859-8 is Hebrew, QP will not make it readable
- 'iso-8859-9': (QP, QP, None),
- 'iso-8859-10': (QP, QP, None),
- # iso-8859-11 is Thai, QP will not make it readable
- 'iso-8859-13': (QP, QP, None),
- 'iso-8859-14': (QP, QP, None),
- 'iso-8859-15': (QP, QP, None),
- 'windows-1252':(QP, QP, None),
- 'viscii': (QP, QP, None),
- 'us-ascii': (None, None, None),
- 'big5': (BASE64, BASE64, None),
- 'gb2312': (BASE64, BASE64, None),
- 'euc-jp': (BASE64, None, 'iso-2022-jp'),
- 'shift_jis': (BASE64, None, 'iso-2022-jp'),
- 'iso-2022-jp': (BASE64, None, None),
- 'koi8-r': (BASE64, BASE64, None),
- 'utf-8': (SHORTEST, BASE64, 'utf-8'),
- # We're making this one up to represent raw unencoded 8-bit
- '8bit': (None, BASE64, 'utf-8'),
- }
-
-# Aliases for other commonly-used names for character sets. Map
-# them to the real ones used in email.
-ALIASES = {
- 'latin_1': 'iso-8859-1',
- 'latin-1': 'iso-8859-1',
- 'latin_2': 'iso-8859-2',
- 'latin-2': 'iso-8859-2',
- 'latin_3': 'iso-8859-3',
- 'latin-3': 'iso-8859-3',
- 'latin_4': 'iso-8859-4',
- 'latin-4': 'iso-8859-4',
- 'latin_5': 'iso-8859-9',
- 'latin-5': 'iso-8859-9',
- 'latin_6': 'iso-8859-10',
- 'latin-6': 'iso-8859-10',
- 'latin_7': 'iso-8859-13',
- 'latin-7': 'iso-8859-13',
- 'latin_8': 'iso-8859-14',
- 'latin-8': 'iso-8859-14',
- 'latin_9': 'iso-8859-15',
- 'latin-9': 'iso-8859-15',
- 'cp949': 'ks_c_5601-1987',
- 'euc_jp': 'euc-jp',
- 'euc_kr': 'euc-kr',
- 'ascii': 'us-ascii',
- }
-
-
-# Map charsets to their Unicode codec strings.
-CODEC_MAP = {
- 'gb2312': 'eucgb2312_cn',
- 'big5': 'big5_tw',
- # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
- # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
- # Let that stuff pass through without conversion to/from Unicode.
- 'us-ascii': None,
- }
-
-
-
-# Convenience functions for extending the above mappings
-def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
- """Add character set properties to the global registry.
-
- charset is the input character set, and must be the canonical name of a
- character set.
-
- Optional header_enc and body_enc is either Charset.QP for
- quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
- the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
- is only valid for header_enc. It describes how message headers and
- message bodies in the input charset are to be encoded. Default is no
- encoding.
-
- Optional output_charset is the character set that the output should be
- in. Conversions will proceed from input charset, to Unicode, to the
- output charset when the method Charset.convert() is called. The default
- is to output in the same character set as the input.
-
- Both input_charset and output_charset must have Unicode codec entries in
- the module's charset-to-codec mapping; use add_codec(charset, codecname)
- to add codecs the module does not know about. See the codecs module's
- documentation for more information.
- """
- if body_enc == SHORTEST:
- raise ValueError('SHORTEST not allowed for body_enc')
- CHARSETS[charset] = (header_enc, body_enc, output_charset)
-
-
-def add_alias(alias, canonical):
- """Add a character set alias.
-
- alias is the alias name, e.g. latin-1
- canonical is the character set's canonical name, e.g. iso-8859-1
- """
- ALIASES[alias] = canonical
-
-
-def add_codec(charset, codecname):
- """Add a codec that map characters in the given charset to/from Unicode.
-
- charset is the canonical name of a character set. codecname is the name
- of a Python codec, as appropriate for the second argument to the unicode()
- built-in, or to the encode() method of a Unicode string.
- """
- CODEC_MAP[charset] = codecname
-
-
-
-class Charset:
- """Map character sets to their email properties.
-
- This class provides information about the requirements imposed on email
- for a specific character set. It also provides convenience routines for
- converting between character sets, given the availability of the
- applicable codecs. Given a character set, it will do its best to provide
- information on how to use that character set in an email in an
- RFC-compliant way.
-
- Certain character sets must be encoded with quoted-printable or base64
- when used in email headers or bodies. Certain character sets must be
- converted outright, and are not allowed in email. Instances of this
- module expose the following information about a character set:
-
- input_charset: The initial character set specified. Common aliases
- are converted to their `official' email names (e.g. latin_1
- is converted to iso-8859-1). Defaults to 7-bit us-ascii.
-
- header_encoding: If the character set must be encoded before it can be
- used in an email header, this attribute will be set to
- Charset.QP (for quoted-printable), Charset.BASE64 (for
- base64 encoding), or Charset.SHORTEST for the shortest of
- QP or BASE64 encoding. Otherwise, it will be None.
-
- body_encoding: Same as header_encoding, but describes the encoding for the
- mail message's body, which indeed may be different than the
- header encoding. Charset.SHORTEST is not allowed for
- body_encoding.
-
- output_charset: Some character sets must be converted before the can be
- used in email headers or bodies. If the input_charset is
- one of them, this attribute will contain the name of the
- charset output will be converted to. Otherwise, it will
- be None.
-
- input_codec: The name of the Python codec used to convert the
- input_charset to Unicode. If no conversion codec is
- necessary, this attribute will be None.
-
- output_codec: The name of the Python codec used to convert Unicode
- to the output_charset. If no conversion codec is necessary,
- this attribute will have the same value as the input_codec.
- """
- def __init__(self, input_charset=DEFAULT_CHARSET):
- # RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
- # unicode because its .lower() is locale insensitive. If the argument
- # is already a unicode, we leave it at that, but ensure that the
- # charset is ASCII, as the standard (RFC XXX) requires.
- try:
- if isinstance(input_charset, unicode):
- input_charset.encode('ascii')
- else:
- input_charset = unicode(input_charset, 'ascii')
- except UnicodeError:
- raise errors.CharsetError(input_charset)
- input_charset = input_charset.lower()
- # Set the input charset after filtering through the aliases
- self.input_charset = ALIASES.get(input_charset, input_charset)
- # We can try to guess which encoding and conversion to use by the
- # charset_map dictionary. Try that first, but let the user override
- # it.
- henc, benc, conv = CHARSETS.get(self.input_charset,
- (SHORTEST, BASE64, None))
- if not conv:
- conv = self.input_charset
- # Set the attributes, allowing the arguments to override the default.
- self.header_encoding = henc
- self.body_encoding = benc
- self.output_charset = ALIASES.get(conv, conv)
- # Now set the codecs. If one isn't defined for input_charset,
- # guess and try a Unicode codec with the same name as input_codec.
- self.input_codec = CODEC_MAP.get(self.input_charset,
- self.input_charset)
- self.output_codec = CODEC_MAP.get(self.output_charset,
- self.output_charset)
-
- def __str__(self):
- return self.input_charset.lower()
-
- __repr__ = __str__
-
- def __eq__(self, other):
- return str(self) == str(other).lower()
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def get_body_encoding(self):
- """Return the content-transfer-encoding used for body encoding.
-
- This is either the string `quoted-printable' or `base64' depending on
- the encoding used, or it is a function in which case you should call
- the function with a single argument, the Message object being
- encoded. The function should then set the Content-Transfer-Encoding
- header itself to whatever is appropriate.
-
- Returns "quoted-printable" if self.body_encoding is QP.
- Returns "base64" if self.body_encoding is BASE64.
- Returns "7bit" otherwise.
- """
- assert self.body_encoding <> SHORTEST
- if self.body_encoding == QP:
- return 'quoted-printable'
- elif self.body_encoding == BASE64:
- return 'base64'
- else:
- return encode_7or8bit
-
- def convert(self, s):
- """Convert a string from the input_codec to the output_codec."""
- if self.input_codec <> self.output_codec:
- return unicode(s, self.input_codec).encode(self.output_codec)
- else:
- return s
-
- def to_splittable(self, s):
- """Convert a possibly multibyte string to a safely splittable format.
-
- Uses the input_codec to try and convert the string to Unicode, so it
- can be safely split on character boundaries (even for multibyte
- characters).
-
- Returns the string as-is if it isn't known how to convert it to
- Unicode with the input_charset.
-
- Characters that could not be converted to Unicode will be replaced
- with the Unicode replacement character U+FFFD.
- """
- if isinstance(s, unicode) or self.input_codec is None:
- return s
- try:
- return unicode(s, self.input_codec, 'replace')
- except LookupError:
- # Input codec not installed on system, so return the original
- # string unchanged.
- return s
-
- def from_splittable(self, ustr, to_output=True):
- """Convert a splittable string back into an encoded string.
-
- Uses the proper codec to try and convert the string from Unicode back
- into an encoded format. Return the string as-is if it is not Unicode,
- or if it could not be converted from Unicode.
-
- Characters that could not be converted from Unicode will be replaced
- with an appropriate character (usually '?').
-
- If to_output is True (the default), uses output_codec to convert to an
- encoded format. If to_output is False, uses input_codec.
- """
- if to_output:
- codec = self.output_codec
- else:
- codec = self.input_codec
- if not isinstance(ustr, unicode) or codec is None:
- return ustr
- try:
- return ustr.encode(codec, 'replace')
- except LookupError:
- # Output codec not installed
- return ustr
-
- def get_output_charset(self):
- """Return the output character set.
-
- This is self.output_charset if that is not None, otherwise it is
- self.input_charset.
- """
- return self.output_charset or self.input_charset
-
- def encoded_header_len(self, s):
- """Return the length of the encoded header string."""
- cset = self.get_output_charset()
- # The len(s) of a 7bit encoding is len(s)
- if self.header_encoding == BASE64:
- return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
- elif self.header_encoding == QP:
- return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
- elif self.header_encoding == SHORTEST:
- lenb64 = email.base64mime.base64_len(s)
- lenqp = email.quoprimime.header_quopri_len(s)
- return min(lenb64, lenqp) + len(cset) + MISC_LEN
- else:
- return len(s)
-
- def header_encode(self, s, convert=False):
- """Header-encode a string, optionally converting it to output_charset.
-
- If convert is True, the string will be converted from the input
- charset to the output charset automatically. This is not useful for
- multibyte character sets, which have line length issues (multibyte
- characters must be split on a character, not a byte boundary); use the
- high-level Header class to deal with these issues. convert defaults
- to False.
-
- The type of encoding (base64 or quoted-printable) will be based on
- self.header_encoding.
- """
- cset = self.get_output_charset()
- if convert:
- s = self.convert(s)
- # 7bit/8bit encodings return the string unchanged (modulo conversions)
- if self.header_encoding == BASE64:
- return email.base64mime.header_encode(s, cset)
- elif self.header_encoding == QP:
- return email.quoprimime.header_encode(s, cset, maxlinelen=None)
- elif self.header_encoding == SHORTEST:
- lenb64 = email.base64mime.base64_len(s)
- lenqp = email.quoprimime.header_quopri_len(s)
- if lenb64 < lenqp:
- return email.base64mime.header_encode(s, cset)
- else:
- return email.quoprimime.header_encode(s, cset, maxlinelen=None)
- else:
- return s
-
- def body_encode(self, s, convert=True):
- """Body-encode a string and convert it to output_charset.
-
- If convert is True (the default), the string will be converted from
- the input charset to output charset automatically. Unlike
- header_encode(), there are no issues with byte boundaries and
- multibyte charsets in email bodies, so this is usually pretty safe.
-
- The type of encoding (base64 or quoted-printable) will be based on
- self.body_encoding.
- """
- if convert:
- s = self.convert(s)
- # 7bit/8bit encodings return the string unchanged (module conversions)
- if self.body_encoding is BASE64:
- return email.base64mime.body_encode(s)
- elif self.body_encoding is QP:
- return email.quoprimime.body_encode(s)
- else:
- return s
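A compact Python 2 sketch of how the Charset registry removed above chose header and body encodings; the inputs are illustrative:

    from email.charset import Charset, QP, SHORTEST

    c = Charset('latin_1')                  # alias resolves to iso-8859-1
    print c.input_charset                   # iso-8859-1
    print c.header_encoding == QP           # True: headers use quoted-printable
    print c.get_body_encoding()             # quoted-printable

    u = Charset('utf-8')
    print u.header_encoding == SHORTEST     # True: shorter of QP and base64 per header
    print u.get_body_encoding()             # base64
    print u.header_encode('hello')          # =?utf-8?q?hello?=  (QP wins for ASCII)
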
diff --git a/sys/lib/python/email/encoders.py b/sys/lib/python/email/encoders.py
deleted file mode 100644
index 06016cdea..000000000
--- a/sys/lib/python/email/encoders.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Encodings and related functions."""
-
-__all__ = [
- 'encode_7or8bit',
- 'encode_base64',
- 'encode_noop',
- 'encode_quopri',
- ]
-
-import base64
-
-from quopri import encodestring as _encodestring
-
-
-
-def _qencode(s):
- enc = _encodestring(s, quotetabs=True)
- # Must encode spaces, which quopri.encodestring() doesn't do
- return enc.replace(' ', '=20')
-
-
-def _bencode(s):
- # We can't quite use base64.encodestring() since it tacks on a "courtesy
- # newline". Blech!
- if not s:
- return s
- hasnewline = (s[-1] == '\n')
- value = base64.encodestring(s)
- if not hasnewline and value[-1] == '\n':
- return value[:-1]
- return value
-
-
-
-def encode_base64(msg):
- """Encode the message's payload in Base64.
-
- Also, add an appropriate Content-Transfer-Encoding header.
- """
- orig = msg.get_payload()
- encdata = _bencode(orig)
- msg.set_payload(encdata)
- msg['Content-Transfer-Encoding'] = 'base64'
-
-
-
-def encode_quopri(msg):
- """Encode the message's payload in quoted-printable.
-
- Also, add an appropriate Content-Transfer-Encoding header.
- """
- orig = msg.get_payload()
- encdata = _qencode(orig)
- msg.set_payload(encdata)
- msg['Content-Transfer-Encoding'] = 'quoted-printable'
-
-
-
-def encode_7or8bit(msg):
- """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
- orig = msg.get_payload()
- if orig is None:
- # There's no payload. For backwards compatibility we use 7bit
- msg['Content-Transfer-Encoding'] = '7bit'
- return
- # We play a trick to make this go fast. If encoding to ASCII succeeds, we
- # know the data must be 7bit, otherwise treat it as 8bit.
- try:
- orig.encode('ascii')
- except UnicodeError:
- # iso-2022-* is non-ASCII but still 7-bit
- charset = msg.get_charset()
- output_cset = charset and charset.output_charset
- if output_cset and output_cset.lower().startswith('iso-2202-'):
- msg['Content-Transfer-Encoding'] = '7bit'
- else:
- msg['Content-Transfer-Encoding'] = '8bit'
- else:
- msg['Content-Transfer-Encoding'] = '7bit'
-
-
-
-def encode_noop(msg):
- """Do nothing."""
diff --git a/sys/lib/python/email/errors.py b/sys/lib/python/email/errors.py
deleted file mode 100644
index d52a62460..000000000
--- a/sys/lib/python/email/errors.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""email package exception classes."""
-
-
-
-class MessageError(Exception):
- """Base class for errors in the email package."""
-
-
-class MessageParseError(MessageError):
- """Base class for message parsing errors."""
-
-
-class HeaderParseError(MessageParseError):
- """Error while parsing headers."""
-
-
-class BoundaryError(MessageParseError):
- """Couldn't find terminating boundary."""
-
-
-class MultipartConversionError(MessageError, TypeError):
- """Conversion to a multipart is prohibited."""
-
-
-class CharsetError(MessageError):
- """An illegal charset was given."""
-
-
-
-# These are parsing defects which the parser was able to work around.
-class MessageDefect:
- """Base class for a message defect."""
-
- def __init__(self, line=None):
- self.line = line
-
-class NoBoundaryInMultipartDefect(MessageDefect):
- """A message claimed to be a multipart but had no boundary parameter."""
-
-class StartBoundaryNotFoundDefect(MessageDefect):
- """The claimed start boundary was never found."""
-
-class FirstHeaderLineIsContinuationDefect(MessageDefect):
- """A message had a continuation line as its first header line."""
-
-class MisplacedEnvelopeHeaderDefect(MessageDefect):
- """A 'Unix-from' header was found in the middle of a header block."""
-
-class MalformedHeaderDefect(MessageDefect):
- """Found a header that was missing a colon, or was otherwise malformed."""
-
-class MultipartInvariantViolationDefect(MessageDefect):
- """A message claimed to be a multipart but no subparts were found."""
diff --git a/sys/lib/python/email/feedparser.py b/sys/lib/python/email/feedparser.py
deleted file mode 100644
index afb02b32b..000000000
--- a/sys/lib/python/email/feedparser.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (C) 2004-2006 Python Software Foundation
-# Authors: Baxter, Wouters and Warsaw
-# Contact: email-sig@python.org
-
-"""FeedParser - An email feed parser.
-
-The feed parser implements an interface for incrementally parsing an email
-message, line by line. This has advantages for certain applications, such as
-those reading email messages off a socket.
-
-FeedParser.feed() is the primary interface for pushing new data into the
-parser. It returns when there's nothing more it can do with the available
-data. When you have no more data to push into the parser, call .close().
-This completes the parsing and returns the root message object.
-
-The other advantage of this parser is that it will never throw a parsing
-exception. Instead, when it finds something unexpected, it adds a 'defect' to
-the current message. Defects are just instances that live on the message
-object's .defects attribute.
-"""
-
-__all__ = ['FeedParser']
-
-import re
-
-from email import errors
-from email import message
-
-NLCRE = re.compile('\r\n|\r|\n')
-NLCRE_bol = re.compile('(\r\n|\r|\n)')
-NLCRE_eol = re.compile('(\r\n|\r|\n)$')
-NLCRE_crack = re.compile('(\r\n|\r|\n)')
-# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
-# except controls, SP, and ":".
-headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
-EMPTYSTRING = ''
-NL = '\n'
-
-NeedMoreData = object()
-
-
-
-class BufferedSubFile(object):
- """A file-ish object that can have new data loaded into it.
-
- You can also push and pop line-matching predicates onto a stack. When the
- current predicate matches the current line, a false EOF response
- (i.e. empty string) is returned instead. This lets the parser adhere to a
- simple abstraction -- it parses until EOF closes the current message.
- """
- def __init__(self):
- # The last partial line pushed into this object.
- self._partial = ''
- # The list of full, pushed lines, in reverse order
- self._lines = []
- # The stack of false-EOF checking predicates.
- self._eofstack = []
- # A flag indicating whether the file has been closed or not.
- self._closed = False
-
- def push_eof_matcher(self, pred):
- self._eofstack.append(pred)
-
- def pop_eof_matcher(self):
- return self._eofstack.pop()
-
- def close(self):
- # Don't forget any trailing partial line.
- self._lines.append(self._partial)
- self._partial = ''
- self._closed = True
-
- def readline(self):
- if not self._lines:
- if self._closed:
- return ''
- return NeedMoreData
- # Pop the line off the stack and see if it matches the current
- # false-EOF predicate.
- line = self._lines.pop()
- # RFC 2046, section 5.1.2 requires us to recognize outer level
- # boundaries at any level of inner nesting. Do this, but be sure it's
- # in the order of most to least nested.
- for ateof in self._eofstack[::-1]:
- if ateof(line):
- # We're at the false EOF. But push the last line back first.
- self._lines.append(line)
- return ''
- return line
-
- def unreadline(self, line):
- # Let the consumer push a line back into the buffer.
- assert line is not NeedMoreData
- self._lines.append(line)
-
- def push(self, data):
- """Push some new data into this object."""
- # Handle any previous leftovers
- data, self._partial = self._partial + data, ''
- # Crack into lines, but preserve the newlines on the end of each
- parts = NLCRE_crack.split(data)
- # The *ahem* interesting behaviour of re.split when supplied grouping
- # parentheses is that the last element of the resulting list is the
- # data after the final RE. In the case of a NL/CR terminated string,
- # this is the empty string.
- self._partial = parts.pop()
- # parts is a list of strings, alternating between the line contents
- # and the eol character(s). Gather up a list of lines after
- # re-attaching the newlines.
- lines = []
- for i in range(len(parts) // 2):
- lines.append(parts[i*2] + parts[i*2+1])
- self.pushlines(lines)
-
- def pushlines(self, lines):
- # Reverse and insert at the front of the lines.
- self._lines[:0] = lines[::-1]
-
- def is_closed(self):
- return self._closed
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if line == '':
- raise StopIteration
- return line
-
-
-
-class FeedParser:
- """A feed-style parser of email."""
-
- def __init__(self, _factory=message.Message):
- """_factory is called with no arguments to create a new message obj"""
- self._factory = _factory
- self._input = BufferedSubFile()
- self._msgstack = []
- self._parse = self._parsegen().next
- self._cur = None
- self._last = None
- self._headersonly = False
-
- # Non-public interface for supporting Parser's headersonly flag
- def _set_headersonly(self):
- self._headersonly = True
-
- def feed(self, data):
- """Push more data into the parser."""
- self._input.push(data)
- self._call_parse()
-
- def _call_parse(self):
- try:
- self._parse()
- except StopIteration:
- pass
-
- def close(self):
- """Parse all remaining data and return the root message object."""
- self._input.close()
- self._call_parse()
- root = self._pop_message()
- assert not self._msgstack
- # Look for final set of defects
- if root.get_content_maintype() == 'multipart' \
- and not root.is_multipart():
- root.defects.append(errors.MultipartInvariantViolationDefect())
- return root
-
- def _new_message(self):
- msg = self._factory()
- if self._cur and self._cur.get_content_type() == 'multipart/digest':
- msg.set_default_type('message/rfc822')
- if self._msgstack:
- self._msgstack[-1].attach(msg)
- self._msgstack.append(msg)
- self._cur = msg
- self._last = msg
-
- def _pop_message(self):
- retval = self._msgstack.pop()
- if self._msgstack:
- self._cur = self._msgstack[-1]
- else:
- self._cur = None
- return retval
-
- def _parsegen(self):
- # Create a new message and start by parsing headers.
- self._new_message()
- headers = []
- # Collect the headers, searching for a line that doesn't match the RFC
- # 2822 header or continuation pattern (including an empty line).
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if not headerRE.match(line):
- # If we saw the RFC defined header/body separator
- # (i.e. newline), just throw it away. Otherwise the line is
- # part of the body so push it back.
- if not NLCRE.match(line):
- self._input.unreadline(line)
- break
- headers.append(line)
- # Done with the headers, so parse them and figure out what we're
- # supposed to see in the body of the message.
- self._parse_headers(headers)
- # Headers-only parsing is a backwards compatibility hack, which was
- # necessary in the older parser, which could throw errors. All
- # remaining lines in the input are thrown into the message body.
- if self._headersonly:
- lines = []
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if line == '':
- break
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
- return
- if self._cur.get_content_type() == 'message/delivery-status':
- # message/delivery-status contains blocks of headers separated by
- # a blank line. We'll represent each header block as a separate
- # nested message object, but the processing is a bit different
- # than standard message/* types because there is no body for the
- # nested messages. A blank line separates the subparts.
- while True:
- self._input.push_eof_matcher(NLCRE.match)
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- msg = self._pop_message()
- # We need to pop the EOF matcher in order to tell if we're at
- # the end of the current file, not the end of the last block
- # of message headers.
- self._input.pop_eof_matcher()
- # The input stream must be sitting at the newline or at the
- # EOF. We want to see if we're at the end of this subpart, so
- # first consume the blank line, then test the next line to see
- # if we're at this subpart's EOF.
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- break
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- break
- if line == '':
- break
- # Not at EOF so this is a line we're going to need.
- self._input.unreadline(line)
- return
- if self._cur.get_content_maintype() == 'message':
- # The message claims to be a message/* type, then what follows is
- # another RFC 2822 message.
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- self._pop_message()
- return
- if self._cur.get_content_maintype() == 'multipart':
- boundary = self._cur.get_boundary()
- if boundary is None:
- # The message /claims/ to be a multipart but it has not
- # defined a boundary. That's a problem which we'll handle by
- # reading everything until the EOF and marking the message as
- # defective.
- self._cur.defects.append(errors.NoBoundaryInMultipartDefect())
- lines = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
- return
- # Create a line match predicate which matches the inter-part
- # boundary as well as the end-of-multipart boundary. Don't push
- # this onto the input stream until we've scanned past the
- # preamble.
- separator = '--' + boundary
- boundaryre = re.compile(
- '(?P<sep>' + re.escape(separator) +
- r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
- capturing_preamble = True
- preamble = []
- linesep = False
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if line == '':
- break
- mo = boundaryre.match(line)
- if mo:
- # If we're looking at the end boundary, we're done with
- # this multipart. If there was a newline at the end of
- # the closing boundary, then we need to initialize the
- # epilogue with the empty string (see below).
- if mo.group('end'):
- linesep = mo.group('linesep')
- break
- # We saw an inter-part boundary. Were we in the preamble?
- if capturing_preamble:
- if preamble:
- # According to RFC 2046, the last newline belongs
- # to the boundary.
- lastline = preamble[-1]
- eolmo = NLCRE_eol.search(lastline)
- if eolmo:
- preamble[-1] = lastline[:-len(eolmo.group(0))]
- self._cur.preamble = EMPTYSTRING.join(preamble)
- capturing_preamble = False
- self._input.unreadline(line)
- continue
- # We saw a boundary separating two parts. Consume any
- # multiple boundary lines that may be following. Our
- # interpretation of RFC 2046 BNF grammar does not produce
- # body parts within such double boundaries.
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- mo = boundaryre.match(line)
- if not mo:
- self._input.unreadline(line)
- break
- # Recurse to parse this subpart; the input stream points
- # at the subpart's first line.
- self._input.push_eof_matcher(boundaryre.match)
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- # Because of RFC 2046, the newline preceding the boundary
- # separator actually belongs to the boundary, not the
- # previous subpart's payload (or epilogue if the previous
- # part is a multipart).
- if self._last.get_content_maintype() == 'multipart':
- epilogue = self._last.epilogue
- if epilogue == '':
- self._last.epilogue = None
- elif epilogue is not None:
- mo = NLCRE_eol.search(epilogue)
- if mo:
- end = len(mo.group(0))
- self._last.epilogue = epilogue[:-end]
- else:
- payload = self._last.get_payload()
- if isinstance(payload, basestring):
- mo = NLCRE_eol.search(payload)
- if mo:
- payload = payload[:-len(mo.group(0))]
- self._last.set_payload(payload)
- self._input.pop_eof_matcher()
- self._pop_message()
- # Set the multipart up for newline cleansing, which will
- # happen if we're in a nested multipart.
- self._last = self._cur
- else:
- # I think we must be in the preamble
- assert capturing_preamble
- preamble.append(line)
- # We've seen either the EOF or the end boundary. If we're still
- # capturing the preamble, we never saw the start boundary. Note
- # that as a defect and store the captured text as the payload.
- # Everything from here to the EOF is epilogue.
- if capturing_preamble:
- self._cur.defects.append(errors.StartBoundaryNotFoundDefect())
- self._cur.set_payload(EMPTYSTRING.join(preamble))
- epilogue = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- self._cur.epilogue = EMPTYSTRING.join(epilogue)
- return
- # If the end boundary ended in a newline, we'll need to make sure
- # the epilogue isn't None
- if linesep:
- epilogue = ['']
- else:
- epilogue = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- epilogue.append(line)
- # Any CRLF at the front of the epilogue is not technically part of
- # the epilogue. Also, watch out for an empty string epilogue,
- # which means a single newline.
- if epilogue:
- firstline = epilogue[0]
- bolmo = NLCRE_bol.match(firstline)
- if bolmo:
- epilogue[0] = firstline[len(bolmo.group(0)):]
- self._cur.epilogue = EMPTYSTRING.join(epilogue)
- return
- # Otherwise, it's some non-multipart type, so the entire rest of the
- # file contents becomes the payload.
- lines = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
-
- def _parse_headers(self, lines):
- # Passed a list of lines that make up the headers for the current msg
- lastheader = ''
- lastvalue = []
- for lineno, line in enumerate(lines):
- # Check for continuation
- if line[0] in ' \t':
- if not lastheader:
- # The first line of the headers was a continuation. This
- # is illegal, so let's note the defect, store the illegal
- # line, and ignore it for purposes of headers.
- defect = errors.FirstHeaderLineIsContinuationDefect(line)
- self._cur.defects.append(defect)
- continue
- lastvalue.append(line)
- continue
- if lastheader:
- # XXX reconsider the joining of folded lines
- lhdr = EMPTYSTRING.join(lastvalue)[:-1].rstrip('\r\n')
- self._cur[lastheader] = lhdr
- lastheader, lastvalue = '', []
- # Check for envelope header, i.e. unix-from
- if line.startswith('From '):
- if lineno == 0:
- # Strip off the trailing newline
- mo = NLCRE_eol.search(line)
- if mo:
- line = line[:-len(mo.group(0))]
- self._cur.set_unixfrom(line)
- continue
- elif lineno == len(lines) - 1:
- # Something looking like a unix-from at the end - it's
- # probably the first line of the body, so push back the
- # line and stop.
- self._input.unreadline(line)
- return
- else:
- # Weirdly placed unix-from line. Note this as a defect
- # and ignore it.
- defect = errors.MisplacedEnvelopeHeaderDefect(line)
- self._cur.defects.append(defect)
- continue
- # Split the line on the colon separating field name from value.
- i = line.find(':')
- if i < 0:
- defect = errors.MalformedHeaderDefect(line)
- self._cur.defects.append(defect)
- continue
- lastheader = line[:i]
- lastvalue = [line[i+1:].lstrip()]
- # Done with all the lines, so handle the last header.
- if lastheader:
- # XXX reconsider the joining of folded lines
- self._cur[lastheader] = EMPTYSTRING.join(lastvalue).rstrip('\r\n')
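The FeedParser removed above accepts data incrementally and records problems as defects instead of raising. A small Python 2 sketch; the chunked message below is an illustrative assumption:

    from email.feedparser import FeedParser

    parser = FeedParser()
    # Data may arrive in arbitrary chunks, e.g. straight off a socket.
    parser.feed('Subject: hel')
    parser.feed('lo\nFrom: ori@eigenstate.org\n')
    parser.feed('\nbody line 1\nbody line 2\n')
    msg = parser.close()                     # root Message object

    print msg['Subject']                     # hello
    print msg.defects                        # [] -- problems are noted, not raised
    print msg.get_payload()                  # body line 1 / body line 2
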
diff --git a/sys/lib/python/email/generator.py b/sys/lib/python/email/generator.py
deleted file mode 100644
index 6e7a51530..000000000
--- a/sys/lib/python/email/generator.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Classes to generate plain text from a message object tree."""
-
-__all__ = ['Generator', 'DecodedGenerator']
-
-import re
-import sys
-import time
-import random
-import warnings
-
-from cStringIO import StringIO
-from email.header import Header
-
-UNDERSCORE = '_'
-NL = '\n'
-
-fcre = re.compile(r'^From ', re.MULTILINE)
-
-def _is8bitstring(s):
- if isinstance(s, str):
- try:
- unicode(s, 'us-ascii')
- except UnicodeError:
- return True
- return False
-
-
-
-class Generator:
- """Generates output from a Message object tree.
-
- This basic generator writes the message to the given file object as plain
- text.
- """
- #
- # Public interface
- #
-
- def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
- """Create the generator for message flattening.
-
- outfp is the output file-like object for writing the message to. It
- must have a write() method.
-
- Optional mangle_from_ is a flag that, when True (the default), escapes
- From_ lines in the body of the message by putting a `>' in front of
- them.
-
- Optional maxheaderlen specifies the longest length for a non-continued
- header. When a header line is longer (in characters, with tabs
- expanded to 8 spaces) than maxheaderlen, the header will split as
- defined in the Header class. Set maxheaderlen to zero to disable
- header wrapping. The default is 78, as recommended (but not required)
- by RFC 2822.
- """
- self._fp = outfp
- self._mangle_from_ = mangle_from_
- self._maxheaderlen = maxheaderlen
-
- def write(self, s):
- # Just delegate to the file object
- self._fp.write(s)
-
- def flatten(self, msg, unixfrom=False):
- """Print the message object tree rooted at msg to the output file
- specified when the Generator instance was created.
-
- unixfrom is a flag that forces the printing of a Unix From_ delimiter
- before the first object in the message tree. If the original message
- has no From_ delimiter, a `standard' one is crafted. By default, this
- is False to inhibit the printing of any From_ delimiter.
-
- Note that for subobjects, no From_ line is printed.
- """
- if unixfrom:
- ufrom = msg.get_unixfrom()
- if not ufrom:
- ufrom = 'From nobody ' + time.ctime(time.time())
- print >> self._fp, ufrom
- self._write(msg)
-
- def clone(self, fp):
- """Clone this generator with the exact same options."""
- return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
-
- #
- # Protected interface - undocumented ;/
- #
-
- def _write(self, msg):
- # We can't write the headers yet because of the following scenario:
- # say a multipart message includes the boundary string somewhere in
- # its body. We'd have to calculate the new boundary /before/ we write
- # the headers so that we can write the correct Content-Type:
- # parameter.
- #
- # The way we do this, so as to make the _handle_*() methods simpler,
- # is to cache any subpart writes into a StringIO. Then we write the
- # headers and the StringIO contents. That way, subpart handlers can
- # Do The Right Thing, and can still modify the Content-Type: header if
- # necessary.
- oldfp = self._fp
- try:
- self._fp = sfp = StringIO()
- self._dispatch(msg)
- finally:
- self._fp = oldfp
- # Write the headers. First we see if the message object wants to
- # handle that itself. If not, we'll do it generically.
- meth = getattr(msg, '_write_headers', None)
- if meth is None:
- self._write_headers(msg)
- else:
- meth(self)
- self._fp.write(sfp.getvalue())
-
- def _dispatch(self, msg):
- # Get the Content-Type: for the message, then try to dispatch to
- # self._handle_<maintype>_<subtype>(). If there's no handler for the
- # full MIME type, then dispatch to self._handle_<maintype>(). If
- # that's missing too, then dispatch to self._writeBody().
- main = msg.get_content_maintype()
- sub = msg.get_content_subtype()
- specific = UNDERSCORE.join((main, sub)).replace('-', '_')
- meth = getattr(self, '_handle_' + specific, None)
- if meth is None:
- generic = main.replace('-', '_')
- meth = getattr(self, '_handle_' + generic, None)
- if meth is None:
- meth = self._writeBody
- meth(msg)
-
- #
- # Default handlers
- #
-
- def _write_headers(self, msg):
- for h, v in msg.items():
- print >> self._fp, '%s:' % h,
- if self._maxheaderlen == 0:
- # Explicit no-wrapping
- print >> self._fp, v
- elif isinstance(v, Header):
- # Header instances know what to do
- print >> self._fp, v.encode()
- elif _is8bitstring(v):
- # If we have raw 8bit data in a byte string, we have no idea
- # what the encoding is. There is no safe way to split this
- # string. If it's ascii-subset, then we could do a normal
- # ascii split, but if it's multibyte then we could break the
- # string. There's no way to know so the least harm seems to
- # be to not split the string and risk it being too long.
- print >> self._fp, v
- else:
- # Header's got lots of smarts, so use it.
- print >> self._fp, Header(
- v, maxlinelen=self._maxheaderlen,
- header_name=h, continuation_ws='\t').encode()
- # A blank line always separates headers from body
- print >> self._fp
-
- #
- # Handlers for writing types and subtypes
- #
-
- def _handle_text(self, msg):
- payload = msg.get_payload()
- if payload is None:
- return
- if not isinstance(payload, basestring):
- raise TypeError('string payload expected: %s' % type(payload))
- if self._mangle_from_:
- payload = fcre.sub('>From ', payload)
- self._fp.write(payload)
-
- # Default body handler
- _writeBody = _handle_text
-
- def _handle_multipart(self, msg):
- # The trick here is to write out each part separately, merge them all
- # together, and then make sure that the boundary we've chosen isn't
- # present in the payload.
- msgtexts = []
- subparts = msg.get_payload()
- if subparts is None:
- subparts = []
- elif isinstance(subparts, basestring):
- # e.g. a non-strict parse of a message with no starting boundary.
- self._fp.write(subparts)
- return
- elif not isinstance(subparts, list):
- # Scalar payload
- subparts = [subparts]
- for part in subparts:
- s = StringIO()
- g = self.clone(s)
- g.flatten(part, unixfrom=False)
- msgtexts.append(s.getvalue())
- # Now make sure the boundary we've selected doesn't appear in any of
- # the message texts.
- alltext = NL.join(msgtexts)
- # BAW: What about boundaries that are wrapped in double-quotes?
- boundary = msg.get_boundary(failobj=_make_boundary(alltext))
- # If we had to calculate a new boundary because the body text
- # contained that string, set the new boundary. We don't do it
- # unconditionally because, while set_boundary() preserves order, it
- # doesn't preserve newlines/continuations in headers. This is no big
- # deal in practice, but turns out to be inconvenient for the unittest
- # suite.
- if msg.get_boundary() <> boundary:
- msg.set_boundary(boundary)
- # If there's a preamble, write it out, with a trailing CRLF
- if msg.preamble is not None:
- print >> self._fp, msg.preamble
- # dash-boundary transport-padding CRLF
- print >> self._fp, '--' + boundary
- # body-part
- if msgtexts:
- self._fp.write(msgtexts.pop(0))
- # *encapsulation
- # --> delimiter transport-padding
- # --> CRLF body-part
- for body_part in msgtexts:
- # delimiter transport-padding CRLF
- print >> self._fp, '\n--' + boundary
- # body-part
- self._fp.write(body_part)
- # close-delimiter transport-padding
- self._fp.write('\n--' + boundary + '--')
- if msg.epilogue is not None:
- print >> self._fp
- self._fp.write(msg.epilogue)
-
- def _handle_message_delivery_status(self, msg):
- # We can't just write the headers directly to self's file object
- # because this will leave an extra newline between the last header
- # block and the boundary. Sigh.
- blocks = []
- for part in msg.get_payload():
- s = StringIO()
- g = self.clone(s)
- g.flatten(part, unixfrom=False)
- text = s.getvalue()
- lines = text.split('\n')
- # Strip off the unnecessary trailing empty line
- if lines and lines[-1] == '':
- blocks.append(NL.join(lines[:-1]))
- else:
- blocks.append(text)
- # Now join all the blocks with an empty line. This has the lovely
- # effect of separating each block with an empty line, but not adding
- # an extra one after the last one.
- self._fp.write(NL.join(blocks))
-
- def _handle_message(self, msg):
- s = StringIO()
- g = self.clone(s)
- # The payload of a message/rfc822 part should be a multipart sequence
- # of length 1. The zeroth element of the list should be the Message
- # object for the subpart. Extract that object, stringify it, and
- # write it out.
- g.flatten(msg.get_payload(0), unixfrom=False)
- self._fp.write(s.getvalue())
-
-
-
-_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
-
-class DecodedGenerator(Generator):
- """Generator a text representation of a message.
-
- Like the Generator base class, except that non-text parts are substituted
- with a format string representing the part.
- """
- def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
- """Like Generator.__init__() except that an additional optional
- argument is allowed.
-
- Walks through all subparts of a message. If the subpart is of main
- type `text', then it prints the decoded payload of the subpart.
-
- Otherwise, fmt is a format string that is used instead of the message
- payload. fmt is expanded with the following keywords (in
- %(keyword)s format):
-
- type : Full MIME type of the non-text part
- maintype : Main MIME type of the non-text part
- subtype : Sub-MIME type of the non-text part
- filename : Filename of the non-text part
- description: Description associated with the non-text part
- encoding : Content transfer encoding of the non-text part
-
- The default value for fmt is None, meaning
-
- [Non-text (%(type)s) part of message omitted, filename %(filename)s]
- """
- Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
- if fmt is None:
- self._fmt = _FMT
- else:
- self._fmt = fmt
-
- def _dispatch(self, msg):
- for part in msg.walk():
- maintype = part.get_content_maintype()
- if maintype == 'text':
- print >> self, part.get_payload(decode=True)
- elif maintype == 'multipart':
- # Just skip this
- pass
- else:
- print >> self, self._fmt % {
- 'type' : part.get_content_type(),
- 'maintype' : part.get_content_maintype(),
- 'subtype' : part.get_content_subtype(),
- 'filename' : part.get_filename('[no filename]'),
- 'description': part.get('Content-Description',
- '[no description]'),
- 'encoding' : part.get('Content-Transfer-Encoding',
- '[no encoding]'),
- }
-
-
-
-# Helper
-_width = len(repr(sys.maxint-1))
-_fmt = '%%0%dd' % _width
-
-def _make_boundary(text=None):
- # Craft a random boundary. If text is given, ensure that the chosen
- # boundary doesn't appear in the text.
- token = random.randrange(sys.maxint)
- boundary = ('=' * 15) + (_fmt % token) + '=='
- if text is None:
- return boundary
- b = boundary
- counter = 0
- while True:
- cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
- if not cre.search(text):
- break
- b = boundary + '.' + str(counter)
- counter += 1
- return b
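A minimal sketch of flattening a message with the Generator defined above (legacy Python 2 email API; the addresses and body are illustrative):

    from cStringIO import StringIO
    from email.mime.text import MIMEText
    from email.generator import Generator

    msg = MIMEText('first line\nFrom here on, mangling kicks in\n')
    msg['From'] = 'a@example.com'
    msg['To'] = 'b@example.com'

    fp = StringIO()
    g = Generator(fp, mangle_from_=True, maxheaderlen=60)
    g.flatten(msg, unixfrom=True)   # crafts a "From nobody ..." line
    # Body lines starting with "From " come out as ">From ".
    print fp.getvalue()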
diff --git a/sys/lib/python/email/header.py b/sys/lib/python/email/header.py
deleted file mode 100644
index e139ccf64..000000000
--- a/sys/lib/python/email/header.py
+++ /dev/null
@@ -1,503 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Ben Gertzfield, Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Header encoding and decoding functionality."""
-
-__all__ = [
- 'Header',
- 'decode_header',
- 'make_header',
- ]
-
-import re
-import binascii
-
-import email.quoprimime
-import email.base64mime
-
-from email.errors import HeaderParseError
-from email.charset import Charset
-
-NL = '\n'
-SPACE = ' '
-USPACE = u' '
-SPACE8 = ' ' * 8
-UEMPTYSTRING = u''
-
-MAXLINELEN = 76
-
-USASCII = Charset('us-ascii')
-UTF8 = Charset('utf-8')
-
-# Match encoded-word strings in the form =?charset?q?Hello_World?=
-ecre = re.compile(r'''
- =\? # literal =?
- (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
- \? # literal ?
- (?P<encoding>[qb]) # either a "q" or a "b", case insensitive
- \? # literal ?
- (?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
- \?= # literal ?=
- (?=[ \t]|$) # whitespace or the end of the string
- ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
-
-# Field name regexp, including trailing colon, but not separating whitespace,
-# according to RFC 2822. Character range is from tilde to exclamation mark.
-# For use with .match()
-fcre = re.compile(r'[\041-\176]+:$')
-
-
-
-# Helpers
-_max_append = email.quoprimime._max_append
-
-
-
-def decode_header(header):
- """Decode a message header value without converting charset.
-
- Returns a list of (decoded_string, charset) pairs containing each of the
- decoded parts of the header. Charset is None for non-encoded parts of the
- header, otherwise a lower-case string containing the name of the character
- set specified in the encoded string.
-
- An email.Errors.HeaderParseError may be raised when certain decoding
- errors occur (e.g. a base64 decoding exception).
- """
- # If no encoding, just return the header
- header = str(header)
- if not ecre.search(header):
- return [(header, None)]
- decoded = []
- dec = ''
- for line in header.splitlines():
- # This line might not have an encoding in it
- if not ecre.search(line):
- decoded.append((line, None))
- continue
- parts = ecre.split(line)
- while parts:
- unenc = parts.pop(0).strip()
- if unenc:
- # Should we continue a long line?
- if decoded and decoded[-1][1] is None:
- decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
- else:
- decoded.append((unenc, None))
- if parts:
- charset, encoding = [s.lower() for s in parts[0:2]]
- encoded = parts[2]
- dec = None
- if encoding == 'q':
- dec = email.quoprimime.header_decode(encoded)
- elif encoding == 'b':
- try:
- dec = email.base64mime.decode(encoded)
- except binascii.Error:
- # Turn this into a higher level exception. BAW: Right
- # now we throw the lower level exception away but
- # when/if we get exception chaining, we'll preserve it.
- raise HeaderParseError
- if dec is None:
- dec = encoded
-
- if decoded and decoded[-1][1] == charset:
- decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
- else:
- decoded.append((dec, charset))
- del parts[0:3]
- return decoded
-
-
-
-def make_header(decoded_seq, maxlinelen=None, header_name=None,
- continuation_ws=' '):
- """Create a Header from a sequence of pairs as returned by decode_header()
-
- decode_header() takes a header value string and returns a sequence of
- pairs of the format (decoded_string, charset) where charset is the string
- name of the character set.
-
- This function takes one of those sequence of pairs and returns a Header
- instance. Optional maxlinelen, header_name, and continuation_ws are as in
- the Header constructor.
- """
- h = Header(maxlinelen=maxlinelen, header_name=header_name,
- continuation_ws=continuation_ws)
- for s, charset in decoded_seq:
- # None means us-ascii but we can simply pass it on to h.append()
- if charset is not None and not isinstance(charset, Charset):
- charset = Charset(charset)
- h.append(s, charset)
- return h
-
-
-
-class Header:
- def __init__(self, s=None, charset=None,
- maxlinelen=None, header_name=None,
- continuation_ws=' ', errors='strict'):
- """Create a MIME-compliant header that can contain many character sets.
-
- Optional s is the initial header value. If None, the initial header
- value is not set. You can later append to the header with .append()
- method calls. s may be a byte string or a Unicode string, but see the
- .append() documentation for semantics.
-
- Optional charset serves two purposes: it has the same meaning as the
- charset argument to the .append() method. It also sets the default
- character set for all subsequent .append() calls that omit the charset
- argument. If charset is not provided in the constructor, the us-ascii
- charset is used both as s's initial charset and as the default for
- subsequent .append() calls.
-
- The maximum line length can be specified explicitly via maxlinelen. For
- splitting the first line to a shorter value (to account for the field
- header which isn't included in s, e.g. `Subject') pass in the name of
- the field in header_name. The default maxlinelen is 76.
-
- continuation_ws must be RFC 2822 compliant folding whitespace (usually
- either a space or a hard tab) which will be prepended to continuation
- lines.
-
- errors is passed through to the .append() call.
- """
- if charset is None:
- charset = USASCII
- if not isinstance(charset, Charset):
- charset = Charset(charset)
- self._charset = charset
- self._continuation_ws = continuation_ws
- cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
- # BAW: I believe `chunks' and `maxlinelen' should be non-public.
- self._chunks = []
- if s is not None:
- self.append(s, charset, errors)
- if maxlinelen is None:
- maxlinelen = MAXLINELEN
- if header_name is None:
- # We don't know anything about the field header so the first line
- # is the same length as subsequent lines.
- self._firstlinelen = maxlinelen
- else:
- # The first line should be shorter to take into account the field
- # header. Also subtract off 2 extra for the colon and space.
- self._firstlinelen = maxlinelen - len(header_name) - 2
- # Second and subsequent lines should subtract off the length in
- # columns of the continuation whitespace prefix.
- self._maxlinelen = maxlinelen - cws_expanded_len
-
- def __str__(self):
- """A synonym for self.encode()."""
- return self.encode()
-
- def __unicode__(self):
- """Helper for the built-in unicode function."""
- uchunks = []
- lastcs = None
- for s, charset in self._chunks:
- # We must preserve spaces between encoded and non-encoded word
- # boundaries, which means for us we need to add a space when we go
- # from a charset to None/us-ascii, or from None/us-ascii to a
- # charset. Only do this for the second and subsequent chunks.
- nextcs = charset
- if uchunks:
- if lastcs not in (None, 'us-ascii'):
- if nextcs in (None, 'us-ascii'):
- uchunks.append(USPACE)
- nextcs = None
- elif nextcs not in (None, 'us-ascii'):
- uchunks.append(USPACE)
- lastcs = nextcs
- uchunks.append(unicode(s, str(charset)))
- return UEMPTYSTRING.join(uchunks)
-
- # Rich comparison operators for equality only. BAW: does it make sense to
- # have or explicitly disable <, <=, >, >= operators?
- def __eq__(self, other):
- # other may be a Header or a string. Both are fine so coerce
- # ourselves to a string, swap the args and do another comparison.
- return other == self.encode()
-
- def __ne__(self, other):
- return not self == other
-
- def append(self, s, charset=None, errors='strict'):
- """Append a string to the MIME header.
-
- Optional charset, if given, should be a Charset instance or the name
- of a character set (which will be converted to a Charset instance). A
- value of None (the default) means that the charset given in the
- constructor is used.
-
- s may be a byte string or a Unicode string. If it is a byte string
- (i.e. isinstance(s, str) is true), then charset is the encoding of
- that byte string, and a UnicodeError will be raised if the string
- cannot be decoded with that charset. If s is a Unicode string, then
- charset is a hint specifying the character set of the characters in
- the string. In this case, when producing an RFC 2822 compliant header
- using RFC 2047 rules, the Unicode string will be encoded using the
- following charsets in order: us-ascii, the charset hint, utf-8. The
- first character set not to provoke a UnicodeError is used.
-
- Optional `errors' is passed as the third argument to any unicode() or
- ustr.encode() call.
- """
- if charset is None:
- charset = self._charset
- elif not isinstance(charset, Charset):
- charset = Charset(charset)
- # If the charset is our faux 8bit charset, leave the string unchanged
- if charset <> '8bit':
- # We need to test that the string can be converted to unicode and
- # back to a byte string, given the input and output codecs of the
- # charset.
- if isinstance(s, str):
- # Possibly raise UnicodeError if the byte string can't be
- # converted to a unicode with the input codec of the charset.
- incodec = charset.input_codec or 'us-ascii'
- ustr = unicode(s, incodec, errors)
- # Now make sure that the unicode could be converted back to a
- # byte string with the output codec, which may be different
- # than the input codec. Still, use the original byte string.
- outcodec = charset.output_codec or 'us-ascii'
- ustr.encode(outcodec, errors)
- elif isinstance(s, unicode):
- # Now we have to be sure the unicode string can be converted
- # to a byte string with a reasonable output codec. We want to
- # use the byte string in the chunk.
- for charset in USASCII, charset, UTF8:
- try:
- outcodec = charset.output_codec or 'us-ascii'
- s = s.encode(outcodec, errors)
- break
- except UnicodeError:
- pass
- else:
- assert False, 'utf-8 conversion failed'
- self._chunks.append((s, charset))
-
- def _split(self, s, charset, maxlinelen, splitchars):
- # Split up a header safely for use with encode_chunks.
- splittable = charset.to_splittable(s)
- encoded = charset.from_splittable(splittable, True)
- elen = charset.encoded_header_len(encoded)
- # If the line's encoded length fits, just return it
- if elen <= maxlinelen:
- return [(encoded, charset)]
- # If we have undetermined raw 8bit characters sitting in a byte
- # string, we really don't know what the right thing to do is. We
- # can't really split it because it might be multibyte data which we
- # could break if we split it between pairs. The least harm seems to
- # be to not split the header at all, but that means they could go out
- # longer than maxlinelen.
- if charset == '8bit':
- return [(s, charset)]
- # BAW: I'm not sure what the right test here is. What we're trying to
- # do is be faithful to RFC 2822's recommendation that ($2.2.3):
- #
- # "Note: Though structured field bodies are defined in such a way that
- # folding can take place between many of the lexical tokens (and even
- # within some of the lexical tokens), folding SHOULD be limited to
- # placing the CRLF at higher-level syntactic breaks."
- #
- # For now, I can only imagine doing this when the charset is us-ascii,
- # although it's possible that other charsets may also benefit from the
- # higher-level syntactic breaks.
- elif charset == 'us-ascii':
- return self._split_ascii(s, charset, maxlinelen, splitchars)
- # BAW: should we use encoded?
- elif elen == len(s):
- # We can split on _maxlinelen boundaries because we know that the
- # encoding won't change the size of the string
- splitpnt = maxlinelen
- first = charset.from_splittable(splittable[:splitpnt], False)
- last = charset.from_splittable(splittable[splitpnt:], False)
- else:
- # Binary search for split point
- first, last = _binsplit(splittable, charset, maxlinelen)
- # first is of the proper length so just wrap it in the appropriate
- # chrome. last must be recursively split.
- fsplittable = charset.to_splittable(first)
- fencoded = charset.from_splittable(fsplittable, True)
- chunk = [(fencoded, charset)]
- return chunk + self._split(last, charset, self._maxlinelen, splitchars)
-
- def _split_ascii(self, s, charset, firstlen, splitchars):
- chunks = _split_ascii(s, firstlen, self._maxlinelen,
- self._continuation_ws, splitchars)
- return zip(chunks, [charset]*len(chunks))
-
- def _encode_chunks(self, newchunks, maxlinelen):
- # MIME-encode a header with many different charsets and/or encodings.
- #
- # Given a list of pairs (string, charset), return a MIME-encoded
- # string suitable for use in a header field. Each pair may have
- # different charsets and/or encodings, and the resulting header will
- # accurately reflect each setting.
- #
- # Each encoding can be email.Utils.QP (quoted-printable, for
- # ASCII-like character sets like iso-8859-1), email.Utils.BASE64
- # (Base64, for non-ASCII character sets like KOI8-R and
- # iso-2022-jp), or None (no encoding).
- #
- # Each pair will be represented on a separate line; the resulting
- # string will be in the format:
- #
- # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
- # =?charset2?b?SvxyZ2VuIEL2aW5n?="
- chunks = []
- for header, charset in newchunks:
- if not header:
- continue
- if charset is None or charset.header_encoding is None:
- s = header
- else:
- s = charset.header_encode(header)
- # Don't add more folding whitespace than necessary
- if chunks and chunks[-1].endswith(' '):
- extra = ''
- else:
- extra = ' '
- _max_append(chunks, s, maxlinelen, extra)
- joiner = NL + self._continuation_ws
- return joiner.join(chunks)
-
- def encode(self, splitchars=';, '):
- """Encode a message header into an RFC-compliant format.
-
- There are many issues involved in converting a given string for use in
- an email header. Only certain character sets are readable in most
- email clients, and as header strings can only contain a subset of
- 7-bit ASCII, care must be taken to properly convert and encode (with
- Base64 or quoted-printable) header strings. In addition, there is a
- 75-character length limit on any given encoded header field, so
- line-wrapping must be performed, even with double-byte character sets.
-
- This method will do its best to convert the string to the correct
- character set used in email, and encode and line wrap it safely with
- the appropriate scheme for that character set.
-
- If the given charset is not known or an error occurs during
- conversion, this function will return the header untouched.
-
- Optional splitchars is a string containing characters to split long
- ASCII lines on, in rough support of RFC 2822's `highest level
- syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
- """
- newchunks = []
- maxlinelen = self._firstlinelen
- lastlen = 0
- for s, charset in self._chunks:
- # The first bit of the next chunk should be just long enough to
- # fill the next line. Don't forget the space separating the
- # encoded words.
- targetlen = maxlinelen - lastlen - 1
- if targetlen < charset.encoded_header_len(''):
- # Stick it on the next line
- targetlen = maxlinelen
- newchunks += self._split(s, charset, targetlen, splitchars)
- lastchunk, lastcharset = newchunks[-1]
- lastlen = lastcharset.encoded_header_len(lastchunk)
- return self._encode_chunks(newchunks, maxlinelen)
-
-
-
-def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
- lines = []
- maxlen = firstlen
- for line in s.splitlines():
- # Ignore any leading whitespace (i.e. continuation whitespace) already
- # on the line, since we'll be adding our own.
- line = line.lstrip()
- if len(line) < maxlen:
- lines.append(line)
- maxlen = restlen
- continue
- # Attempt to split the line at the highest-level syntactic break
- # possible. Note that we don't have a lot of smarts about field
- # syntax; we just try to break on semi-colons, then commas, then
- # whitespace.
- for ch in splitchars:
- if ch in line:
- break
- else:
- # There's nothing useful to split the line on, not even spaces, so
- # just append this line unchanged
- lines.append(line)
- maxlen = restlen
- continue
- # Now split the line on the character plus trailing whitespace
- cre = re.compile(r'%s\s*' % ch)
- if ch in ';,':
- eol = ch
- else:
- eol = ''
- joiner = eol + ' '
- joinlen = len(joiner)
- wslen = len(continuation_ws.replace('\t', SPACE8))
- this = []
- linelen = 0
- for part in cre.split(line):
- curlen = linelen + max(0, len(this)-1) * joinlen
- partlen = len(part)
- onfirstline = not lines
- # We don't want to split after the field name, if we're on the
- # first line and the field name is present in the header string.
- if ch == ' ' and onfirstline and \
- len(this) == 1 and fcre.match(this[0]):
- this.append(part)
- linelen += partlen
- elif curlen + partlen > maxlen:
- if this:
- lines.append(joiner.join(this) + eol)
- # If this part is longer than maxlen and we aren't already
- # splitting on whitespace, try to recursively split this line
- # on whitespace.
- if partlen > maxlen and ch <> ' ':
- subl = _split_ascii(part, maxlen, restlen,
- continuation_ws, ' ')
- lines.extend(subl[:-1])
- this = [subl[-1]]
- else:
- this = [part]
- linelen = wslen + len(this[-1])
- maxlen = restlen
- else:
- this.append(part)
- linelen += partlen
- # Put any left over parts on a line by themselves
- if this:
- lines.append(joiner.join(this))
- return lines
-
-
-
-def _binsplit(splittable, charset, maxlinelen):
- i = 0
- j = len(splittable)
- while i < j:
- # Invariants:
- # 1. splittable[:k] fits for all k <= i (note that we *assume*,
- # at the start, that splittable[:0] fits).
- # 2. splittable[:k] does not fit for any k > j (at the start,
- # this means we shouldn't look at any k > len(splittable)).
- # 3. We don't know about splittable[:k] for k in i+1..j.
- # 4. We want to set i to the largest k that fits, with i <= k <= j.
- #
- m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
- chunk = charset.from_splittable(splittable[:m], True)
- chunklen = charset.encoded_header_len(chunk)
- if chunklen <= maxlinelen:
- # m is acceptable, so is a new lower bound.
- i = m
- else:
- # m is not acceptable, so final i must be < m.
- j = m - 1
- # i == j. Invariant #1 implies that splittable[:i] fits, and
- # invariant #2 implies that splittable[:i+1] does not fit, so i
- # is what we're looking for.
- first = charset.from_splittable(splittable[:i], False)
- last = charset.from_splittable(splittable[i:], False)
- return first, last
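A minimal sketch of round-tripping a non-ASCII header through the Header machinery above (legacy Python 2 email API; the subject text is illustrative):

    from email.header import Header, decode_header, make_header

    h = Header(u'Z\xfcrich report', 'iso-8859-1', header_name='Subject')
    encoded = h.encode()            # e.g. =?iso-8859-1?q?Z=FCrich_report?=
    parts = decode_header(encoded)  # [('Z\xfcrich report', 'iso-8859-1')]
    print unicode(make_header(parts)).encode('utf-8')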
diff --git a/sys/lib/python/email/iterators.py b/sys/lib/python/email/iterators.py
deleted file mode 100644
index e99f2280d..000000000
--- a/sys/lib/python/email/iterators.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Various types of useful iterators and generators."""
-
-__all__ = [
- 'body_line_iterator',
- 'typed_subpart_iterator',
- 'walk',
- # Do not include _structure() since it's part of the debugging API.
- ]
-
-import sys
-from cStringIO import StringIO
-
-
-
-# This function will become a method of the Message class
-def walk(self):
- """Walk over the message tree, yielding each subpart.
-
- The walk is performed in depth-first order. This method is a
- generator.
- """
- yield self
- if self.is_multipart():
- for subpart in self.get_payload():
- for subsubpart in subpart.walk():
- yield subsubpart
-
-
-
-# These two functions are imported into the Iterators.py interface module.
-def body_line_iterator(msg, decode=False):
- """Iterate over the parts, returning string payloads line-by-line.
-
- Optional decode (default False) is passed through to .get_payload().
- """
- for subpart in msg.walk():
- payload = subpart.get_payload(decode=decode)
- if isinstance(payload, basestring):
- for line in StringIO(payload):
- yield line
-
-
-def typed_subpart_iterator(msg, maintype='text', subtype=None):
- """Iterate over the subparts with a given MIME type.
-
- Use `maintype' as the main MIME type to match against; this defaults to
- "text". Optional `subtype' is the MIME subtype to match against; if
- omitted, only the main type is matched.
- """
- for subpart in msg.walk():
- if subpart.get_content_maintype() == maintype:
- if subtype is None or subpart.get_content_subtype() == subtype:
- yield subpart
-
-
-
-def _structure(msg, fp=None, level=0, include_default=False):
- """A handy debugging aid"""
- if fp is None:
- fp = sys.stdout
- tab = ' ' * (level * 4)
- print >> fp, tab + msg.get_content_type(),
- if include_default:
- print >> fp, '[%s]' % msg.get_default_type()
- else:
- print >> fp
- if msg.is_multipart():
- for subpart in msg.get_payload():
- _structure(subpart, fp, level+1, include_default)
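A minimal sketch of the iterators above applied to a small multipart message (legacy Python 2 email API; the content is made up):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.iterators import typed_subpart_iterator, body_line_iterator

    msg = MIMEMultipart()
    msg.attach(MIMEText('plain body\n'))
    msg.attach(MIMEText('<p>html body</p>\n', 'html'))

    for part in typed_subpart_iterator(msg, 'text', 'html'):
        print part.get_content_type()     # text/html
    for line in body_line_iterator(msg):
        print repr(line)                  # every line of each text payload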
diff --git a/sys/lib/python/email/message.py b/sys/lib/python/email/message.py
deleted file mode 100644
index 88ae1833e..000000000
--- a/sys/lib/python/email/message.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Basic message object for the email package object model."""
-
-__all__ = ['Message']
-
-import re
-import uu
-import binascii
-import warnings
-from cStringIO import StringIO
-
-# Intrapackage imports
-import email.charset
-from email import utils
-from email import errors
-
-SEMISPACE = '; '
-
-# Regular expression used to split header parameters. BAW: this may be too
-# simple. It isn't strictly RFC 2045 (section 5.1) compliant, but it catches
-# most headers found in the wild. We may eventually need a full-fledged
-# parser.
-paramre = re.compile(r'\s*;\s*')
-# Regular expression that matches `special' characters in parameters, the
-# existence of which forces quoting of the parameter value.
-tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
-
-
-
-# Helper functions
-def _formatparam(param, value=None, quote=True):
- """Convenience function to format and return a key=value pair.
-
- This will quote the value if needed or if quote is true.
- """
- if value is not None and len(value) > 0:
- # A tuple is used for RFC 2231 encoded parameter values where items
- # are (charset, language, value). charset is a string, not a Charset
- # instance.
- if isinstance(value, tuple):
- # Encode as per RFC 2231
- param += '*'
- value = utils.encode_rfc2231(value[2], value[0], value[1])
- # BAW: Please check this. I think that if quote is set it should
- # force quoting even if not necessary.
- if quote or tspecials.search(value):
- return '%s="%s"' % (param, utils.quote(value))
- else:
- return '%s=%s' % (param, value)
- else:
- return param
-
-def _parseparam(s):
- plist = []
- while s[:1] == ';':
- s = s[1:]
- end = s.find(';')
- while end > 0 and s.count('"', 0, end) % 2:
- end = s.find(';', end + 1)
- if end < 0:
- end = len(s)
- f = s[:end]
- if '=' in f:
- i = f.index('=')
- f = f[:i].strip().lower() + '=' + f[i+1:].strip()
- plist.append(f.strip())
- s = s[end:]
- return plist
-
-
-def _unquotevalue(value):
- # This is different than utils.collapse_rfc2231_value() because it doesn't
- # try to convert the value to a unicode. Message.get_param() and
- # Message.get_params() are both currently defined to return the tuple in
- # the face of RFC 2231 parameters.
- if isinstance(value, tuple):
- return value[0], value[1], utils.unquote(value[2])
- else:
- return utils.unquote(value)
-
-
-
-class Message:
- """Basic message object.
-
- A message object is defined as something that has a bunch of RFC 2822
- headers and a payload. It may optionally have an envelope header
- (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
- multipart or a message/rfc822), then the payload is a list of Message
- objects, otherwise it is a string.
-
- Message objects implement part of the `mapping' interface, which assumes
- there is exactly one occurrence of the header per message. Some headers
- do in fact appear multiple times (e.g. Received) and for those headers,
- you must use the explicit API to set or get all the headers. Not all of
- the mapping methods are implemented.
- """
- def __init__(self):
- self._headers = []
- self._unixfrom = None
- self._payload = None
- self._charset = None
- # Defaults for multipart messages
- self.preamble = self.epilogue = None
- self.defects = []
- # Default content type
- self._default_type = 'text/plain'
-
- def __str__(self):
- """Return the entire formatted message as a string.
- This includes the headers, body, and envelope header.
- """
- return self.as_string(unixfrom=True)
-
- def as_string(self, unixfrom=False):
- """Return the entire formatted message as a string.
- Optional `unixfrom', when True, means include the Unix From_ envelope
- header.
-
- This is a convenience method and may not generate the message exactly
- as you intend because by default it mangles lines that begin with
- "From ". For more flexibility, use the flatten() method of a
- Generator instance.
- """
- from email.Generator import Generator
- fp = StringIO()
- g = Generator(fp)
- g.flatten(self, unixfrom=unixfrom)
- return fp.getvalue()
-
- def is_multipart(self):
- """Return True if the message consists of multiple parts."""
- return isinstance(self._payload, list)
-
- #
- # Unix From_ line
- #
- def set_unixfrom(self, unixfrom):
- self._unixfrom = unixfrom
-
- def get_unixfrom(self):
- return self._unixfrom
-
- #
- # Payload manipulation.
- #
- def attach(self, payload):
- """Add the given payload to the current payload.
-
- The current payload will always be a list of objects after this method
- is called. If you want to set the payload to a scalar object, use
- set_payload() instead.
- """
- if self._payload is None:
- self._payload = [payload]
- else:
- self._payload.append(payload)
-
- def get_payload(self, i=None, decode=False):
- """Return a reference to the payload.
-
- The payload will either be a list object or a string. If you mutate
- the list object, you modify the message's payload in place. Optional
- i returns that index into the payload.
-
- Optional decode is a flag indicating whether the payload should be
- decoded or not, according to the Content-Transfer-Encoding header
- (default is False).
-
- When True and the message is not a multipart, the payload will be
- decoded if this header's value is `quoted-printable' or `base64'. If
- some other encoding is used, or the header is missing, or if the
- payload has bogus data (i.e. bogus base64 or uuencoded data), the
- payload is returned as-is.
-
- If the message is a multipart and the decode flag is True, then None
- is returned.
- """
- if i is None:
- payload = self._payload
- elif not isinstance(self._payload, list):
- raise TypeError('Expected list, got %s' % type(self._payload))
- else:
- payload = self._payload[i]
- if decode:
- if self.is_multipart():
- return None
- cte = self.get('content-transfer-encoding', '').lower()
- if cte == 'quoted-printable':
- return utils._qdecode(payload)
- elif cte == 'base64':
- try:
- return utils._bdecode(payload)
- except binascii.Error:
- # Incorrect padding
- return payload
- elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
- sfp = StringIO()
- try:
- uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
- payload = sfp.getvalue()
- except uu.Error:
- # Some decoding problem
- return payload
- # Everything else, including encodings with 8bit or 7bit are returned
- # unchanged.
- return payload
-
- def set_payload(self, payload, charset=None):
- """Set the payload to the given value.
-
- Optional charset sets the message's default character set. See
- set_charset() for details.
- """
- self._payload = payload
- if charset is not None:
- self.set_charset(charset)
-
- def set_charset(self, charset):
- """Set the charset of the payload to a given character set.
-
- charset can be a Charset instance, a string naming a character set, or
- None. If it is a string it will be converted to a Charset instance.
- If charset is None, the charset parameter will be removed from the
- Content-Type field. Anything else will generate a TypeError.
-
- The message will be assumed to be of type text/* encoded with
- charset.input_charset. It will be converted to charset.output_charset
- and encoded properly, if needed, when generating the plain text
- representation of the message. MIME headers (MIME-Version,
- Content-Type, Content-Transfer-Encoding) will be added as needed.
-
- """
- if charset is None:
- self.del_param('charset')
- self._charset = None
- return
- if isinstance(charset, basestring):
- charset = email.charset.Charset(charset)
- if not isinstance(charset, email.charset.Charset):
- raise TypeError(charset)
- # BAW: should we accept strings that can serve as arguments to the
- # Charset constructor?
- self._charset = charset
- if not self.has_key('MIME-Version'):
- self.add_header('MIME-Version', '1.0')
- if not self.has_key('Content-Type'):
- self.add_header('Content-Type', 'text/plain',
- charset=charset.get_output_charset())
- else:
- self.set_param('charset', charset.get_output_charset())
- if str(charset) <> charset.get_output_charset():
- self._payload = charset.body_encode(self._payload)
- if not self.has_key('Content-Transfer-Encoding'):
- cte = charset.get_body_encoding()
- try:
- cte(self)
- except TypeError:
- self._payload = charset.body_encode(self._payload)
- self.add_header('Content-Transfer-Encoding', cte)
-
- def get_charset(self):
- """Return the Charset instance associated with the message's payload.
- """
- return self._charset
-
- #
- # MAPPING INTERFACE (partial)
- #
- def __len__(self):
- """Return the total number of headers, including duplicates."""
- return len(self._headers)
-
- def __getitem__(self, name):
- """Get a header value.
-
- Return None if the header is missing instead of raising an exception.
-
- Note that if the header appeared multiple times, exactly which
- occurrence gets returned is undefined. Use get_all() to get all
- the values matching a header field name.
- """
- return self.get(name)
-
- def __setitem__(self, name, val):
- """Set the value of a header.
-
- Note: this does not overwrite an existing header with the same field
- name. Use __delitem__() first to delete any existing headers.
- """
- self._headers.append((name, val))
-
- def __delitem__(self, name):
- """Delete all occurrences of a header, if present.
-
- Does not raise an exception if the header is missing.
- """
- name = name.lower()
- newheaders = []
- for k, v in self._headers:
- if k.lower() <> name:
- newheaders.append((k, v))
- self._headers = newheaders
-
- def __contains__(self, name):
- return name.lower() in [k.lower() for k, v in self._headers]
-
- def has_key(self, name):
- """Return true if the message contains the header."""
- missing = object()
- return self.get(name, missing) is not missing
-
- def keys(self):
- """Return a list of all the message's header field names.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [k for k, v in self._headers]
-
- def values(self):
- """Return a list of all the message's header values.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [v for k, v in self._headers]
-
- def items(self):
- """Get all the message's header fields and values.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return self._headers[:]
-
- def get(self, name, failobj=None):
- """Get a header value.
-
- Like __getitem__() but return failobj instead of None when the field
- is missing.
- """
- name = name.lower()
- for k, v in self._headers:
- if k.lower() == name:
- return v
- return failobj
-
- #
- # Additional useful stuff
- #
-
- def get_all(self, name, failobj=None):
- """Return a list of all the values for the named field.
-
- These will be sorted in the order they appeared in the original
- message, and may contain duplicates. Any fields deleted and
- re-inserted are always appended to the header list.
-
- If no such fields exist, failobj is returned (defaults to None).
- """
- values = []
- name = name.lower()
- for k, v in self._headers:
- if k.lower() == name:
- values.append(v)
- if not values:
- return failobj
- return values
-
- def add_header(self, _name, _value, **_params):
- """Extended header setting.
-
- name is the header field to add. keyword arguments can be used to set
- additional parameters for the header field, with underscores converted
- to dashes. Normally the parameter will be added as key="value" unless
- value is None, in which case only the key will be added.
-
- Example:
-
- msg.add_header('content-disposition', 'attachment', filename='bud.gif')
- """
- parts = []
- for k, v in _params.items():
- if v is None:
- parts.append(k.replace('_', '-'))
- else:
- parts.append(_formatparam(k.replace('_', '-'), v))
- if _value is not None:
- parts.insert(0, _value)
- self._headers.append((_name, SEMISPACE.join(parts)))
-
- def replace_header(self, _name, _value):
- """Replace a header.
-
- Replace the first matching header found in the message, retaining
- header order and case. If no matching header was found, a KeyError is
- raised.
- """
- _name = _name.lower()
- for i, (k, v) in zip(range(len(self._headers)), self._headers):
- if k.lower() == _name:
- self._headers[i] = (k, _value)
- break
- else:
- raise KeyError(_name)
-
- #
- # Use these three methods instead of the three above.
- #
-
- def get_content_type(self):
- """Return the message's content type.
-
- The returned string is coerced to lower case of the form
- `maintype/subtype'. If there was no Content-Type header in the
- message, the default type as given by get_default_type() will be
- returned. Since according to RFC 2045, messages always have a default
- type this will always return a value.
-
- RFC 2045 defines a message's default type to be text/plain unless it
- appears inside a multipart/digest container, in which case it would be
- message/rfc822.
- """
- missing = object()
- value = self.get('content-type', missing)
- if value is missing:
- # This should have no parameters
- return self.get_default_type()
- ctype = paramre.split(value)[0].lower().strip()
- # RFC 2045, section 5.2 says if it's invalid, use text/plain
- if ctype.count('/') <> 1:
- return 'text/plain'
- return ctype
-
- def get_content_maintype(self):
- """Return the message's main content type.
-
- This is the `maintype' part of the string returned by
- get_content_type().
- """
- ctype = self.get_content_type()
- return ctype.split('/')[0]
-
- def get_content_subtype(self):
- """Returns the message's sub-content type.
-
- This is the `subtype' part of the string returned by
- get_content_type().
- """
- ctype = self.get_content_type()
- return ctype.split('/')[1]
-
- def get_default_type(self):
- """Return the `default' content type.
-
- Most messages have a default content type of text/plain, except for
- messages that are subparts of multipart/digest containers. Such
- subparts have a default content type of message/rfc822.
- """
- return self._default_type
-
- def set_default_type(self, ctype):
- """Set the `default' content type.
-
- ctype should be either "text/plain" or "message/rfc822", although this
- is not enforced. The default content type is not stored in the
- Content-Type header.
- """
- self._default_type = ctype
-
- def _get_params_preserve(self, failobj, header):
- # Like get_params() but preserves the quoting of values. BAW:
- # should this be part of the public interface?
- missing = object()
- value = self.get(header, missing)
- if value is missing:
- return failobj
- params = []
- for p in _parseparam(';' + value):
- try:
- name, val = p.split('=', 1)
- name = name.strip()
- val = val.strip()
- except ValueError:
- # Must have been a bare attribute
- name = p.strip()
- val = ''
- params.append((name, val))
- params = utils.decode_params(params)
- return params
-
- def get_params(self, failobj=None, header='content-type', unquote=True):
- """Return the message's Content-Type parameters, as a list.
-
- The elements of the returned list are 2-tuples of key/value pairs, as
- split on the `=' sign. The left hand side of the `=' is the key,
- while the right hand side is the value. If there is no `=' sign in
- the parameter the value is the empty string. The value is as
- described in the get_param() method.
-
- Optional failobj is the object to return if there is no Content-Type
- header. Optional header is the header to search instead of
- Content-Type. If unquote is True, the value is unquoted.
- """
- missing = object()
- params = self._get_params_preserve(missing, header)
- if params is missing:
- return failobj
- if unquote:
- return [(k, _unquotevalue(v)) for k, v in params]
- else:
- return params
-
- def get_param(self, param, failobj=None, header='content-type',
- unquote=True):
- """Return the parameter value if found in the Content-Type header.
-
- Optional failobj is the object to return if there is no Content-Type
- header, or the Content-Type header has no such parameter. Optional
- header is the header to search instead of Content-Type.
-
- Parameter keys are always compared case insensitively. The return
- value can either be a string, or a 3-tuple if the parameter was RFC
- 2231 encoded. When it's a 3-tuple, the elements of the value are of
- the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
- LANGUAGE can be None, in which case you should consider VALUE to be
- encoded in the us-ascii charset. You can usually ignore LANGUAGE.
-
- Your application should be prepared to deal with 3-tuple return
- values, and can convert the parameter to a Unicode string like so:
-
- param = msg.get_param('foo')
- if isinstance(param, tuple):
- param = unicode(param[2], param[0] or 'us-ascii')
-
- In any case, the parameter value (either the returned string, or the
- VALUE item in the 3-tuple) is always unquoted, unless unquote is set
- to False.
- """
- if not self.has_key(header):
- return failobj
- for k, v in self._get_params_preserve(failobj, header):
- if k.lower() == param.lower():
- if unquote:
- return _unquotevalue(v)
- else:
- return v
- return failobj
-
- def set_param(self, param, value, header='Content-Type', requote=True,
- charset=None, language=''):
- """Set a parameter in the Content-Type header.
-
- If the parameter already exists in the header, its value will be
- replaced with the new value.
-
- If header is Content-Type and has not yet been defined for this
- message, it will be set to "text/plain" and the new parameter and
- value will be appended as per RFC 2045.
-
- An alternate header can be specified in the header argument, and all
- parameters will be quoted as necessary unless requote is False.
-
- If charset is specified, the parameter will be encoded according to RFC
- 2231. Optional language specifies the RFC 2231 language, defaulting
- to the empty string. Both charset and language should be strings.
- """
- if not isinstance(value, tuple) and charset:
- value = (charset, language, value)
-
- if not self.has_key(header) and header.lower() == 'content-type':
- ctype = 'text/plain'
- else:
- ctype = self.get(header)
- if not self.get_param(param, header=header):
- if not ctype:
- ctype = _formatparam(param, value, requote)
- else:
- ctype = SEMISPACE.join(
- [ctype, _formatparam(param, value, requote)])
- else:
- ctype = ''
- for old_param, old_value in self.get_params(header=header,
- unquote=requote):
- append_param = ''
- if old_param.lower() == param.lower():
- append_param = _formatparam(param, value, requote)
- else:
- append_param = _formatparam(old_param, old_value, requote)
- if not ctype:
- ctype = append_param
- else:
- ctype = SEMISPACE.join([ctype, append_param])
- if ctype <> self.get(header):
- del self[header]
- self[header] = ctype
-
- def del_param(self, param, header='content-type', requote=True):
- """Remove the given parameter completely from the Content-Type header.
-
- The header will be re-written in place without the parameter or its
- value. All values will be quoted as necessary unless requote is
- False. Optional header specifies an alternative to the Content-Type
- header.
- """
- if not self.has_key(header):
- return
- new_ctype = ''
- for p, v in self.get_params(header=header, unquote=requote):
- if p.lower() <> param.lower():
- if not new_ctype:
- new_ctype = _formatparam(p, v, requote)
- else:
- new_ctype = SEMISPACE.join([new_ctype,
- _formatparam(p, v, requote)])
- if new_ctype <> self.get(header):
- del self[header]
- self[header] = new_ctype
-
- def set_type(self, type, header='Content-Type', requote=True):
- """Set the main type and subtype for the Content-Type header.
-
- type must be a string in the form "maintype/subtype", otherwise a
- ValueError is raised.
-
- This method replaces the Content-Type header, keeping all the
- parameters in place. If requote is False, this leaves the existing
- header's quoting as is. Otherwise, the parameters will be quoted (the
- default).
-
- An alternative header can be specified in the header argument. When
- the Content-Type header is set, we'll always also add a MIME-Version
- header.
- """
- # BAW: should we be strict?
- if not type.count('/') == 1:
- raise ValueError
- # Set the Content-Type, you get a MIME-Version
- if header.lower() == 'content-type':
- del self['mime-version']
- self['MIME-Version'] = '1.0'
- if not self.has_key(header):
- self[header] = type
- return
- params = self.get_params(header=header, unquote=requote)
- del self[header]
- self[header] = type
- # Skip the first param; it's the old type.
- for p, v in params[1:]:
- self.set_param(p, v, header, requote)
-
- def get_filename(self, failobj=None):
- """Return the filename associated with the payload if present.
-
- The filename is extracted from the Content-Disposition header's
- `filename' parameter, and it is unquoted. If that header is missing
- the `filename' parameter, this method falls back to looking for the
- `name' parameter.
- """
- missing = object()
- filename = self.get_param('filename', missing, 'content-disposition')
- if filename is missing:
- filename = self.get_param('name', missing, 'content-disposition')
- if filename is missing:
- return failobj
- return utils.collapse_rfc2231_value(filename).strip()
-
- def get_boundary(self, failobj=None):
- """Return the boundary associated with the payload if present.
-
- The boundary is extracted from the Content-Type header's `boundary'
- parameter, and it is unquoted.
- """
- missing = object()
- boundary = self.get_param('boundary', missing)
- if boundary is missing:
- return failobj
- # RFC 2046 says that boundaries may begin but not end in w/s
- return utils.collapse_rfc2231_value(boundary).rstrip()
-
- def set_boundary(self, boundary):
- """Set the boundary parameter in Content-Type to 'boundary'.
-
- This is subtly different than deleting the Content-Type header and
- adding a new one with a new boundary parameter via add_header(). The
- main difference is that using the set_boundary() method preserves the
- order of the Content-Type header in the original message.
-
- HeaderParseError is raised if the message has no Content-Type header.
- """
- missing = object()
- params = self._get_params_preserve(missing, 'content-type')
- if params is missing:
- # There was no Content-Type header, and we don't know what type
- # to set it to, so raise an exception.
- raise errors.HeaderParseError('No Content-Type header found')
- newparams = []
- foundp = False
- for pk, pv in params:
- if pk.lower() == 'boundary':
- newparams.append(('boundary', '"%s"' % boundary))
- foundp = True
- else:
- newparams.append((pk, pv))
- if not foundp:
- # The original Content-Type header had no boundary attribute.
- # Tack one on the end. BAW: should we raise an exception
- # instead???
- newparams.append(('boundary', '"%s"' % boundary))
- # Replace the existing Content-Type header with the new value
- newheaders = []
- for h, v in self._headers:
- if h.lower() == 'content-type':
- parts = []
- for k, v in newparams:
- if v == '':
- parts.append(k)
- else:
- parts.append('%s=%s' % (k, v))
- newheaders.append((h, SEMISPACE.join(parts)))
-
- else:
- newheaders.append((h, v))
- self._headers = newheaders
-
- def get_content_charset(self, failobj=None):
- """Return the charset parameter of the Content-Type header.
-
- The returned string is always coerced to lower case. If there is no
- Content-Type header, or if that header has no charset parameter,
- failobj is returned.
- """
- missing = object()
- charset = self.get_param('charset', missing)
- if charset is missing:
- return failobj
- if isinstance(charset, tuple):
- # RFC 2231 encoded, so decode it, and it better end up as ascii.
- pcharset = charset[0] or 'us-ascii'
- try:
- # LookupError will be raised if the charset isn't known to
- # Python. UnicodeError will be raised if the encoded text
- # contains a character not in the charset.
- charset = unicode(charset[2], pcharset).encode('us-ascii')
- except (LookupError, UnicodeError):
- charset = charset[2]
- # charset character must be in us-ascii range
- try:
- if isinstance(charset, str):
- charset = unicode(charset, 'us-ascii')
- charset = charset.encode('us-ascii')
- except UnicodeError:
- return failobj
- # RFC 2046, $4.1.2 says charsets are not case sensitive
- return charset.lower()
-
- def get_charsets(self, failobj=None):
- """Return a list containing the charset(s) used in this message.
-
- The returned list of items describes the Content-Type headers'
- charset parameter for this message and all the subparts in its
- payload.
-
- Each item will either be a string (the value of the charset parameter
- in the Content-Type header of that part) or the value of the
- 'failobj' parameter (defaults to None), if the part does not have a
- main MIME type of "text", or the charset is not defined.
-
- The list will contain one string for each part of the message, plus
- one for the container message (i.e. self), so that a non-multipart
- message will still return a list of length 1.
- """
- return [part.get_content_charset(failobj) for part in self.walk()]
-
- # I.e. def walk(self): ...
- from email.Iterators import walk
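
For reference, a minimal Python 2 sketch of the boundary and charset accessors removed above; the multipart message, its boundary string, and the failobj value are all illustrative:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    msg = MIMEMultipart()
    msg.attach(MIMEText('hello', 'plain', 'iso-8859-1'))

    # set_boundary() rewrites the existing Content-Type header in place,
    # so the header keeps its position in the message.
    msg.set_boundary('illustrative-boundary')
    print msg.get_boundary()                        # illustrative-boundary

    # get_content_charset() reads this part's charset parameter;
    # get_charsets() walks the container and every subpart.
    print msg.get_payload(0).get_content_charset()  # iso-8859-1
    print msg.get_charsets(failobj='none')          # ['none', 'iso-8859-1']
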
diff --git a/sys/lib/python/email/mime/__init__.py b/sys/lib/python/email/mime/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/sys/lib/python/email/mime/__init__.py
+++ /dev/null
diff --git a/sys/lib/python/email/mime/application.py b/sys/lib/python/email/mime/application.py
deleted file mode 100644
index 6f8bb8a82..000000000
--- a/sys/lib/python/email/mime/application.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Keith Dart
-# Contact: email-sig@python.org
-
-"""Class representing application/* type MIME documents."""
-
-__all__ = ["MIMEApplication"]
-
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-class MIMEApplication(MIMENonMultipart):
- """Class for generating application/* MIME documents."""
-
- def __init__(self, _data, _subtype='octet-stream',
- _encoder=encoders.encode_base64, **_params):
- """Create an application/* type MIME document.
-
- _data is a string containing the raw application data.
-
- _subtype is the MIME content type subtype, defaulting to
- 'octet-stream'.
-
- _encoder is a function which will perform the actual encoding for
- transport of the application data, defaulting to base64 encoding.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- raise TypeError('Invalid application MIME subtype')
- MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
- self.set_payload(_data)
- _encoder(self)
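
A minimal usage sketch (Python 2) of the MIMEApplication class deleted above; the payload bytes and the name parameter are made up, and the default base64 encoder is left in place:

    from email.mime.application import MIMEApplication

    part = MIMEApplication('\x00\x01binary payload', name='blob.bin')
    print part['Content-Type']               # application/octet-stream; name="blob.bin"
    print part['Content-Transfer-Encoding']  # base64
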
diff --git a/sys/lib/python/email/mime/audio.py b/sys/lib/python/email/mime/audio.py
deleted file mode 100644
index c7290c4b1..000000000
--- a/sys/lib/python/email/mime/audio.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Anthony Baxter
-# Contact: email-sig@python.org
-
-"""Class representing audio/* type MIME documents."""
-
-__all__ = ['MIMEAudio']
-
-import sndhdr
-
-from cStringIO import StringIO
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-_sndhdr_MIMEmap = {'au' : 'basic',
- 'wav' :'x-wav',
- 'aiff':'x-aiff',
- 'aifc':'x-aiff',
- }
-
-# There are others in sndhdr that don't have MIME types. :(
-# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
-def _whatsnd(data):
- """Try to identify a sound file type.
-
- sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
- we re-do it here. It would be easier to reverse engineer the Unix 'file'
- command and use the standard 'magic' file, as shipped with a modern Unix.
- """
- hdr = data[:512]
- fakefile = StringIO(hdr)
- for testfn in sndhdr.tests:
- res = testfn(hdr, fakefile)
- if res is not None:
- return _sndhdr_MIMEmap.get(res[0])
- return None
-
-
-
-class MIMEAudio(MIMENonMultipart):
- """Class for generating audio/* MIME documents."""
-
- def __init__(self, _audiodata, _subtype=None,
- _encoder=encoders.encode_base64, **_params):
- """Create an audio/* type MIME document.
-
- _audiodata is a string containing the raw audio data. If this data
- can be decoded by the standard Python `sndhdr' module, then the
- subtype will be automatically included in the Content-Type header.
- Otherwise, you can specify the specific audio subtype via the
- _subtype parameter. If _subtype is not given, and no subtype can be
- guessed, a TypeError is raised.
-
- _encoder is a function which will perform the actual encoding for
- transport of the audio data. It takes one argument, which is this
- MIMEAudio instance. It should use get_payload() and set_payload() to
- change the payload to the encoded form. It should also add any
- Content-Transfer-Encoding or other headers to the message as
- necessary. The default encoding is Base64.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- _subtype = _whatsnd(_audiodata)
- if _subtype is None:
- raise TypeError('Could not find audio MIME subtype')
- MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
- self.set_payload(_audiodata)
- _encoder(self)
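
A minimal usage sketch (Python 2). Real audio data would let _whatsnd() guess the subtype via sndhdr; the bytes here are fabricated, so the subtype is passed explicitly to avoid the TypeError described in the docstring:

    from email.mime.audio import MIMEAudio

    fake_wav = 'RIFF....WAVEfmt '             # not a real file, illustration only
    part = MIMEAudio(fake_wav, _subtype='x-wav')
    print part['Content-Type']                # audio/x-wav
    print part['Content-Transfer-Encoding']   # base64
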
diff --git a/sys/lib/python/email/mime/base.py b/sys/lib/python/email/mime/base.py
deleted file mode 100644
index ac919258b..000000000
--- a/sys/lib/python/email/mime/base.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME specializations."""
-
-__all__ = ['MIMEBase']
-
-from email import message
-
-
-
-class MIMEBase(message.Message):
- """Base class for MIME specializations."""
-
- def __init__(self, _maintype, _subtype, **_params):
- """This constructor adds a Content-Type: and a MIME-Version: header.
-
- The Content-Type: header is taken from the _maintype and _subtype
- arguments. Additional parameters for this header are taken from the
- keyword arguments.
- """
- message.Message.__init__(self)
- ctype = '%s/%s' % (_maintype, _subtype)
- self.add_header('Content-Type', ctype, **_params)
- self['MIME-Version'] = '1.0'
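
A minimal sketch (Python 2) of what the MIMEBase constructor above produces: a bare part with Content-Type and MIME-Version headers and no payload; the type and name parameter are illustrative:

    from email.mime.base import MIMEBase

    part = MIMEBase('application', 'x-example', name='data.bin')
    print part['MIME-Version']   # 1.0
    print part['Content-Type']   # application/x-example; name="data.bin"
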
diff --git a/sys/lib/python/email/mime/image.py b/sys/lib/python/email/mime/image.py
deleted file mode 100644
index 556382323..000000000
--- a/sys/lib/python/email/mime/image.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing image/* type MIME documents."""
-
-__all__ = ['MIMEImage']
-
-import imghdr
-
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEImage(MIMENonMultipart):
- """Class for generating image/* type MIME documents."""
-
- def __init__(self, _imagedata, _subtype=None,
- _encoder=encoders.encode_base64, **_params):
- """Create an image/* type MIME document.
-
- _imagedata is a string containing the raw image data. If this data
- can be decoded by the standard Python `imghdr' module, then the
- subtype will be automatically included in the Content-Type header.
- Otherwise, you can specify the specific image subtype via the _subtype
- parameter.
-
- _encoder is a function which will perform the actual encoding for
- transport of the image data. It takes one argument, which is this
- Image instance. It should use get_payload() and set_payload() to
- change the payload to the encoded form. It should also add any
- Content-Transfer-Encoding or other headers to the message as
- necessary. The default encoding is Base64.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- _subtype = imghdr.what(None, _imagedata)
- if _subtype is None:
- raise TypeError('Could not guess image MIME subtype')
- MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
- self.set_payload(_imagedata)
- _encoder(self)
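
A minimal usage sketch (Python 2). A GIF magic prefix is enough for imghdr to guess the subtype; the bytes below are illustrative, not a valid image:

    from email.mime.image import MIMEImage

    gif_bytes = 'GIF89a' + '\x00' * 20        # imghdr keys off the magic prefix
    img = MIMEImage(gif_bytes)
    print img['Content-Type']                 # image/gif
    print img['Content-Transfer-Encoding']    # base64
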
diff --git a/sys/lib/python/email/mime/message.py b/sys/lib/python/email/mime/message.py
deleted file mode 100644
index 275dbfd08..000000000
--- a/sys/lib/python/email/mime/message.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing message/* MIME documents."""
-
-__all__ = ['MIMEMessage']
-
-from email import message
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEMessage(MIMENonMultipart):
- """Class representing message/* MIME documents."""
-
- def __init__(self, _msg, _subtype='rfc822'):
- """Create a message/* type MIME document.
-
- _msg is a message object and must be an instance of Message, or a
- derived class of Message, otherwise a TypeError is raised.
-
- Optional _subtype defines the subtype of the contained message. The
- default is "rfc822" (this is defined by the MIME standard, even though
- the term "rfc822" is technically outdated by RFC 2822).
- """
- MIMENonMultipart.__init__(self, 'message', _subtype)
- if not isinstance(_msg, message.Message):
- raise TypeError('Argument is not an instance of Message')
- # It's convenient to use this base class method. We need to do it
- # this way or we'll get an exception
- message.Message.attach(self, _msg)
- # And be sure our default type is set correctly
- self.set_default_type('message/rfc822')
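
A minimal sketch (Python 2) of wrapping an existing Message in a message/rfc822 part with the MIMEMessage class deleted above; the inner message is illustrative:

    from email.mime.message import MIMEMessage
    from email.mime.text import MIMEText

    inner = MIMEText('enclosed body')
    inner['Subject'] = 'the enclosed message'
    outer = MIMEMessage(inner)
    print outer.get_content_type()            # message/rfc822
    print outer.get_payload(0)['Subject']     # the enclosed message
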
diff --git a/sys/lib/python/email/mime/multipart.py b/sys/lib/python/email/mime/multipart.py
deleted file mode 100644
index 5c8c9dbc4..000000000
--- a/sys/lib/python/email/mime/multipart.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME multipart/* type messages."""
-
-__all__ = ['MIMEMultipart']
-
-from email.mime.base import MIMEBase
-
-
-
-class MIMEMultipart(MIMEBase):
- """Base class for MIME multipart/* type messages."""
-
- def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
- **_params):
- """Creates a multipart/* type message.
-
- By default, creates a multipart/mixed message, with proper
- Content-Type and MIME-Version headers.
-
- _subtype is the subtype of the multipart content type, defaulting to
- `mixed'.
-
- boundary is the multipart boundary string. By default it is
- calculated as needed.
-
- _subparts is a sequence of initial subparts for the payload. It
- must be an iterable object, such as a list. You can always
- attach new subparts to the message by using the attach() method.
-
- Additional parameters for the Content-Type header are taken from the
- keyword arguments (or passed into the _params argument).
- """
- MIMEBase.__init__(self, 'multipart', _subtype, **_params)
- if _subparts:
- for p in _subparts:
- self.attach(p)
- if boundary:
- self.set_boundary(boundary)
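
A minimal sketch (Python 2) of building a two-part multipart/alternative message with the constructor above; the boundary string and bodies are illustrative:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    msg = MIMEMultipart('alternative',
                        boundary='==illustrative-boundary==',
                        _subparts=[MIMEText('plain body'),
                                   MIMEText('<b>html body</b>', 'html')])
    print msg.get_content_type()   # multipart/alternative
    print msg.get_boundary()       # ==illustrative-boundary==
    print len(msg.get_payload())   # 2
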
diff --git a/sys/lib/python/email/mime/nonmultipart.py b/sys/lib/python/email/mime/nonmultipart.py
deleted file mode 100644
index dd280b51d..000000000
--- a/sys/lib/python/email/mime/nonmultipart.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME type messages that are not multipart."""
-
-__all__ = ['MIMENonMultipart']
-
-from email import errors
-from email.mime.base import MIMEBase
-
-
-
-class MIMENonMultipart(MIMEBase):
- """Base class for MIME multipart/* type messages."""
-
- __pychecker__ = 'unusednames=payload'
-
- def attach(self, payload):
- # The public API prohibits attaching multiple subparts to MIMEBase
- # derived subtypes since none of them are, by definition, of content
- # type multipart/*
- raise errors.MultipartConversionError(
- 'Cannot attach additional subparts to non-multipart/*')
-
- del __pychecker__
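
A minimal sketch (Python 2) showing the attach() guard above in action on a text part; the bodies are illustrative:

    from email import errors
    from email.mime.text import MIMEText

    part = MIMEText('just one body')
    try:
        part.attach(MIMEText('a second body'))
    except errors.MultipartConversionError, e:
        print 'refused:', e
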
diff --git a/sys/lib/python/email/mime/text.py b/sys/lib/python/email/mime/text.py
deleted file mode 100644
index 5747db5d6..000000000
--- a/sys/lib/python/email/mime/text.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing text/* type MIME documents."""
-
-__all__ = ['MIMEText']
-
-from email.encoders import encode_7or8bit
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEText(MIMENonMultipart):
- """Class for generating text/* type MIME documents."""
-
- def __init__(self, _text, _subtype='plain', _charset='us-ascii'):
- """Create a text/* type MIME document.
-
- _text is the string for this message object.
-
- _subtype is the MIME sub content type, defaulting to "plain".
-
- _charset is the character set parameter added to the Content-Type
- header. This defaults to "us-ascii". Note that as a side-effect, the
- Content-Transfer-Encoding header will also be set.
- """
- MIMENonMultipart.__init__(self, 'text', _subtype,
- **{'charset': _charset})
- self.set_payload(_text, _charset)
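
A minimal sketch (Python 2): the _charset argument drives the Content-Transfer-Encoding that set_payload() adds; both strings are illustrative:

    from email.mime.text import MIMEText

    ascii_part = MIMEText('plain ascii text')
    latin_part = MIMEText('caf\xe9', _charset='iso-8859-1')
    print ascii_part['Content-Transfer-Encoding']   # 7bit
    print latin_part['Content-Transfer-Encoding']   # quoted-printable
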
diff --git a/sys/lib/python/email/parser.py b/sys/lib/python/email/parser.py
deleted file mode 100644
index 2fcaf2545..000000000
--- a/sys/lib/python/email/parser.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
-# Contact: email-sig@python.org
-
-"""A parser of RFC 2822 and MIME email messages."""
-
-__all__ = ['Parser', 'HeaderParser']
-
-import warnings
-from cStringIO import StringIO
-
-from email.feedparser import FeedParser
-from email.message import Message
-
-
-
-class Parser:
- def __init__(self, *args, **kws):
- """Parser of RFC 2822 and MIME email messages.
-
- Creates an in-memory object tree representing the email message, which
- can then be manipulated and turned over to a Generator to return the
- textual representation of the message.
-
- The string must be formatted as a block of RFC 2822 headers and header
- continuation lines, optionally preceded by a `Unix-from' header. The
- header block is terminated either by the end of the string or by a
- blank line.
-
- _class is the class to instantiate for new message objects when they
- must be created. This class must have a constructor that can take
- zero arguments. Default is Message.Message.
- """
- if len(args) >= 1:
- if '_class' in kws:
- raise TypeError("Multiple values for keyword arg '_class'")
- kws['_class'] = args[0]
- if len(args) == 2:
- if 'strict' in kws:
- raise TypeError("Multiple values for keyword arg 'strict'")
- kws['strict'] = args[1]
- if len(args) > 2:
- raise TypeError('Too many arguments')
- if '_class' in kws:
- self._class = kws['_class']
- del kws['_class']
- else:
- self._class = Message
- if 'strict' in kws:
- warnings.warn("'strict' argument is deprecated (and ignored)",
- DeprecationWarning, 2)
- del kws['strict']
- if kws:
- raise TypeError('Unexpected keyword arguments')
-
- def parse(self, fp, headersonly=False):
- """Create a message structure from the data in a file.
-
- Reads all the data from the file and returns the root of the message
- structure. Optional headersonly is a flag specifying whether to stop
- parsing after reading the headers or not. The default is False,
- meaning it parses the entire contents of the file.
- """
- feedparser = FeedParser(self._class)
- if headersonly:
- feedparser._set_headersonly()
- while True:
- data = fp.read(8192)
- if not data:
- break
- feedparser.feed(data)
- return feedparser.close()
-
- def parsestr(self, text, headersonly=False):
- """Create a message structure from a string.
-
- Returns the root of the message structure. Optional headersonly is a
- flag specifying whether to stop parsing after reading the headers or
- not. The default is False, meaning it parses the entire contents of
- the file.
- """
- return self.parse(StringIO(text), headersonly=headersonly)
-
-
-
-class HeaderParser(Parser):
- def parse(self, fp, headersonly=True):
- return Parser.parse(self, fp, True)
-
- def parsestr(self, text, headersonly=True):
- return Parser.parsestr(self, text, True)
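
A minimal sketch (Python 2) of the Parser and HeaderParser classes deleted above; the message text and addresses are illustrative:

    from email.parser import Parser, HeaderParser

    text = 'Subject: illustrative\nTo: someone@example.invalid\n\nbody line\n'

    msg = Parser().parsestr(text)
    print msg['Subject']                 # illustrative
    print repr(msg.get_payload())        # 'body line\n'

    # HeaderParser stops after the header block and leaves the rest of
    # the text as an unparsed string payload.
    headers_only = HeaderParser().parsestr(text)
    print headers_only['To']             # someone@example.invalid
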
diff --git a/sys/lib/python/email/quoprimime.py b/sys/lib/python/email/quoprimime.py
deleted file mode 100644
index a5658dd3f..000000000
--- a/sys/lib/python/email/quoprimime.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Ben Gertzfield
-# Contact: email-sig@python.org
-
-"""Quoted-printable content transfer encoding per RFCs 2045-2047.
-
-This module handles the content transfer encoding method defined in RFC 2045
-to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
-safely encode text that is in a character set similar to the 7-bit US ASCII
-character set, but that includes some 8-bit characters that are normally not
-allowed in email bodies or headers.
-
-Quoted-printable is very space-inefficient for encoding binary files; use the
-email.base64MIME module for that instead.
-
-This module provides an interface to encode and decode both headers and bodies
-with quoted-printable encoding.
-
-RFC 2045 defines a method for including character set information in an
-`encoded-word' in a header. This method is commonly used for 8-bit real names
-in To:/From:/Cc: etc. fields, as well as Subject: lines.
-
-This module does not do the line wrapping or end-of-line character
-conversion necessary for proper internationalized headers; it only
-does dumb encoding and decoding. To deal with the various line
-wrapping issues, use the email.Header module.
-"""
-
-__all__ = [
- 'body_decode',
- 'body_encode',
- 'body_quopri_check',
- 'body_quopri_len',
- 'decode',
- 'decodestring',
- 'encode',
- 'encodestring',
- 'header_decode',
- 'header_encode',
- 'header_quopri_check',
- 'header_quopri_len',
- 'quote',
- 'unquote',
- ]
-
-import re
-
-from string import hexdigits
-from email.utils import fix_eols
-
-CRLF = '\r\n'
-NL = '\n'
-
-# See also Charset.py
-MISC_LEN = 7
-
-hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
-bqre = re.compile(r'[^ !-<>-~\t]')
-
-
-
-# Helpers
-def header_quopri_check(c):
- """Return True if the character should be escaped with header quopri."""
- return bool(hqre.match(c))
-
-
-def body_quopri_check(c):
- """Return True if the character should be escaped with body quopri."""
- return bool(bqre.match(c))
-
-
-def header_quopri_len(s):
- """Return the length of str when it is encoded with header quopri."""
- count = 0
- for c in s:
- if hqre.match(c):
- count += 3
- else:
- count += 1
- return count
-
-
-def body_quopri_len(str):
- """Return the length of str when it is encoded with body quopri."""
- count = 0
- for c in str:
- if bqre.match(c):
- count += 3
- else:
- count += 1
- return count
-
-
-def _max_append(L, s, maxlen, extra=''):
- if not L:
- L.append(s.lstrip())
- elif len(L[-1]) + len(s) <= maxlen:
- L[-1] += extra + s
- else:
- L.append(s.lstrip())
-
-
-def unquote(s):
- """Turn a string in the form =AB to the ASCII character with value 0xab"""
- return chr(int(s[1:3], 16))
-
-
-def quote(c):
- return "=%02X" % ord(c)
-
-
-
-def header_encode(header, charset="iso-8859-1", keep_eols=False,
- maxlinelen=76, eol=NL):
- """Encode a single header line with quoted-printable (like) encoding.
-
- Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
- used specifically for email header fields to allow charsets with mostly 7
- bit characters (and some 8 bit) to remain more or less readable in non-RFC
- 2045 aware mail clients.
-
- charset names the character set to use to encode the header. It defaults
- to iso-8859-1.
-
- The resulting string will be in the form:
-
- "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
- =?charset?q?Silly_=C8nglish_Kn=EEghts?="
-
- with each line wrapped safely at, at most, maxlinelen characters (defaults
- to 76 characters). If maxlinelen is None, the entire string is encoded in
- one chunk with no splitting.
-
- End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
- to the canonical email line separator \\r\\n unless the keep_eols
- parameter is True (the default is False).
-
- Each line of the header will be terminated in the value of eol, which
- defaults to "\\n". Set this to "\\r\\n" if you are using the result of
- this function directly in email.
- """
- # Return empty headers unchanged
- if not header:
- return header
-
- if not keep_eols:
- header = fix_eols(header)
-
- # Quopri encode each line, in encoded chunks no greater than maxlinelen in
- # length, after the RFC chrome is added in.
- quoted = []
- if maxlinelen is None:
- # An obnoxiously large number that's good enough
- max_encoded = 100000
- else:
- max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
-
- for c in header:
- # Space may be represented as _ instead of =20 for readability
- if c == ' ':
- _max_append(quoted, '_', max_encoded)
- # These characters can be included verbatim
- elif not hqre.match(c):
- _max_append(quoted, c, max_encoded)
- # Otherwise, replace with hex value like =E2
- else:
- _max_append(quoted, "=%02X" % ord(c), max_encoded)
-
- # Now add the RFC chrome to each encoded chunk and glue the chunks
- # together. BAW: should we be able to specify the leading whitespace in
- # the joiner?
- joiner = eol + ' '
- return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
-
-
-
-def encode(body, binary=False, maxlinelen=76, eol=NL):
- """Encode with quoted-printable, wrapping at maxlinelen characters.
-
- If binary is False (the default), end-of-line characters will be converted
- to the canonical email end-of-line sequence \\r\\n. Otherwise they will
- be left verbatim.
-
- Each line of encoded text will end with eol, which defaults to "\\n". Set
- this to "\\r\\n" if you will be using the result of this function directly
- in an email.
-
- Each line will be wrapped at, at most, maxlinelen characters (defaults to
- 76 characters). Long lines will have the `soft linefeed' quoted-printable
- character "=" appended to them, so the decoded text will be identical to
- the original text.
- """
- if not body:
- return body
-
- if not binary:
- body = fix_eols(body)
-
- # BAW: We're accumulating the body text by string concatenation. That
- # can't be very efficient, but I don't have time now to rewrite it. It
- # just feels like this algorithm could be more efficient.
- encoded_body = ''
- lineno = -1
- # Preserve line endings here so we can check later whether an eol needs
- # to be added to the output.
- lines = body.splitlines(1)
- for line in lines:
- # But strip off line-endings for processing this line.
- if line.endswith(CRLF):
- line = line[:-2]
- elif line[-1] in CRLF:
- line = line[:-1]
-
- lineno += 1
- encoded_line = ''
- prev = None
- linelen = len(line)
- # Now we need to examine every character to see if it needs to be
- # quopri encoded. BAW: again, string concatenation is inefficient.
- for j in range(linelen):
- c = line[j]
- prev = c
- if bqre.match(c):
- c = quote(c)
- elif j+1 == linelen:
- # Check for whitespace at end of line; special case
- if c not in ' \t':
- encoded_line += c
- prev = c
- continue
- # Check to see if the line has reached its maximum length
- if len(encoded_line) + len(c) >= maxlinelen:
- encoded_body += encoded_line + '=' + eol
- encoded_line = ''
- encoded_line += c
- # Now at end of line..
- if prev and prev in ' \t':
- # Special case for whitespace at end of file
- if lineno + 1 == len(lines):
- prev = quote(prev)
- if len(encoded_line) + len(prev) > maxlinelen:
- encoded_body += encoded_line + '=' + eol + prev
- else:
- encoded_body += encoded_line + prev
- # Just normal whitespace at end of line
- else:
- encoded_body += encoded_line + prev + '=' + eol
- encoded_line = ''
- # Now look at the line we just finished; if it has a line ending, we
- # need to add eol to the end of the line.
- if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
- encoded_body += encoded_line + eol
- else:
- encoded_body += encoded_line
- encoded_line = ''
- return encoded_body
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_encode = encode
-encodestring = encode
-
-
-
-# BAW: I'm not sure if the intent was for the signature of this function to be
-# the same as base64MIME.decode() or not...
-def decode(encoded, eol=NL):
- """Decode a quoted-printable string.
-
- Lines are separated with eol, which defaults to \\n.
- """
- if not encoded:
- return encoded
- # BAW: see comment in encode() above. Again, we're building up the
- # decoded string with string concatenation, which could be done much more
- # efficiently.
- decoded = ''
-
- for line in encoded.splitlines():
- line = line.rstrip()
- if not line:
- decoded += eol
- continue
-
- i = 0
- n = len(line)
- while i < n:
- c = line[i]
- if c <> '=':
- decoded += c
- i += 1
- # Otherwise, c == "=". Are we at the end of the line? If so, add
- # a soft line break.
- elif i+1 == n:
- i += 1
- continue
- # Decode if in form =AB
- elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
- decoded += unquote(line[i:i+3])
- i += 3
- # Otherwise, not in form =AB, pass literally
- else:
- decoded += c
- i += 1
-
- if i == n:
- decoded += eol
- # Special case if original string did not end with eol
- if not encoded.endswith(eol) and decoded.endswith(eol):
- decoded = decoded[:-1]
- return decoded
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_decode = decode
-decodestring = decode
-
-
-
-def _unquote_match(match):
- """Turn a match in the form =AB to the ASCII character with value 0xab"""
- s = match.group(0)
- return unquote(s)
-
-
-# Header decoding is done a bit differently
-def header_decode(s):
- """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
-
- This function does not parse a full MIME header value encoded with
- quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
- the high level email.Header class for that functionality.
- """
- s = s.replace('_', ' ')
- return re.sub(r'=\w{2}', _unquote_match, s)
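
A minimal sketch (Python 2) of the quoted-printable round trip through the module deleted above: body text through encode()/decode() and a header fragment through header_encode()/header_decode(); the strings are illustrative:

    from email import quoprimime

    body = 'caf\xe9 au lait\n'
    qp = quoprimime.encode(body)
    print qp                                   # caf=E9 au lait
    print quoprimime.decode(qp) == body        # True

    hdr = quoprimime.header_encode('caf\xe9', charset='iso-8859-1')
    print hdr                                  # =?iso-8859-1?q?caf=E9?=
    print repr(quoprimime.header_decode('caf=E9'))   # 'caf\xe9'
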
diff --git a/sys/lib/python/email/utils.py b/sys/lib/python/email/utils.py
deleted file mode 100644
index ee952d392..000000000
--- a/sys/lib/python/email/utils.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Miscellaneous utilities."""
-
-__all__ = [
- 'collapse_rfc2231_value',
- 'decode_params',
- 'decode_rfc2231',
- 'encode_rfc2231',
- 'formataddr',
- 'formatdate',
- 'getaddresses',
- 'make_msgid',
- 'parseaddr',
- 'parsedate',
- 'parsedate_tz',
- 'unquote',
- ]
-
-import os
-import re
-import time
-import base64
-import random
-import socket
-import urllib
-import warnings
-from cStringIO import StringIO
-
-from email._parseaddr import quote
-from email._parseaddr import AddressList as _AddressList
-from email._parseaddr import mktime_tz
-
- # We need workarounds for bugs in these methods in older Pythons (see below)
-from email._parseaddr import parsedate as _parsedate
-from email._parseaddr import parsedate_tz as _parsedate_tz
-
-from quopri import decodestring as _qdecode
-
-# Intrapackage imports
-from email.encoders import _bencode, _qencode
-
-COMMASPACE = ', '
-EMPTYSTRING = ''
-UEMPTYSTRING = u''
-CRLF = '\r\n'
-TICK = "'"
-
-specialsre = re.compile(r'[][\\()<>@,:;".]')
-escapesre = re.compile(r'[][\\()"]')
-
-
-
-# Helpers
-
-def _identity(s):
- return s
-
-
-def _bdecode(s):
- # We can't quite use base64.encodestring() since it tacks on a "courtesy
- # newline". Blech!
- if not s:
- return s
- value = base64.decodestring(s)
- if not s.endswith('\n') and value.endswith('\n'):
- return value[:-1]
- return value
-
-
-
-def fix_eols(s):
- """Replace all line-ending characters with \r\n."""
- # Fix newlines with no preceding carriage return
- s = re.sub(r'(?<!\r)\n', CRLF, s)
- # Fix carriage returns with no following newline
- s = re.sub(r'\r(?!\n)', CRLF, s)
- return s
-
-
-
-def formataddr(pair):
- """The inverse of parseaddr(), this takes a 2-tuple of the form
- (realname, email_address) and returns the string value suitable
- for an RFC 2822 From, To or Cc header.
-
- If the first element of pair is false, then the second element is
- returned unmodified.
- """
- name, address = pair
- if name:
- quotes = ''
- if specialsre.search(name):
- quotes = '"'
- name = escapesre.sub(r'\\\g<0>', name)
- return '%s%s%s <%s>' % (quotes, name, quotes, address)
- return address
-
-
-
-def getaddresses(fieldvalues):
- """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
- all = COMMASPACE.join(fieldvalues)
- a = _AddressList(all)
- return a.addresslist
-
-
-
-ecre = re.compile(r'''
- =\? # literal =?
- (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
- \? # literal ?
- (?P<encoding>[qb]) # either a "q" or a "b", case insensitive
- \? # literal ?
- (?P<atom>.*?) # non-greedy up to the next ?= is the atom
- \?= # literal ?=
- ''', re.VERBOSE | re.IGNORECASE)
-
-
-
-def formatdate(timeval=None, localtime=False, usegmt=False):
- """Returns a date string as specified by RFC 2822, e.g.:
-
- Fri, 09 Nov 2001 01:08:47 -0000
-
- Optional timeval if given is a floating point time value as accepted by
- gmtime() and localtime(), otherwise the current time is used.
-
- Optional localtime is a flag that, when True, interprets timeval and
- returns a date relative to the local timezone instead of UTC, properly
- taking daylight saving time into account.
-
- Optional argument usegmt means that the timezone is written out as
- an ascii string, not numeric one (so "GMT" instead of "+0000"). This
- is needed for HTTP, and is only used when localtime==False.
- """
- # Note: we cannot use strftime() because that honors the locale and RFC
- # 2822 requires that day and month names be the English abbreviations.
- if timeval is None:
- timeval = time.time()
- if localtime:
- now = time.localtime(timeval)
- # Calculate timezone offset, based on whether the local zone has
- # daylight savings time, and whether DST is in effect.
- if time.daylight and now[-1]:
- offset = time.altzone
- else:
- offset = time.timezone
- hours, minutes = divmod(abs(offset), 3600)
- # Remember offset is in seconds west of UTC, but the timezone is in
- # minutes east of UTC, so the signs differ.
- if offset > 0:
- sign = '-'
- else:
- sign = '+'
- zone = '%s%02d%02d' % (sign, hours, minutes // 60)
- else:
- now = time.gmtime(timeval)
- # Timezone offset is always -0000
- if usegmt:
- zone = 'GMT'
- else:
- zone = '-0000'
- return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
- ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
- now[2],
- ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
- now[0], now[3], now[4], now[5],
- zone)
-
-
-
-def make_msgid(idstring=None):
- """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
-
- <20020201195627.33539.96671@nightshade.la.mastaler.com>
-
- Optional idstring if given is a string used to strengthen the
- uniqueness of the message id.
- """
- timeval = time.time()
- utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
- pid = os.getpid()
- randint = random.randrange(100000)
- if idstring is None:
- idstring = ''
- else:
- idstring = '.' + idstring
- idhost = socket.getfqdn()
- msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
- return msgid
-
-
-
-# These functions are in the standalone mimelib version only because they've
-# subsequently been fixed in the latest Python versions. We use this to worm
-# around broken older Pythons.
-def parsedate(data):
- if not data:
- return None
- return _parsedate(data)
-
-
-def parsedate_tz(data):
- if not data:
- return None
- return _parsedate_tz(data)
-
-
-def parseaddr(addr):
- addrs = _AddressList(addr).addresslist
- if not addrs:
- return '', ''
- return addrs[0]
-
-
-# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
-def unquote(str):
- """Remove quotes from a string."""
- if len(str) > 1:
- if str.startswith('"') and str.endswith('"'):
- return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
- if str.startswith('<') and str.endswith('>'):
- return str[1:-1]
- return str
-
-
-
-# RFC2231-related functions - parameter encoding and decoding
-def decode_rfc2231(s):
- """Decode string according to RFC 2231"""
- parts = s.split(TICK, 2)
- if len(parts) <= 2:
- return None, None, s
- return parts
-
-
-def encode_rfc2231(s, charset=None, language=None):
- """Encode string according to RFC 2231.
-
- If neither charset nor language is given, then s is returned as-is. If
- charset is given but not language, the string is encoded using the empty
- string for language.
- """
- import urllib
- s = urllib.quote(s, safe='')
- if charset is None and language is None:
- return s
- if language is None:
- language = ''
- return "%s'%s'%s" % (charset, language, s)
-
-
-rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
-
-def decode_params(params):
- """Decode parameters list according to RFC 2231.
-
- params is a sequence of 2-tuples containing (param name, string value).
- """
- # Copy params so we don't mess with the original
- params = params[:]
- new_params = []
- # Map parameter's name to a list of continuations. The values are a
- # 3-tuple of the continuation number, the string value, and a flag
- # specifying whether a particular segment is %-encoded.
- rfc2231_params = {}
- name, value = params.pop(0)
- new_params.append((name, value))
- while params:
- name, value = params.pop(0)
- if name.endswith('*'):
- encoded = True
- else:
- encoded = False
- value = unquote(value)
- mo = rfc2231_continuation.match(name)
- if mo:
- name, num = mo.group('name', 'num')
- if num is not None:
- num = int(num)
- rfc2231_params.setdefault(name, []).append((num, value, encoded))
- else:
- new_params.append((name, '"%s"' % quote(value)))
- if rfc2231_params:
- for name, continuations in rfc2231_params.items():
- value = []
- extended = False
- # Sort by number
- continuations.sort()
- # And now append all values in numerical order, converting
- # %-encodings for the encoded segments. If any of the
- # continuation names ends in a *, then the entire string, after
- # decoding segments and concatenating, must have the charset and
- # language specifiers at the beginning of the string.
- for num, s, encoded in continuations:
- if encoded:
- s = urllib.unquote(s)
- extended = True
- value.append(s)
- value = quote(EMPTYSTRING.join(value))
- if extended:
- charset, language, value = decode_rfc2231(value)
- new_params.append((name, (charset, language, '"%s"' % value)))
- else:
- new_params.append((name, '"%s"' % value))
- return new_params
-
-def collapse_rfc2231_value(value, errors='replace',
- fallback_charset='us-ascii'):
- if isinstance(value, tuple):
- rawval = unquote(value[2])
- charset = value[0] or 'us-ascii'
- try:
- return unicode(rawval, charset, errors)
- except LookupError:
- # XXX charset is unknown to Python.
- return unicode(rawval, fallback_charset, errors)
- else:
- return unquote(value)
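
A minimal sketch (Python 2) of the address, date, and RFC 2231 helpers in the utils module deleted above; the addresses and filename are illustrative:

    from email import utils

    addr = utils.formataddr(('Jane Q. Public', 'jqp@example.invalid'))
    print addr                                # "Jane Q. Public" <jqp@example.invalid>
    print utils.parseaddr(addr)               # ('Jane Q. Public', 'jqp@example.invalid')

    print utils.formatdate(0, usegmt=True)    # Thu, 01 Jan 1970 00:00:00 GMT

    enc = utils.encode_rfc2231('fu\xdfball.txt', 'iso-8859-1', 'de')
    print enc                                 # iso-8859-1'de'fu%DFball.txt
    print utils.decode_rfc2231(enc)           # ['iso-8859-1', 'de', 'fu%DFball.txt']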